Image Atomic Operations Tests
diff --git a/external/vulkancts/framework/vulkan/vkTypeUtil.hpp b/external/vulkancts/framework/vulkan/vkTypeUtil.hpp
index 72cd30f..4372041 100644
--- a/external/vulkancts/framework/vulkan/vkTypeUtil.hpp
+++ b/external/vulkancts/framework/vulkan/vkTypeUtil.hpp
@@ -81,6 +81,16 @@
 								VK_COMPONENT_SWIZZLE_A);
 }
 
+//! Convert a signed tcu vector to a VkExtent3D. Components are assumed to be
+//! non-negative; the conversion to deUint32 is made explicit with static_cast
+//! (named casts are greppable and intent-revealing, unlike C-style casts).
+inline VkExtent3D makeExtent3D(const tcu::IVec3& vec)
+{
+	return makeExtent3D(static_cast<deUint32>(vec.x()), static_cast<deUint32>(vec.y()), static_cast<deUint32>(vec.z()));
+}
+
+//! Convert an unsigned tcu vector to a VkExtent3D (width, height, depth).
+inline VkExtent3D makeExtent3D(const tcu::UVec3& vec)
+{
+	const deUint32 width	= vec.x();
+	const deUint32 height	= vec.y();
+	const deUint32 depth	= vec.z();
+
+	return makeExtent3D(width, height, depth);
+}
+
 } // vk
 
 #endif // _VKTYPEUTIL_HPP
diff --git a/external/vulkancts/modules/vulkan/image/CMakeLists.txt b/external/vulkancts/modules/vulkan/image/CMakeLists.txt
index 475182a..e47e1fd 100644
--- a/external/vulkancts/modules/vulkan/image/CMakeLists.txt
+++ b/external/vulkancts/modules/vulkan/image/CMakeLists.txt
@@ -5,6 +5,8 @@
 	vktImageTests.hpp
 	vktImageTestsUtil.cpp
 	vktImageTestsUtil.hpp
+	vktImageAtomicOperationTests.cpp
+	vktImageAtomicOperationTests.hpp
 	vktImageLoadStoreTests.cpp
 	vktImageLoadStoreTests.hpp
 	vktImageQualifiersTests.cpp
diff --git a/external/vulkancts/modules/vulkan/image/vktImageAtomicOperationTests.cpp b/external/vulkancts/modules/vulkan/image/vktImageAtomicOperationTests.cpp
new file mode 100644
index 0000000..11a9c69
--- /dev/null
+++ b/external/vulkancts/modules/vulkan/image/vktImageAtomicOperationTests.cpp
@@ -0,0 +1,987 @@
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file  vktImageAtomicOperationTests.cpp
+ * \brief Image atomic operation tests
+ *//*--------------------------------------------------------------------*/
+
+#include "vktImageAtomicOperationTests.hpp"
+
+#include "deUniquePtr.hpp"
+#include "deStringUtil.hpp"
+
+#include "vktTestCaseUtil.hpp"
+#include "vkPrograms.hpp"
+#include "vkImageUtil.hpp"
+#include "vktImageTestsUtil.hpp"
+#include "vkBuilderUtil.hpp"
+#include "vkRef.hpp"
+#include "vkRefUtil.hpp"
+#include "vkTypeUtil.hpp"
+
+#include "tcuTextureUtil.hpp"
+#include "tcuTexture.hpp"
+#include "tcuVectorType.hpp"
+
+namespace vkt
+{
+namespace image
+{
+namespace
+{
+
+using namespace vk;
+using namespace std;
+using de::toString;
+
+using tcu::TextureFormat;
+using tcu::IVec2;
+using tcu::IVec3;
+using tcu::UVec3;
+using tcu::Vec4;
+using tcu::IVec4;
+using tcu::UVec4;
+using tcu::CubeFace;
+using tcu::Texture1D;
+using tcu::Texture2D;
+using tcu::Texture3D;
+using tcu::Texture2DArray;
+using tcu::TextureCube;
+using tcu::PixelBufferAccess;
+using tcu::ConstPixelBufferAccess;
+using tcu::Vector;
+using tcu::TestContext;
+
+enum
+{
+	// Each pixel of the result image is operated on by this many compute
+	// invocations; the dispatch grid is extended along X accordingly.
+	NUM_INVOCATIONS_PER_PIXEL = 5u
+};
+
+//! Atomic image operations exercised by these tests; each maps to one of the
+//! GLSL imageAtomic* functions (see getAtomicOperationShaderFuncName).
+enum AtomicOperation
+{
+	ATOMIC_OPERATION_ADD = 0,
+	ATOMIC_OPERATION_MIN,
+	ATOMIC_OPERATION_MAX,
+	ATOMIC_OPERATION_AND,
+	ATOMIC_OPERATION_OR,
+	ATOMIC_OPERATION_XOR,
+	ATOMIC_OPERATION_EXCHANGE,
+
+	ATOMIC_OPERATION_LAST
+};
+
+//! Build the GLSL image-coordinate expression for the given image type from
+//! the per-component expressions x, y and z; components that the image type
+//! does not use are dropped.
+static string getCoordStr (const ImageType		imageType,
+						   const std::string&	x,
+						   const std::string&	y,
+						   const std::string&	z)
+{
+	switch (imageType)
+	{
+		case IMAGE_TYPE_1D:
+		case IMAGE_TYPE_BUFFER:
+			return x;
+		case IMAGE_TYPE_1D_ARRAY:
+		case IMAGE_TYPE_2D:
+			return string("ivec2(" + x + "," + y + ")");
+		case IMAGE_TYPE_2D_ARRAY:
+		case IMAGE_TYPE_3D:
+		case IMAGE_TYPE_CUBE:
+		case IMAGE_TYPE_CUBE_ARRAY:
+			return string("ivec3(" + x + "," + y + "," + z + ")");
+		default:
+			DE_ASSERT(false);
+			// Was "return DE_NULL;": constructing std::string from a null
+			// pointer is undefined behavior.
+			return string();
+	}
+}
+
+//! Build the GLSL expression for the value an invocation at (x, y, z) passes
+//! to the atomic function; mirrors getAtomicFuncArgument on the host side.
+static string getAtomicFuncArgumentShaderStr (const AtomicOperation	op,
+											  const string&			x,
+											  const string&			y,
+											  const string&			z,
+											  const IVec3&			gridSize)
+{
+	switch (op)
+	{
+		case ATOMIC_OPERATION_ADD:
+		case ATOMIC_OPERATION_MIN:
+		case ATOMIC_OPERATION_MAX:
+		case ATOMIC_OPERATION_AND:
+		case ATOMIC_OPERATION_OR:
+		case ATOMIC_OPERATION_XOR:
+			return string("(" + x + "*" + x + " + " + y + "*" + y + " + " + z + "*" + z + ")");
+		case ATOMIC_OPERATION_EXCHANGE:
+			// Flattened invocation index: unique per invocation, so the end
+			// result of exchange can be matched against the argument set.
+			return string("((" + z + "*" + toString(gridSize.x()) + " + " + x + ")*" + toString(gridSize.y()) + " + " + y + ")");
+		default:
+			DE_ASSERT(false);
+			// Was "return DE_NULL;": constructing std::string from a null
+			// pointer is undefined behavior.
+			return string();
+	}
+}
+
+//! Lower-case test-case name for the given atomic operation.
+static string getAtomicOperationCaseName (const AtomicOperation op)
+{
+	switch (op)
+	{
+		case ATOMIC_OPERATION_ADD:			return string("add");
+		case ATOMIC_OPERATION_MIN:			return string("min");
+		case ATOMIC_OPERATION_MAX:			return string("max");
+		case ATOMIC_OPERATION_AND:			return string("and");
+		case ATOMIC_OPERATION_OR:			return string("or");
+		case ATOMIC_OPERATION_XOR:			return string("xor");
+		case ATOMIC_OPERATION_EXCHANGE:		return string("exchange");
+		default:
+			DE_ASSERT(false);
+			// Was "return DE_NULL;": constructing std::string from a null
+			// pointer is undefined behavior.
+			return string();
+	}
+}
+
+//! GLSL built-in function name corresponding to the given atomic operation.
+static string getAtomicOperationShaderFuncName (const AtomicOperation op)
+{
+	switch (op)
+	{
+		case ATOMIC_OPERATION_ADD:			return string("imageAtomicAdd");
+		case ATOMIC_OPERATION_MIN:			return string("imageAtomicMin");
+		case ATOMIC_OPERATION_MAX:			return string("imageAtomicMax");
+		case ATOMIC_OPERATION_AND:			return string("imageAtomicAnd");
+		case ATOMIC_OPERATION_OR:			return string("imageAtomicOr");
+		case ATOMIC_OPERATION_XOR:			return string("imageAtomicXor");
+		case ATOMIC_OPERATION_EXCHANGE:		return string("imageAtomicExchange");
+		default:
+			DE_ASSERT(false);
+			// Was "return DE_NULL;": constructing std::string from a null
+			// pointer is undefined behavior.
+			return string();
+	}
+}
+
+//! Initial value written to every pixel before the atomic invocations run.
+static deInt32 getOperationInitialValue (const AtomicOperation op)
+{
+	switch (op)
+	{
+		// MIN and AND start from an all-ones 15-bit pattern so the operation
+		// can meaningfully lower the value / clear bits.
+		case ATOMIC_OPERATION_MIN:
+		case ATOMIC_OPERATION_AND:
+			return (1 << 15) - 1;
+		// \note 18 is just an arbitrary small nonzero value.
+		case ATOMIC_OPERATION_ADD:
+		case ATOMIC_OPERATION_MAX:
+		case ATOMIC_OPERATION_OR:
+		case ATOMIC_OPERATION_XOR:
+		case ATOMIC_OPERATION_EXCHANGE:
+			return 18;
+		default:
+			DE_ASSERT(false);
+			return -1;
+	}
+}
+
+//! Host-side mirror of getAtomicFuncArgumentShaderStr: the value the shader
+//! invocation with the given ID passes to the atomic function.
+static deInt32 getAtomicFuncArgument (const AtomicOperation op, const IVec3& invocationID, const IVec3& gridSize)
+{
+	switch (op)
+	{
+		case ATOMIC_OPERATION_EXCHANGE:
+			// Flattened invocation index: unique per invocation.
+			return (invocationID.z()*gridSize.x() + invocationID.x())*gridSize.y() + invocationID.y();
+
+		// \note Fall-throughs.
+		case ATOMIC_OPERATION_ADD:
+		case ATOMIC_OPERATION_MIN:
+		case ATOMIC_OPERATION_MAX:
+		case ATOMIC_OPERATION_AND:
+		case ATOMIC_OPERATION_OR:
+		case ATOMIC_OPERATION_XOR:
+		{
+			const deInt32 squaredLength = invocationID.x()*invocationID.x()
+										+ invocationID.y()*invocationID.y()
+										+ invocationID.z()*invocationID.z();
+			return squaredLength;
+		}
+
+		default:
+			DE_ASSERT(false);
+			return -1;
+	}
+}
+
+//! An order-independent operation is one for which the end result doesn't depend on the order in which the operations are carried (i.e. is both commutative and associative).
+static bool isOrderIndependentAtomicOperation (const AtomicOperation op)
+{
+	return	op == ATOMIC_OPERATION_ADD ||
+			op == ATOMIC_OPERATION_MIN ||
+			op == ATOMIC_OPERATION_MAX ||
+			op == ATOMIC_OPERATION_AND ||
+			op == ATOMIC_OPERATION_OR ||
+			op == ATOMIC_OPERATION_XOR;
+}
+
+//! Computes the result of an atomic operation where "a" is the data operated on and "b" is the parameter to the atomic function.
+static deInt32 computeBinaryAtomicOperationResult (const AtomicOperation op, const deInt32 a, const deInt32 b)
+{
+	switch (op)
+	{
+		case ATOMIC_OPERATION_ADD:			return a + b;
+		case ATOMIC_OPERATION_MIN:			return de::min(a, b);
+		case ATOMIC_OPERATION_MAX:			return de::max(a, b);
+		case ATOMIC_OPERATION_AND:			return a & b;
+		case ATOMIC_OPERATION_OR:			return a | b;
+		case ATOMIC_OPERATION_XOR:			return a ^ b;
+		case ATOMIC_OPERATION_EXCHANGE:		return b;
+		default:
+			DE_ASSERT(false);
+			return -1;
+	}
+}
+
+//! Test case that checks only the final contents of the result image after
+//! all atomic invocations have completed. Valid for any execution order of
+//! order-independent operations; for exchange the result must match one of
+//! the invocation arguments.
+class BinaryAtomicEndResultCase : public vkt::TestCase
+{
+public:
+								BinaryAtomicEndResultCase  (tcu::TestContext&			testCtx,
+															const string&				name,
+															const string&				description,
+															const ImageType				imageType,
+															const tcu::UVec3&			imageSize,
+															const tcu::TextureFormat&	format,
+															const AtomicOperation		operation,
+															const glu::GLSLVersion		glslVersion);
+
+	void						initPrograms			   (SourceCollections&			sourceCollections) const;
+	TestInstance*				createInstance			   (Context&					context) const;
+private:
+
+	const ImageType				m_imageType;
+	const tcu::UVec3			m_imageSize;
+	const tcu::TextureFormat	m_format;
+	const AtomicOperation		m_operation;
+	const glu::GLSLVersion		m_glslVersion;
+};
+
+// Simple member-wise initialization; all work happens in initPrograms and the
+// instance created by createInstance.
+BinaryAtomicEndResultCase::BinaryAtomicEndResultCase (tcu::TestContext&			testCtx,
+													  const string&				name,
+													  const string&				description,
+													  const ImageType			imageType,
+													  const tcu::UVec3&			imageSize,
+													  const tcu::TextureFormat&	format,
+													  const AtomicOperation		operation,
+													  const glu::GLSLVersion	glslVersion)
+	: TestCase		(testCtx, name, description)
+	, m_imageType	(imageType)
+	, m_imageSize	(imageSize)
+	, m_format		(format)
+	, m_operation	(operation)
+	, m_glslVersion	(glslVersion)
+{
+}
+
+//! Generate the compute shader: every invocation performs one atomic call on
+//! the shared result image; only the final image contents are verified.
+void BinaryAtomicEndResultCase::initPrograms (SourceCollections& sourceCollections) const
+{
+	const string	versionDecl				= glu::getGLSLVersionDeclaration(m_glslVersion);
+
+	const bool		uintFormat				= isUintFormat(mapTextureFormat(m_format));
+	const bool		intFormat				= isIntFormat(mapTextureFormat(m_format));
+	const UVec3		gridSize				= getShaderGridSize(m_imageType, m_imageSize);
+	// NUM_INVOCATIONS_PER_PIXEL invocations along X map to the same pixel,
+	// hence the modulo in the x coordinate.
+	const string	atomicCoord				= getCoordStr(m_imageType, "gx % " + toString(gridSize.x()), "gy", "gz");
+
+	// Argument is cast to the image's component type (uint/int/float).
+	const string	atomicArgExpr			= (uintFormat ? "uint" : intFormat ? "int" : "float")
+											+ getAtomicFuncArgumentShaderStr(m_operation, "gx", "gy", "gz", IVec3(NUM_INVOCATIONS_PER_PIXEL*gridSize.x(), gridSize.y(), gridSize.z()));
+
+	const string	atomicInvocation		= getAtomicOperationShaderFuncName(m_operation) + "(u_resultImage, " + atomicCoord + ", " + atomicArgExpr + ")";
+	const string	shaderImageFormatStr	= getShaderImageFormatQualifier(m_format);
+	const string	shaderImageTypeStr		= getShaderImageType(m_format, m_imageType);
+
+	string source = versionDecl + "\n"
+					"precision highp " + shaderImageTypeStr + ";\n"
+					"\n"
+					"layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+					"layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
+					"\n"
+					"void main (void)\n"
+					"{\n"
+					"	int gx = int(gl_GlobalInvocationID.x);\n"
+					"	int gy = int(gl_GlobalInvocationID.y);\n"
+					"	int gz = int(gl_GlobalInvocationID.z);\n"
+					"	" + atomicInvocation + ";\n"
+					"}\n";
+
+	sourceCollections.glslSources.add(m_name) << glu::ComputeSource(source.c_str());
+}
+
+//! Test case that additionally records the value returned by each atomic call
+//! into a second (wider) image, so the per-invocation intermediate results
+//! can be validated, not just the end result.
+class BinaryAtomicIntermValuesCase : public vkt::TestCase
+{
+public:
+								BinaryAtomicIntermValuesCase   (tcu::TestContext&			testCtx,
+																const string&				name,
+																const string&				description,
+																const ImageType				imageType,
+																const tcu::UVec3&			imageSize,
+																const tcu::TextureFormat&	format,
+																const AtomicOperation		operation,
+																const glu::GLSLVersion		glslVersion);
+
+	void						initPrograms				   (SourceCollections&			sourceCollections) const;
+	TestInstance*				createInstance				   (Context&					context) const;
+private:
+
+	const ImageType				m_imageType;
+	const tcu::UVec3			m_imageSize;
+	const tcu::TextureFormat	m_format;
+	const AtomicOperation		m_operation;
+	const glu::GLSLVersion		m_glslVersion;
+};
+
+// Simple member-wise initialization, mirroring BinaryAtomicEndResultCase.
+BinaryAtomicIntermValuesCase::BinaryAtomicIntermValuesCase (TestContext&			testCtx,
+															const string&			name,
+															const string&			description,
+															const ImageType			imageType,
+															const tcu::UVec3&		imageSize,
+															const TextureFormat&	format,
+															const AtomicOperation	operation,
+															const glu::GLSLVersion	glslVersion)
+	: TestCase		(testCtx, name, description)
+	, m_imageType	(imageType)
+	, m_imageSize	(imageSize)
+	, m_format		(format)
+	, m_operation	(operation)
+	, m_glslVersion	(glslVersion)
+{
+}
+
+//! Generate the compute shader: like the end-result variant, but the value
+//! returned by the atomic call is also stored (per invocation) into a second
+//! writeonly image at the un-wrapped invocation coordinate.
+void BinaryAtomicIntermValuesCase::initPrograms (SourceCollections& sourceCollections) const
+{
+	const string	versionDecl				= glu::getGLSLVersionDeclaration(m_glslVersion);
+
+	const bool		uintFormat				= isUintFormat(mapTextureFormat(m_format));
+	const bool		intFormat				= isIntFormat(mapTextureFormat(m_format));
+	const string	colorVecTypeName		= string(uintFormat ? "u" : intFormat ? "i" : "") + "vec4";
+	const UVec3		gridSize				= getShaderGridSize(m_imageType, m_imageSize);
+	// Atomic coordinate wraps along X (many invocations per pixel); the
+	// intermediate-value coordinate does not, so each invocation gets its own texel.
+	const string	atomicCoord				= getCoordStr(m_imageType, "gx % " + toString(gridSize.x()), "gy", "gz");
+	const string	invocationCoord			= getCoordStr(m_imageType, "gx", "gy", "gz");
+	const string	atomicArgExpr			= (uintFormat ? "uint" : intFormat ? "int" : "float")
+											+ getAtomicFuncArgumentShaderStr(m_operation, "gx", "gy", "gz", IVec3(NUM_INVOCATIONS_PER_PIXEL*gridSize.x(), gridSize.y(), gridSize.z()));
+
+	const string	atomicInvocation		= getAtomicOperationShaderFuncName(m_operation) + "(u_resultImage, " + atomicCoord + ", " + atomicArgExpr + ")";
+	const string	shaderImageFormatStr	= getShaderImageFormatQualifier(m_format);
+	const string	shaderImageTypeStr		= getShaderImageType(m_format, m_imageType);
+
+	string source = versionDecl + "\n"
+					"precision highp " + shaderImageTypeStr + ";\n"
+					"\n"
+					"layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+					"layout (" + shaderImageFormatStr + ", binding=0) coherent uniform " + shaderImageTypeStr + " u_resultImage;\n"
+					"layout (" + shaderImageFormatStr + ", binding=1) writeonly uniform " + shaderImageTypeStr + " u_intermValuesImage;\n"
+					"\n"
+					"void main (void)\n"
+					"{\n"
+					"	int gx = int(gl_GlobalInvocationID.x);\n"
+					"	int gy = int(gl_GlobalInvocationID.y);\n"
+					"	int gz = int(gl_GlobalInvocationID.z);\n"
+					"	imageStore(u_intermValuesImage, " + invocationCoord + ", " + colorVecTypeName + "(" + atomicInvocation + "));\n"
+					"}\n";
+
+	sourceCollections.glslSources.add(m_name) << glu::ComputeSource(source.c_str());
+}
+
+//! Shared test-instance logic: iterate() drives the common sequence (create
+//! result image, upload initial values, dispatch, read back, verify) and
+//! delegates the variant-specific parts to the pure virtual hooks below.
+class BinaryAtomicInstanceBase : public vkt::TestInstance
+{
+public:
+
+								BinaryAtomicInstanceBase (Context&						context,
+														  const string&					name,
+														  const ImageType				imageType,
+														  const tcu::UVec3&				imageSize,
+														  const TextureFormat&			format,
+														  const AtomicOperation			operation);
+
+	tcu::TestStatus				iterate					 (void);
+
+	// Size in bytes of the host-visible buffer the results are copied into.
+	virtual deUint32			getOutputBufferSize		 (void) const = 0;
+
+	// Create any extra images/views; set up the descriptor set used by the shader.
+	virtual void				prepareResources		 (void) = 0;
+	virtual void				prepareDescriptors		 (void) = 0;
+
+	// Extra commands recorded immediately before/after the dispatch (e.g. the
+	// copy of the checked image into m_outputBuffer).
+	virtual void				commandsBeforeCompute	 (const VkCommandBuffer			cmdBuffer) const = 0;
+	virtual void				commandsAfterCompute	 (const VkCommandBuffer			cmdBuffer) const = 0;
+
+	virtual bool				verifyResult			 (Allocation&					outputBufferAllocation) const = 0;
+
+protected:
+	const string				m_name;
+	const ImageType				m_imageType;
+	const tcu::UVec3			m_imageSize;
+	const TextureFormat			m_format;
+	const AtomicOperation		m_operation;
+
+	de::MovePtr<Buffer>			m_outputBuffer;
+	Move<VkDescriptorPool>		m_descriptorPool;
+	Move<VkDescriptorSetLayout>	m_descriptorSetLayout;
+	Move<VkDescriptorSet>		m_descriptorSet;
+	de::MovePtr<Image>			m_resultImage;
+	Move<VkImageView>			m_resultImageView;
+};
+
+// Member-wise initialization only; Vulkan objects are created lazily in iterate().
+BinaryAtomicInstanceBase::BinaryAtomicInstanceBase (Context&				context,
+													const string&			name,
+													const ImageType			imageType,
+													const tcu::UVec3&		imageSize,
+													const TextureFormat&	format,
+													const AtomicOperation	operation)
+	: vkt::TestInstance	(context)
+	, m_name			(name)
+	, m_imageType		(imageType)
+	, m_imageSize		(imageSize)
+	, m_format			(format)
+	, m_operation		(operation)
+{
+}
+
+//! Common test flow: create the result image, upload per-pixel initial values
+//! via a staging buffer, dispatch the atomic compute shader (with the grid
+//! extended NUM_INVOCATIONS_PER_PIXEL times along X), copy the checked image
+//! into a host-visible buffer and verify it. Barrier/copy ordering below is
+//! deliberate; do not reorder.
+tcu::TestStatus	BinaryAtomicInstanceBase::iterate (void)
+{
+	const VkDevice			device				= m_context.getDevice();
+	const DeviceInterface&	deviceInterface		= m_context.getDeviceInterface();
+	const VkQueue			queue				= m_context.getUniversalQueue();
+	const deUint32			queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
+	Allocator&				allocator			= m_context.getDefaultAllocator();
+	const VkDeviceSize		imageSizeInBytes	= tcu::getPixelSize(m_format) * getNumPixels(m_imageType, m_imageSize);
+	const VkDeviceSize		outBuffSizeInBytes	= getOutputBufferSize();
+
+	const VkImageCreateInfo imageParams	=
+	{
+		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,					// VkStructureType			sType;
+		DE_NULL,												// const void*				pNext;
+		m_imageType == IMAGE_TYPE_CUBE || 
+		m_imageType == IMAGE_TYPE_CUBE_ARRAY ? 
+		VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u,				// VkImageCreateFlags		flags;
+		mapImageType(m_imageType),								// VkImageType				imageType;
+		mapTextureFormat(m_format),								// VkFormat					format;
+		makeExtent3D(getLayerSize(m_imageType, m_imageSize)),	// VkExtent3D				extent;
+		1u,														// deUint32					mipLevels;
+		getNumLayers(m_imageType, m_imageSize),					// deUint32					arrayLayers;
+		VK_SAMPLE_COUNT_1_BIT,									// VkSampleCountFlagBits	samples;
+		VK_IMAGE_TILING_OPTIMAL,								// VkImageTiling			tiling;
+		VK_IMAGE_USAGE_STORAGE_BIT | 
+		VK_IMAGE_USAGE_TRANSFER_SRC_BIT | 
+		VK_IMAGE_USAGE_TRANSFER_DST_BIT,						// VkImageUsageFlags		usage;
+		VK_SHARING_MODE_EXCLUSIVE,								// VkSharingMode			sharingMode;
+		0u,														// deUint32					queueFamilyIndexCount;
+		DE_NULL,												// const deUint32*			pQueueFamilyIndices;
+		VK_IMAGE_LAYOUT_UNDEFINED,								// VkImageLayout			initialLayout;
+	};
+
+	//Create the image that is going to store results of atomic operations
+	m_resultImage = de::MovePtr<Image>(new Image(deviceInterface, device, allocator, imageParams, MemoryRequirement::Any));
+
+	const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
+
+	m_resultImageView = makeImageView(deviceInterface, device, m_resultImage->get(), mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange);
+
+	//Prepare the buffer with the initial data for the image 
+	const Buffer inputBuffer(deviceInterface, device, allocator, makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT), MemoryRequirement::HostVisible);
+
+	Allocation& inputBufferAllocation = inputBuffer.getAllocation();
+	
+	//Prepare the initial data for the image
+	const tcu::IVec4 initialValue(getOperationInitialValue(m_operation));
+
+	tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize);
+	tcu::PixelBufferAccess inputPixelBuffer(m_format, gridSize.x(), gridSize.y(), gridSize.z(), inputBufferAllocation.getHostPtr());
+
+	// Fill every pixel with the operation's initial value.
+	for (deUint32 z = 0; z < gridSize.z(); z++)
+	for (deUint32 y = 0; y < gridSize.y(); y++)
+	for (deUint32 x = 0; x < gridSize.x(); x++)
+	{
+		inputPixelBuffer.setPixel(initialValue, x, y, z);
+	}
+
+	// Make the host writes visible to the device before the transfer.
+	flushMappedMemoryRange(deviceInterface, device, inputBufferAllocation.getMemory(), inputBufferAllocation.getOffset(), imageSizeInBytes);
+
+	// Create a buffer to store shader output copied from result image
+	m_outputBuffer = de::MovePtr<Buffer>(new Buffer(deviceInterface, device, allocator, makeBufferCreateInfo(outBuffSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
+
+	prepareResources();
+
+	prepareDescriptors();
+
+	// Create pipeline
+	const Unique<VkShaderModule>	shaderModule(createShaderModule(deviceInterface, device, m_context.getBinaryCollection().get(m_name), 0));
+	const Unique<VkPipelineLayout>	pipelineLayout(makePipelineLayout(deviceInterface, device, *m_descriptorSetLayout));
+	const Unique<VkPipeline>		pipeline(makeComputePipeline(deviceInterface, device, *pipelineLayout, *shaderModule));
+
+	// Create command buffer
+	const Unique<VkCommandPool>		cmdPool(makeCommandPool(deviceInterface, device, queueFamilyIndex));
+	const Unique<VkCommandBuffer>	cmdBuffer(makeCommandBuffer(deviceInterface, device, *cmdPool));
+
+	beginCommandBuffer(deviceInterface, *cmdBuffer);
+
+	deviceInterface.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+	deviceInterface.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
+
+	// Host write -> transfer read on the staging buffer.
+	const VkBufferMemoryBarrier inputBufferPostHostWriteBarrier	= 
+		makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, 
+								VK_ACCESS_TRANSFER_READ_BIT,
+								*inputBuffer,
+								0ull,
+								imageSizeInBytes);
+
+	// UNDEFINED -> TRANSFER_DST so the initial values can be copied in.
+	const VkImageMemoryBarrier	resultImagePreCopyBarrier = 
+		makeImageMemoryBarrier(	0u, 
+								VK_ACCESS_TRANSFER_WRITE_BIT,
+								VK_IMAGE_LAYOUT_UNDEFINED, 
+								VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+								m_resultImage->get(), 
+								subresourceRange);
+
+	deviceInterface.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, DE_FALSE, 0u, DE_NULL, 1u, &inputBufferPostHostWriteBarrier, 1u, &resultImagePreCopyBarrier);
+
+	const VkBufferImageCopy		bufferImageCopyParams = makeBufferImageCopy(makeExtent3D(getLayerSize(m_imageType, m_imageSize)), getNumLayers(m_imageType, m_imageSize));
+
+	deviceInterface.cmdCopyBufferToImage(*cmdBuffer, *inputBuffer, m_resultImage->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &bufferImageCopyParams);
+
+	// TRANSFER_DST -> GENERAL for shader storage-image access.
+	const VkImageMemoryBarrier	resultImagePostCopyBarrier	= 
+		makeImageMemoryBarrier(	VK_ACCESS_TRANSFER_WRITE_BIT, 
+								VK_ACCESS_SHADER_READ_BIT,
+								VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+								VK_IMAGE_LAYOUT_GENERAL,
+								m_resultImage->get(),
+								subresourceRange);
+
+	deviceInterface.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, DE_FALSE, 0u, DE_NULL, 0u, DE_NULL, 1u, &resultImagePostCopyBarrier);
+
+	commandsBeforeCompute(*cmdBuffer);
+
+	// X dimension is extended so several invocations hit each pixel.
+	deviceInterface.cmdDispatch(*cmdBuffer, NUM_INVOCATIONS_PER_PIXEL*gridSize.x(), gridSize.y(), gridSize.z());
+
+	commandsAfterCompute(*cmdBuffer);
+
+	// Transfer write -> host read on the readback buffer.
+	const VkBufferMemoryBarrier	outputBufferPreHostReadBarrier 
+		= makeBufferMemoryBarrier(	VK_ACCESS_TRANSFER_WRITE_BIT,
+									VK_ACCESS_HOST_READ_BIT,
+									m_outputBuffer->get(),
+									0ull,
+									outBuffSizeInBytes);
+
+	deviceInterface.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, DE_FALSE, 0u, DE_NULL, 1u, &outputBufferPreHostReadBarrier, 0u, DE_NULL);
+
+	endCommandBuffer(deviceInterface, *cmdBuffer);
+
+	submitCommandsAndWait(deviceInterface, device, queue, *cmdBuffer);
+
+	Allocation& outputBufferAllocation = m_outputBuffer->getAllocation();
+
+	// Make the device writes visible to the host before verification.
+	invalidateMappedMemoryRange(deviceInterface, device, outputBufferAllocation.getMemory(), outputBufferAllocation.getOffset(), outBuffSizeInBytes);
+
+	if (verifyResult(outputBufferAllocation))
+		return tcu::TestStatus::pass("Comparison succeeded");
+	else
+		return tcu::TestStatus::fail("Comparison failed");
+}
+
+//! Instance for the end-result variant: no extra resources are needed and the
+//! result image itself is copied out for verification after the dispatch.
+class BinaryAtomicEndResultInstance : public BinaryAtomicInstanceBase
+{
+public:
+
+						BinaryAtomicEndResultInstance  (Context&				context,
+														const string&			name,
+														const ImageType			imageType,
+														const tcu::UVec3&		imageSize,
+														const TextureFormat&	format,
+														const AtomicOperation	operation)
+							: BinaryAtomicInstanceBase(context, name, imageType, imageSize, format, operation) {}
+
+	virtual deUint32	getOutputBufferSize			   (void) const;
+
+	virtual void		prepareResources			   (void) {}
+	virtual void		prepareDescriptors			   (void);
+
+	virtual void		commandsBeforeCompute		   (const VkCommandBuffer) const {}
+	virtual void		commandsAfterCompute		   (const VkCommandBuffer	cmdBuffer) const;
+
+	virtual bool		verifyResult				   (Allocation&				outputBufferAllocation) const;
+};
+
+//! The output buffer receives a full copy of the result image.
+deUint32 BinaryAtomicEndResultInstance::getOutputBufferSize (void) const
+{
+	const deUint32 pixelBytes	= tcu::getPixelSize(m_format);
+	const deUint32 pixelCount	= getNumPixels(m_imageType, m_imageSize);
+
+	return pixelBytes * pixelCount;
+}
+
+//! Create the layout/pool/set and point binding 0 at the result image view.
+void BinaryAtomicEndResultInstance::prepareDescriptors (void)
+{
+	const VkDevice			device			= m_context.getDevice();
+	const DeviceInterface&	deviceInterface = m_context.getDeviceInterface();
+
+	// Single storage-image binding used by the compute shader.
+	DescriptorSetLayoutBuilder	layoutBuilder;
+	layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT);
+	m_descriptorSetLayout = layoutBuilder.build(deviceInterface, device);
+
+	DescriptorPoolBuilder		poolBuilder;
+	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
+	m_descriptorPool = poolBuilder.build(deviceInterface, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+
+	m_descriptorSet = makeDescriptorSet(deviceInterface, device, *m_descriptorPool, *m_descriptorSetLayout);
+
+	const VkDescriptorImageInfo	descResultImageInfo = makeDescriptorImageInfo(DE_NULL, *m_resultImageView, VK_IMAGE_LAYOUT_GENERAL);
+
+	DescriptorSetUpdateBuilder	updateBuilder;
+	updateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo);
+	updateBuilder.update(deviceInterface, device);
+}
+
+//! After the dispatch: transition the result image for transfer and copy it
+//! into the host-visible output buffer for verification.
+void BinaryAtomicEndResultInstance::commandsAfterCompute (const VkCommandBuffer	cmdBuffer) const
+{
+	const DeviceInterface&			deviceInterface		= m_context.getDeviceInterface();
+	const VkImageSubresourceRange	subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
+
+	// Shader write -> transfer read; GENERAL -> TRANSFER_SRC.
+	const VkImageMemoryBarrier	resultImagePostDispatchBarrier = 
+		makeImageMemoryBarrier(	VK_ACCESS_SHADER_WRITE_BIT,
+								VK_ACCESS_TRANSFER_READ_BIT,
+								VK_IMAGE_LAYOUT_GENERAL,
+								VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+								m_resultImage->get(),
+								subresourceRange);
+
+	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, DE_FALSE, 0u, DE_NULL, 0u, DE_NULL, 1u, &resultImagePostDispatchBarrier);
+
+	const VkBufferImageCopy		bufferImageCopyParams = makeBufferImageCopy(makeExtent3D(getLayerSize(m_imageType, m_imageSize)), getNumLayers(m_imageType, m_imageSize));
+
+	deviceInterface.cmdCopyImageToBuffer(cmdBuffer, m_resultImage->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_outputBuffer->get(), 1u, &bufferImageCopyParams);
+}
+
+//! Check the final image contents against host-computed reference values.
+bool BinaryAtomicEndResultInstance::verifyResult (Allocation& outputBufferAllocation) const
+{
+	const UVec3	gridSize			= getShaderGridSize(m_imageType, m_imageSize);
+	const IVec3 extendedGridSize	= IVec3(NUM_INVOCATIONS_PER_PIXEL*gridSize.x(), gridSize.y(), gridSize.z());
+
+	tcu::ConstPixelBufferAccess resultBuffer(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputBufferAllocation.getHostPtr());
+
+	for (deInt32 z = 0; z < resultBuffer.getDepth();  z++)
+	for (deInt32 y = 0; y < resultBuffer.getHeight(); y++)
+	for (deInt32 x = 0; x < resultBuffer.getWidth();  x++)
+	{
+		const deInt32 pixelValue = resultBuffer.getPixelInt(x, y, z).x();
+
+		if (isOrderIndependentAtomicOperation(m_operation))
+		{
+			// There is a unique correct end result: fold the arguments of all
+			// invocations that touched this pixel into the initial value.
+			deInt32 reference = getOperationInitialValue(m_operation);
+
+			for (deInt32 i = 0; i < static_cast<deInt32>(NUM_INVOCATIONS_PER_PIXEL); i++)
+			{
+				const IVec3 gid(x + i*gridSize.x(), y, z);
+				reference = computeBinaryAtomicOperationResult(m_operation, reference, getAtomicFuncArgument(m_operation, gid, extendedGridSize));
+			}
+
+			if (pixelValue != reference)
+				return false;
+		}
+		else if (m_operation == ATOMIC_OPERATION_EXCHANGE)
+		{
+			// Exchange is order-dependent: the end result must equal the
+			// argument of one of the invocations that touched this pixel.
+			bool anyArgMatches = false;
+
+			for (deInt32 i = 0; i < static_cast<deInt32>(NUM_INVOCATIONS_PER_PIXEL); i++)
+			{
+				const IVec3 gid(x + i*gridSize.x(), y, z);
+
+				if (pixelValue == getAtomicFuncArgument(m_operation, gid, extendedGridSize))
+				{
+					anyArgMatches = true;
+					break;
+				}
+			}
+
+			if (!anyArgMatches)
+				return false;
+		}
+		else
+			DE_ASSERT(false);
+	}
+	return true;
+}
+
+//! Factory hook: ownership of the instance passes to the framework caller.
+TestInstance* BinaryAtomicEndResultCase::createInstance (Context& context) const
+{
+	return new BinaryAtomicEndResultInstance(context, m_name, m_imageType, m_imageSize, m_format, m_operation);
+}
+
+//! Instance for the intermediate-values variant: a second image (X extended
+//! NUM_INVOCATIONS_PER_PIXEL times) records the value each atomic call
+//! returned, and verification checks that some execution order explains them.
+class BinaryAtomicIntermValuesInstance : public BinaryAtomicInstanceBase
+{
+public:
+
+						BinaryAtomicIntermValuesInstance   (Context&				context,
+															const string&			name,
+															const ImageType			imageType,
+															const tcu::UVec3&		imageSize,
+															const TextureFormat&	format,
+															const AtomicOperation	operation)
+							: BinaryAtomicInstanceBase(context, name, imageType, imageSize, format, operation) {}
+
+	virtual deUint32	getOutputBufferSize				   (void) const;
+
+	virtual void		prepareResources				   (void);
+	virtual void		prepareDescriptors				   (void);
+
+	virtual void		commandsBeforeCompute			   (const VkCommandBuffer	cmdBuffer) const;
+	virtual void		commandsAfterCompute			   (const VkCommandBuffer	cmdBuffer) const;
+
+	virtual bool		verifyResult					   (Allocation&				outputBufferAllocation) const;
+
+protected:
+
+	// Recursively tries to find an ordering of the atomic arguments that
+	// reproduces the observed intermediate result values.
+	bool				verifyRecursive					   (const deInt32			index,
+															const deInt32			valueSoFar, 
+															bool					argsUsed[NUM_INVOCATIONS_PER_PIXEL],
+															const deInt32			atomicArgs[NUM_INVOCATIONS_PER_PIXEL],
+															const deInt32			resultValues[NUM_INVOCATIONS_PER_PIXEL]) const;
+	de::MovePtr<Image>	m_intermResultsImage;
+	Move<VkImageView>	m_intermResultsImageView;
+};
+
+//! One intermediate value is stored per invocation, i.e. NUM_INVOCATIONS_PER_PIXEL values per pixel.
+deUint32 BinaryAtomicIntermValuesInstance::getOutputBufferSize (void) const
+{
+	const deUint32 bytesPerPixel = tcu::getPixelSize(m_format);
+
+	return bytesPerPixel * getNumPixels(m_imageType, m_imageSize) * NUM_INVOCATIONS_PER_PIXEL;
+}
+
+void BinaryAtomicIntermValuesInstance::prepareResources (void)
+{
+	const VkDevice			device			= m_context.getDevice();
+	const DeviceInterface&	deviceInterface = m_context.getDeviceInterface();
+	Allocator&				allocator		= m_context.getDefaultAllocator();
+
+	const UVec3 layerSize			= getLayerSize(m_imageType, m_imageSize);
+	const UVec3 extendedLayerSize	= UVec3(NUM_INVOCATIONS_PER_PIXEL * layerSize.x(), layerSize.y(), layerSize.z());
+
+	const VkImageCreateInfo imageParams =
+	{
+		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,		// VkStructureType			sType;
+		DE_NULL,									// const void*				pNext;
+		m_imageType == IMAGE_TYPE_CUBE ||
+		m_imageType == IMAGE_TYPE_CUBE_ARRAY ?
+		VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u,	// VkImageCreateFlags		flags;
+		mapImageType(m_imageType),					// VkImageType				imageType;
+		mapTextureFormat(m_format),					// VkFormat					format;
+		makeExtent3D(extendedLayerSize),			// VkExtent3D				extent;
+		1u,											// deUint32					mipLevels;
+		getNumLayers(m_imageType, m_imageSize),		// deUint32					arrayLayers;
+		VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits	samples;
+		VK_IMAGE_TILING_OPTIMAL,					// VkImageTiling			tiling;
+		VK_IMAGE_USAGE_STORAGE_BIT |
+		VK_IMAGE_USAGE_TRANSFER_SRC_BIT,			// VkImageUsageFlags		usage;
+		VK_SHARING_MODE_EXCLUSIVE,					// VkSharingMode			sharingMode;
+		0u,											// deUint32					queueFamilyIndexCount;
+		DE_NULL,									// const deUint32*			pQueueFamilyIndices;
+		VK_IMAGE_LAYOUT_UNDEFINED,					// VkImageLayout			initialLayout;
+	};
+
+	m_intermResultsImage = de::MovePtr<Image>(new Image(deviceInterface, device, allocator, imageParams, MemoryRequirement::Any));
+
+	const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
+
+	m_intermResultsImageView = makeImageView(deviceInterface, device, m_intermResultsImage->get(), mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange);
+}
+
+void BinaryAtomicIntermValuesInstance::prepareDescriptors (void)
+{
+	const VkDevice			device			= m_context.getDevice();
+	const DeviceInterface&	deviceInterface = m_context.getDeviceInterface();
+
+	m_descriptorSetLayout =
+		DescriptorSetLayoutBuilder()
+		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+		.build(deviceInterface, device);
+
+	m_descriptorPool =
+		DescriptorPoolBuilder()
+		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2u)
+		.build(deviceInterface, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+
+	m_descriptorSet = makeDescriptorSet(deviceInterface, device, *m_descriptorPool, *m_descriptorSetLayout);
+
+	const VkDescriptorImageInfo	descResultImageInfo			= makeDescriptorImageInfo(DE_NULL, *m_resultImageView, VK_IMAGE_LAYOUT_GENERAL);
+	const VkDescriptorImageInfo	descIntermResultsImageInfo	= makeDescriptorImageInfo(DE_NULL, *m_intermResultsImageView, VK_IMAGE_LAYOUT_GENERAL);
+
+	DescriptorSetUpdateBuilder()
+		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
+		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descIntermResultsImageInfo)
+		.update(deviceInterface, device);
+}
+
+void BinaryAtomicIntermValuesInstance::commandsBeforeCompute (const VkCommandBuffer cmdBuffer) const
+{
+	const DeviceInterface&			deviceInterface		= m_context.getDeviceInterface();
+	const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
+
+	const VkImageMemoryBarrier	imagePreDispatchBarrier =
+		makeImageMemoryBarrier(	0u,
+								VK_ACCESS_SHADER_WRITE_BIT,
+								VK_IMAGE_LAYOUT_UNDEFINED,
+								VK_IMAGE_LAYOUT_GENERAL,
+								m_intermResultsImage->get(),
+								subresourceRange);
+
+	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, DE_FALSE, 0u, DE_NULL, 0u, DE_NULL, 1u, &imagePreDispatchBarrier);
+}
+
+void BinaryAtomicIntermValuesInstance::commandsAfterCompute (const VkCommandBuffer cmdBuffer) const
+{
+	const DeviceInterface&			deviceInterface		= m_context.getDeviceInterface();
+	const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
+
+	const VkImageMemoryBarrier	imagePostDispatchBarrier =
+		makeImageMemoryBarrier(	VK_ACCESS_SHADER_WRITE_BIT,
+								VK_ACCESS_TRANSFER_READ_BIT,
+								VK_IMAGE_LAYOUT_GENERAL,
+								VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+								m_intermResultsImage->get(),
+								subresourceRange);
+
+	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, DE_FALSE, 0u, DE_NULL, 0u, DE_NULL, 1u, &imagePostDispatchBarrier);
+
+	const UVec3					layerSize				= getLayerSize(m_imageType, m_imageSize);
+	const UVec3					extendedLayerSize		= UVec3(NUM_INVOCATIONS_PER_PIXEL * layerSize.x(), layerSize.y(), layerSize.z());
+	const VkBufferImageCopy		bufferImageCopyParams	= makeBufferImageCopy(makeExtent3D(extendedLayerSize), getNumLayers(m_imageType, m_imageSize));
+
+	deviceInterface.cmdCopyImageToBuffer(cmdBuffer, m_intermResultsImage->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_outputBuffer->get(), 1u, &bufferImageCopyParams);
+}
+
+bool BinaryAtomicIntermValuesInstance::verifyResult (Allocation&	outputBufferAllocation) const
+{
+	const UVec3	gridSize		 = getShaderGridSize(m_imageType, m_imageSize);
+	const IVec3 extendedGridSize = IVec3(NUM_INVOCATIONS_PER_PIXEL*gridSize.x(), gridSize.y(), gridSize.z());
+
+	tcu::ConstPixelBufferAccess resultBuffer(m_format, extendedGridSize.x(), extendedGridSize.y(), extendedGridSize.z(), outputBufferAllocation.getHostPtr());
+
+	for (deInt32 z = 0; z < resultBuffer.getDepth(); z++)
+	for (deInt32 y = 0; y < resultBuffer.getHeight(); y++)
+	for (deUint32 x = 0; x < gridSize.x(); x++)
+	{
+		deInt32 resultValues[NUM_INVOCATIONS_PER_PIXEL];
+		deInt32 atomicArgs[NUM_INVOCATIONS_PER_PIXEL];
+		bool	argsUsed[NUM_INVOCATIONS_PER_PIXEL];
+
+		for (deInt32 i = 0; i < static_cast<deInt32>(NUM_INVOCATIONS_PER_PIXEL); i++)
+		{
+			IVec3 gid(x + i*gridSize.x(), y, z);
+
+			resultValues[i] = resultBuffer.getPixelInt(gid.x(), gid.y(), gid.z()).x();
+			atomicArgs[i]	= getAtomicFuncArgument(m_operation, gid, extendedGridSize);
+			argsUsed[i]		= false;
+		}
+
+		// Verify that the return values form a valid sequence.
+		if (!verifyRecursive(0, getOperationInitialValue(m_operation), argsUsed, atomicArgs, resultValues))
+		{
+			return false;
+		}
+	}
+
+	return true;
+}
+
+bool BinaryAtomicIntermValuesInstance::verifyRecursive (const deInt32	index,
+														const deInt32	valueSoFar,
+														bool			argsUsed[NUM_INVOCATIONS_PER_PIXEL],
+														const deInt32	atomicArgs[NUM_INVOCATIONS_PER_PIXEL],
+														const deInt32	resultValues[NUM_INVOCATIONS_PER_PIXEL]) const
+{
+	if (index >= static_cast<deInt32>(NUM_INVOCATIONS_PER_PIXEL))
+		return true;
+
+	for (deInt32 i = 0; i < static_cast<deInt32>(NUM_INVOCATIONS_PER_PIXEL); i++)
+	{
+		if (!argsUsed[i] && resultValues[i] == valueSoFar)
+		{
+			argsUsed[i] = true;
+
+			if (verifyRecursive(index + 1, computeBinaryAtomicOperationResult(m_operation, valueSoFar, atomicArgs[i]), argsUsed, atomicArgs, resultValues))
+			{
+				return true;
+			}
+
+			argsUsed[i] = false;
+		}
+	}
+
+	return false;
+}
+
+TestInstance* BinaryAtomicIntermValuesCase::createInstance (Context& context) const
+{
+	return new BinaryAtomicIntermValuesInstance(context, m_name, m_imageType, m_imageSize, m_format, m_operation);
+}
+
+} // anonymous ns
+
+tcu::TestCaseGroup* createImageAtomicOperationTests (tcu::TestContext& testCtx)
+{
+	de::MovePtr<tcu::TestCaseGroup> imageAtomicOperationsTests(new tcu::TestCaseGroup(testCtx, "atomic_operations", "Atomic image operations cases"));
+
+	struct ImageParams
+	{
+		ImageParams(const ImageType imageType, const tcu::UVec3& imageSize)
+			: m_imageType	(imageType)
+			, m_imageSize	(imageSize)
+		{
+		}
+		const ImageType		m_imageType;
+		const tcu::UVec3	m_imageSize;
+	};
+
+	static const ImageParams imageParamsArray[] =
+	{
+		ImageParams(IMAGE_TYPE_1D,			tcu::UVec3(64u, 1u, 1u)),
+		ImageParams(IMAGE_TYPE_1D_ARRAY,	tcu::UVec3(64u, 1u, 8u)),
+		ImageParams(IMAGE_TYPE_2D,			tcu::UVec3(64u, 64u, 1u)),
+		ImageParams(IMAGE_TYPE_2D_ARRAY,	tcu::UVec3(64u, 64u, 8u)),
+		ImageParams(IMAGE_TYPE_3D,			tcu::UVec3(64u, 64u, 8u)),
+		ImageParams(IMAGE_TYPE_CUBE,		tcu::UVec3(64u, 64u, 1u)),
+		ImageParams(IMAGE_TYPE_CUBE_ARRAY,	tcu::UVec3(64u, 64u, 2u))
+	};
+
+	static const tcu::TextureFormat formats[] =
+	{
+		tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT32),
+		tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT32)
+	};
+
+	for (deUint32 operationI = 0; operationI < ATOMIC_OPERATION_LAST; operationI++)
+	{
+		const AtomicOperation operation = (AtomicOperation)operationI;
+
+		de::MovePtr<tcu::TestCaseGroup> operationGroup(new tcu::TestCaseGroup(testCtx, getAtomicOperationCaseName(operation).c_str(), ""));
+
+		for (deUint32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParamsArray); imageTypeNdx++)
+		{
+			const ImageType	 imageType = imageParamsArray[imageTypeNdx].m_imageType;
+			const tcu::UVec3 imageSize = imageParamsArray[imageTypeNdx].m_imageSize;
+
+			for (deUint32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
+			{
+				const TextureFormat&	format		= formats[formatNdx];
+				const std::string		formatName	= getShaderImageFormatQualifier(format);
+
+				// Atomic case checks the end result of the operations, and not the intermediate return values
+				const string caseEndResult = getImageTypeName(imageType) + "_" + formatName + "_end_result";
+				operationGroup->addChild(new BinaryAtomicEndResultCase(testCtx, caseEndResult, "", imageType, imageSize, format, operation, glu::GLSL_VERSION_440));
+
+				// Atomic case checks the return values of the atomic function and not the end result.
+				const string caseIntermValues = getImageTypeName(imageType) + "_" + formatName + "_intermediate_values";
+				operationGroup->addChild(new BinaryAtomicIntermValuesCase(testCtx, caseIntermValues, "", imageType, imageSize, format, operation, glu::GLSL_VERSION_440));
+			}
+		}
+
+		imageAtomicOperationsTests->addChild(operationGroup.release());
+	}
+
+	return imageAtomicOperationsTests.release();
+}
+
+} // image
+} // vkt
diff --git a/external/vulkancts/modules/vulkan/image/vktImageAtomicOperationTests.hpp b/external/vulkancts/modules/vulkan/image/vktImageAtomicOperationTests.hpp
new file mode 100644
index 0000000..93dc124
--- /dev/null
+++ b/external/vulkancts/modules/vulkan/image/vktImageAtomicOperationTests.hpp
@@ -0,0 +1,46 @@
+#ifndef _VKTIMAGEATOMICOPERATIONTESTS_HPP
+#define _VKTIMAGEATOMICOPERATIONTESTS_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file  vktImageAtomicOperationTests.hpp
+ * \brief Image atomic operation tests
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+
+namespace vkt
+{
+namespace image
+{
+
+tcu::TestCaseGroup* createImageAtomicOperationTests (tcu::TestContext& testCtx);
+
+} // image
+} // vkt
+
+#endif // _VKTIMAGEATOMICOPERATIONTESTS_HPP
diff --git a/external/vulkancts/modules/vulkan/image/vktImageQualifiersTests.cpp b/external/vulkancts/modules/vulkan/image/vktImageQualifiersTests.cpp
index cb53c4c..c4d8f30 100644
--- a/external/vulkancts/modules/vulkan/image/vktImageQualifiersTests.cpp
+++ b/external/vulkancts/modules/vulkan/image/vktImageQualifiersTests.cpp
@@ -72,31 +72,9 @@
 static const char* const	g_ShaderReadOffsetsYStr		= "int[]( 2, 5, 8, 11 )";
 static const char* const	g_ShaderReadOffsetsZStr		= "int[]( 3, 6, 9, 12 )";
 
-const tcu::UVec3 getComputeGridSize (const ImageType imageType, const tcu::UVec4& imageSize)
+const tcu::UVec3 getLocalWorkGroupSize (const ImageType imageType, const tcu::UVec3& imageSize)
 {
-	switch (imageType)
-	{
-		case IMAGE_TYPE_1D:
-		case IMAGE_TYPE_2D:
-		case IMAGE_TYPE_2D_ARRAY:
-		case IMAGE_TYPE_3D:
-		case IMAGE_TYPE_CUBE:
-		case IMAGE_TYPE_CUBE_ARRAY:
-		case IMAGE_TYPE_BUFFER:
-			return tcu::UVec3(imageSize.x(), imageSize.y(), imageSize.z() * imageSize.w());
-
-		case IMAGE_TYPE_1D_ARRAY:
-			return tcu::UVec3(imageSize.x(), imageSize.w(), 1);
-
-		default:
-			DE_FATAL("Unknown image type");
-			return tcu::UVec3(1, 1, 1);
-	}
-}
-
-const tcu::UVec3 getLocalWorkGroupSize (const ImageType imageType, const tcu::UVec4& imageSize)
-{
-	const tcu::UVec3 computeGridSize	= getComputeGridSize(imageType, imageSize);
+	const tcu::UVec3 computeGridSize	= getShaderGridSize(imageType, imageSize);
 
 	const tcu::UVec3 localWorkGroupSize = tcu::UVec3(de::min(g_localWorkGroupSizeBase.x(), computeGridSize.x()),
 													 de::min(g_localWorkGroupSizeBase.y(), computeGridSize.y()),
@@ -104,9 +82,9 @@
 	return localWorkGroupSize;
 }
 
-const tcu::UVec3 getNumWorkGroups (const ImageType imageType, const tcu::UVec4& imageSize)
+const tcu::UVec3 getNumWorkGroups (const ImageType imageType, const tcu::UVec3& imageSize)
 {
-	const tcu::UVec3 computeGridSize	= getComputeGridSize(imageType, imageSize);
+	const tcu::UVec3 computeGridSize	= getShaderGridSize(imageType, imageSize);
 	const tcu::UVec3 localWorkGroupSize = getLocalWorkGroupSize(imageType, imageSize);
 
 	return computeGridSize / localWorkGroupSize;
@@ -141,7 +119,7 @@
 
 bool comparePixelBuffers (tcu::TestContext&						testCtx,
 						  const ImageType						imageType,
-						  const tcu::UVec4&						imageSize,
+						  const tcu::UVec3&						imageSize,
 						  const tcu::TextureFormat&				format,
 						  const tcu::ConstPixelBufferAccess&	reference,
 						  const tcu::ConstPixelBufferAccess&	result)
@@ -152,7 +130,7 @@
 	const bool		 intFormat			= isIntFormat(mapTextureFormat(format)) || isUintFormat(mapTextureFormat(format));
 	deUint32		 passedLayers		= 0;
 
-	for (deUint32 layerNdx = 0; layerNdx < imageSize.z() * imageSize.w(); ++layerNdx)
+	for (deUint32 layerNdx = 0; layerNdx < getNumLayers(imageType, imageSize); ++layerNdx)
 	{
 		const std::string comparisonName = "Comparison" + de::toString(layerNdx);
 
@@ -186,7 +164,7 @@
 			++passedLayers;
 	}
 
-	return passedLayers == (imageSize.z() * imageSize.w());
+	return passedLayers == getNumLayers(imageType, imageSize);
 }
 
 const std::string getCoordStr (const ImageType		imageType,
@@ -233,7 +211,7 @@
 															 const std::string&			description,
 															 const Qualifier			qualifier,
 															 const ImageType			imageType,
-															 const tcu::UVec4&			imageSize,
+															 const tcu::UVec3&			imageSize,
 															 const tcu::TextureFormat&	format,
 															 const glu::GLSLVersion		glslVersion);
 
@@ -246,7 +224,7 @@
 
 	const Qualifier				m_qualifier;
 	const ImageType				m_imageType;
-	const tcu::UVec4			m_imageSize;
+	const tcu::UVec3			m_imageSize;
 	const tcu::TextureFormat	m_format;
 	const glu::GLSLVersion		m_glslVersion;
 };
@@ -256,7 +234,7 @@
 												  const std::string&		description,
 												  const Qualifier			qualifier,
 												  const ImageType			imageType,
-												  const tcu::UVec4&			imageSize,
+												  const tcu::UVec3&			imageSize,
 												  const tcu::TextureFormat&	format,
 												  const glu::GLSLVersion	glslVersion)
 	: vkt::TestCase(testCtx, name, description)
@@ -338,7 +316,7 @@
 									MemoryQualifierInstanceBase		(Context&					context,
 																	 const std::string&			name,
 																	 const ImageType			imageType,
-																	 const tcu::UVec4&			imageSize,
+																	 const tcu::UVec3&			imageSize,
 																	 const tcu::TextureFormat&	format);
 
 	virtual							~MemoryQualifierInstanceBase	(void) {};
@@ -360,7 +338,7 @@
 
 	const std::string				m_name;
 	const ImageType					m_imageType;
-	const tcu::UVec4				m_imageSize;
+	const tcu::UVec3				m_imageSize;
 	const tcu::TextureFormat		m_format;
 
 	de::MovePtr<Buffer>				m_buffer;
@@ -372,7 +350,7 @@
 MemoryQualifierInstanceBase::MemoryQualifierInstanceBase (Context&					context,
 														  const std::string&		name,
 														  const ImageType			imageType,
-														  const tcu::UVec4&			imageSize,
+														  const tcu::UVec3&			imageSize,
 														  const tcu::TextureFormat&	format)
 	: vkt::TestInstance(context)
 	, m_name(name)
@@ -389,7 +367,7 @@
 	const VkQueue			queue				= m_context.getUniversalQueue();
 	const deUint32			queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
 
-	const VkDeviceSize	bufferSizeInBytes = m_imageSize.x() * m_imageSize.y() * m_imageSize.z() * m_imageSize.w() * tcu::getPixelSize(m_format);
+	const VkDeviceSize	bufferSizeInBytes = getNumPixels(m_imageType, m_imageSize) * tcu::getPixelSize(m_format);
 
 	// Prepare resources for the test
 	prepareResources(bufferSizeInBytes);
@@ -430,7 +408,7 @@
 	const Allocation& allocation = m_buffer->getAllocation();
 	invalidateMappedMemoryRange(deviceInterface, device, allocation.getMemory(), allocation.getOffset(), bufferSizeInBytes);
 
-	const tcu::UVec3 computeGridSize = getComputeGridSize(m_imageType, m_imageSize);
+	const tcu::UVec3 computeGridSize = getShaderGridSize(m_imageType, m_imageSize);
 	tcu::ConstPixelBufferAccess resultPixelBuffer(m_format, computeGridSize.x(), computeGridSize.y(), computeGridSize.z(), allocation.getHostPtr());
 
 	// Create a reference image
@@ -447,7 +425,7 @@
 tcu::TextureLevel MemoryQualifierInstanceBase::generateReferenceImage (void) const
 {
 	// Generate a reference image data using the storage format
-	const tcu::UVec3 computeGridSize = getComputeGridSize(m_imageType, m_imageSize);
+	const tcu::UVec3 computeGridSize = getShaderGridSize(m_imageType, m_imageSize);
 
 	tcu::TextureLevel base(m_format, computeGridSize.x(), computeGridSize.y(), computeGridSize.z());
 	tcu::PixelBufferAccess baseAccess = base.getAccess();
@@ -493,7 +471,7 @@
 						MemoryQualifierInstanceImage	(Context&					context,
 														 const std::string&			name,
 														 const ImageType			imageType,
-														 const tcu::UVec4&			imageSize,
+														 const tcu::UVec3&			imageSize,
 														 const tcu::TextureFormat&	format)
 							: MemoryQualifierInstanceBase(context, name, imageType, imageSize, format) {}
 
@@ -523,35 +501,36 @@
 	// Create image
 	const VkImageCreateInfo imageCreateInfo =
 	{
-		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,									// VkStructureType			sType;
-		DE_NULL,																// const void*				pNext;
+		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,							// VkStructureType			sType;
+		DE_NULL,														// const void*				pNext;
 		m_imageType == IMAGE_TYPE_CUBE ||
 		m_imageType	== IMAGE_TYPE_CUBE_ARRAY
-		? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u,			// VkImageCreateFlags		flags;
-		mapImageType(m_imageType),												// VkImageType				imageType;
-		mapTextureFormat(m_format),												// VkFormat					format;
-		vk::makeExtent3D(m_imageSize.x(), m_imageSize.y(), m_imageSize.z()),	// VkExtent3D				extent;
-		1u,																		// deUint32					mipLevels;
-		m_imageSize.w(),														// deUint32					arrayLayers;
-		VK_SAMPLE_COUNT_1_BIT,													// VkSampleCountFlagBits	samples;
-		VK_IMAGE_TILING_OPTIMAL,												// VkImageTiling			tiling;
-		VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_STORAGE_BIT,			// VkImageUsageFlags		usage;
-		VK_SHARING_MODE_EXCLUSIVE,												// VkSharingMode			sharingMode;
-		0u,																		// deUint32					queueFamilyIndexCount;
-		DE_NULL,																// const deUint32*			pQueueFamilyIndices;
-		VK_IMAGE_LAYOUT_UNDEFINED,												// VkImageLayout			initialLayout;
+		? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u,	// VkImageCreateFlags		flags;
+		mapImageType(m_imageType),										// VkImageType				imageType;
+		mapTextureFormat(m_format),										// VkFormat					format;
+		makeExtent3D(getLayerSize(m_imageType, m_imageSize)),			// VkExtent3D				extent;
+		1u,																// deUint32					mipLevels;
+		getNumLayers(m_imageType, m_imageSize),							// deUint32					arrayLayers;
+		VK_SAMPLE_COUNT_1_BIT,											// VkSampleCountFlagBits	samples;
+		VK_IMAGE_TILING_OPTIMAL,										// VkImageTiling			tiling;
+		VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_STORAGE_BIT,	// VkImageUsageFlags		usage;
+		VK_SHARING_MODE_EXCLUSIVE,										// VkSharingMode			sharingMode;
+		0u,																// deUint32					queueFamilyIndexCount;
+		DE_NULL,														// const deUint32*			pQueueFamilyIndices;
+		VK_IMAGE_LAYOUT_UNDEFINED,										// VkImageLayout			initialLayout;
 	};
 
 	m_image = de::MovePtr<Image>(new Image(deviceInterface, device, allocator, imageCreateInfo, MemoryRequirement::Any));
 
 	// Create imageView
-	const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_imageSize.w());
+	const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
 	m_imageView = makeImageView(deviceInterface, device, m_image->get(), mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange);
 
 	// Create a buffer to store shader output (copied from image data)
 	const VkBufferCreateInfo	bufferCreateInfo = makeBufferCreateInfo(bufferSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
 	m_buffer = de::MovePtr<Buffer>(new Buffer(deviceInterface, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible));
 }
+
 void MemoryQualifierInstanceImage::prepareDescriptors (void)
 {
 	const VkDevice			device			= m_context.getDevice();
@@ -585,7 +564,7 @@
 	DE_UNREF(bufferSizeInBytes);
 
 	const DeviceInterface&			deviceInterface	 = m_context.getDeviceInterface();
-	const VkImageSubresourceRange	subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_imageSize.w());
+	const VkImageSubresourceRange	subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
 
 	const VkImageMemoryBarrier imageLayoutBarrier
 		= makeImageMemoryBarrier(0u,
@@ -595,13 +574,13 @@
 								 m_image->get(),
 								 subresourceRange);
 
-	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imageLayoutBarrier);
+	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageLayoutBarrier);
 }
 
-void MemoryQualifierInstanceImage::commandsAfterCompute (const VkCommandBuffer cmdBuffer, const VkDeviceSize	bufferSizeInBytes) const
+void MemoryQualifierInstanceImage::commandsAfterCompute (const VkCommandBuffer cmdBuffer, const VkDeviceSize bufferSizeInBytes) const
 {
 	const DeviceInterface&			deviceInterface	 = m_context.getDeviceInterface();
-	const VkImageSubresourceRange	subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_imageSize.w());
+	const VkImageSubresourceRange	subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize));
 
 	const VkImageMemoryBarrier imagePreCopyBarrier
 		= makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
@@ -611,9 +590,9 @@
 								 m_image->get(),
 								 subresourceRange);
 
-	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imagePreCopyBarrier);
+	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imagePreCopyBarrier);
 
-	const VkBufferImageCopy copyParams = makeBufferImageCopy(vk::makeExtent3D(m_imageSize.x(), m_imageSize.y(), m_imageSize.z()), m_imageSize.w());
+	const VkBufferImageCopy copyParams = makeBufferImageCopy(makeExtent3D(getLayerSize(m_imageType, m_imageSize)), getNumLayers(m_imageType, m_imageSize));
 	deviceInterface.cmdCopyImageToBuffer(cmdBuffer, m_image->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_buffer->get(), 1u, &copyParams);
 
 	const VkBufferMemoryBarrier bufferPostCopyBarrier
@@ -623,7 +602,7 @@
 								  0ull,
 								  bufferSizeInBytes);
 
-	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &bufferPostCopyBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
+	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &bufferPostCopyBarrier, 0u, DE_NULL);
 }
 
 class MemoryQualifierInstanceBuffer : public MemoryQualifierInstanceBase
@@ -632,7 +611,7 @@
 						MemoryQualifierInstanceBuffer	(Context&					context,
 														 const std::string&			name,
 														 const ImageType			imageType,
-														 const tcu::UVec4&			imageSize,
+														 const tcu::UVec3&			imageSize,
 														 const tcu::TextureFormat&	format)
 							: MemoryQualifierInstanceBase(context, name, imageType, imageSize, format) {}
 
@@ -702,7 +681,7 @@
 								  0ull,
 								  bufferSizeInBytes);
 
-	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
+	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &shaderWriteBarrier, 0u, DE_NULL);
 }
 
 TestInstance* MemoryQualifierTestCase::createInstance (Context& context) const
@@ -719,22 +698,27 @@
 {
 	de::MovePtr<tcu::TestCaseGroup> imageQualifiersTests(new tcu::TestCaseGroup(testCtx, "qualifiers", "Coherent, volatile and restrict"));
 
-	struct ImageParameters
+	struct ImageParams
 	{
-		ImageType	imageType;
-		tcu::UVec4	imageSize;
+		ImageParams(const ImageType imageType, const tcu::UVec3& imageSize)
+			: m_imageType	(imageType)
+			, m_imageSize	(imageSize)
+		{
+		}
+		ImageType	m_imageType;
+		tcu::UVec3	m_imageSize;
 	};
 
-	static const ImageParameters imageParametersArray[] =
+	static const ImageParams imageParamsArray[] =
 	{
-		{ IMAGE_TYPE_1D,			tcu::UVec4(64, 1,  1, 1)	},
-		{ IMAGE_TYPE_1D_ARRAY,		tcu::UVec4(64, 1,  1, 8)	},
-		{ IMAGE_TYPE_2D,			tcu::UVec4(64, 64, 1, 1)	},
-		{ IMAGE_TYPE_2D_ARRAY,		tcu::UVec4(64, 64, 1, 8)	},
-		{ IMAGE_TYPE_3D,			tcu::UVec4(64, 64, 8, 1)	},
-		{ IMAGE_TYPE_CUBE,			tcu::UVec4(64, 64, 1, 6)	},
-		{ IMAGE_TYPE_CUBE_ARRAY,	tcu::UVec4(64, 64, 1, 6*8)	},
-		{ IMAGE_TYPE_BUFFER,		tcu::UVec4(64, 1,  1, 1)	}
+		ImageParams(IMAGE_TYPE_1D,			tcu::UVec3(64u, 1u,  1u)),
+		ImageParams(IMAGE_TYPE_1D_ARRAY,	tcu::UVec3(64u, 1u,  8u)),
+		ImageParams(IMAGE_TYPE_2D,			tcu::UVec3(64u, 64u, 1u)),
+		ImageParams(IMAGE_TYPE_2D_ARRAY,	tcu::UVec3(64u, 64u, 8u)),
+		ImageParams(IMAGE_TYPE_3D,			tcu::UVec3(64u, 64u, 8u)),
+		ImageParams(IMAGE_TYPE_CUBE,		tcu::UVec3(64u, 64u, 1u)),
+		ImageParams(IMAGE_TYPE_CUBE_ARRAY,	tcu::UVec3(64u, 64u, 2u)),
+		ImageParams(IMAGE_TYPE_BUFFER,		tcu::UVec3(64u, 1u,  1u))
 	};
 
 	static const tcu::TextureFormat formats[] =
@@ -755,10 +739,10 @@
 
 		de::MovePtr<tcu::TestCaseGroup> qualifierGroup(new tcu::TestCaseGroup(testCtx, memoryQualifierName, ""));
 
-		for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); imageTypeNdx++)
+		for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParamsArray); imageTypeNdx++)
 		{
-			const ImageType		imageType = imageParametersArray[imageTypeNdx].imageType;
-			const tcu::UVec4	imageSize = imageParametersArray[imageTypeNdx].imageSize;
+			const ImageType		imageType = imageParamsArray[imageTypeNdx].m_imageType;
+			const tcu::UVec3	imageSize = imageParamsArray[imageTypeNdx].m_imageSize;
 
 			if (memoryQualifier == MemoryQualifierTestCase::QUALIFIER_RESTRICT)
 			{
diff --git a/external/vulkancts/modules/vulkan/image/vktImageTests.cpp b/external/vulkancts/modules/vulkan/image/vktImageTests.cpp
index 42bcd98..6a4ecdd 100644
--- a/external/vulkancts/modules/vulkan/image/vktImageTests.cpp
+++ b/external/vulkancts/modules/vulkan/image/vktImageTests.cpp
@@ -33,6 +33,7 @@
 #include "vktImageQualifiersTests.hpp"
 #include "vktImageSizeTests.hpp"
 #include "vktTestGroupUtil.hpp"
+#include "vktImageAtomicOperationTests.hpp"
 
 namespace vkt
 {
@@ -45,12 +46,13 @@
 void createChildren (tcu::TestCaseGroup* imageTests)
 {
 	tcu::TestContext&	testCtx		= imageTests->getTestContext();
-
+
 	imageTests->addChild(createImageStoreTests(testCtx));
 	imageTests->addChild(createImageLoadStoreTests(testCtx));
 	imageTests->addChild(createImageFormatReinterpretTests(testCtx));
 	imageTests->addChild(createImageQualifiersTests(testCtx));
 	imageTests->addChild(createImageSizeTests(testCtx));
+	imageTests->addChild(createImageAtomicOperationTests(testCtx));
 }
 
 } // anonymous
diff --git a/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.cpp b/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.cpp
index 7fc0a22..ea4e5cb 100644
--- a/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.cpp
+++ b/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.cpp
@@ -62,6 +62,141 @@
 	VK_CHECK(vk.bindImageMemory(device, *m_image, m_allocation->getMemory(), m_allocation->getOffset()));
 }
 
+tcu::UVec3 getShaderGridSize (const ImageType imageType, const tcu::UVec3& imageSize)
+{
+	switch (imageType)
+	{
+		case IMAGE_TYPE_1D:
+		case IMAGE_TYPE_BUFFER:
+			return tcu::UVec3(imageSize.x(), 1u, 1u);
+
+		case IMAGE_TYPE_1D_ARRAY:
+			return tcu::UVec3(imageSize.x(), imageSize.z(), 1u);
+
+		case IMAGE_TYPE_2D:
+			return tcu::UVec3(imageSize.x(), imageSize.y(), 1u);
+
+		case IMAGE_TYPE_2D_ARRAY:
+		case IMAGE_TYPE_3D:
+			return tcu::UVec3(imageSize.x(), imageSize.y(), imageSize.z());
+
+		case IMAGE_TYPE_CUBE:
+			return tcu::UVec3(imageSize.x(), imageSize.y(), 6u);
+
+		case IMAGE_TYPE_CUBE_ARRAY:
+			return tcu::UVec3(imageSize.x(), imageSize.y(), 6u * imageSize.z());
+
+		default:
+			DE_FATAL("Unknown image type");
+			return tcu::UVec3(1u, 1u, 1u);
+	}
+}
+
+tcu::UVec3 getLayerSize (const ImageType imageType, const tcu::UVec3& imageSize)
+{
+	switch (imageType)
+	{
+		case IMAGE_TYPE_1D:
+		case IMAGE_TYPE_1D_ARRAY:
+		case IMAGE_TYPE_BUFFER:
+			return tcu::UVec3(imageSize.x(), 1u, 1u);
+
+		case IMAGE_TYPE_2D:
+		case IMAGE_TYPE_2D_ARRAY:
+		case IMAGE_TYPE_CUBE:
+		case IMAGE_TYPE_CUBE_ARRAY:
+			return tcu::UVec3(imageSize.x(), imageSize.y(), 1u);
+
+		case IMAGE_TYPE_3D:
+			return tcu::UVec3(imageSize.x(), imageSize.y(), imageSize.z());
+
+		default:
+			DE_FATAL("Unknown image type");
+			return tcu::UVec3(1u, 1u, 1u);
+	}
+}
+
+deUint32 getNumLayers (const ImageType imageType, const tcu::UVec3& imageSize)
+{
+	switch (imageType)
+	{
+		case IMAGE_TYPE_1D:
+		case IMAGE_TYPE_2D:
+		case IMAGE_TYPE_3D:
+		case IMAGE_TYPE_BUFFER:
+			return 1u;
+
+		case IMAGE_TYPE_1D_ARRAY:
+		case IMAGE_TYPE_2D_ARRAY:
+			return imageSize.z();
+
+		case IMAGE_TYPE_CUBE:
+			return 6u;
+
+		case IMAGE_TYPE_CUBE_ARRAY:
+			return imageSize.z() * 6u;
+
+		default:
+			DE_FATAL("Unknown image type");
+			return 0u;
+	}
+}
+
+deUint32 getNumPixels (const ImageType imageType, const tcu::UVec3& imageSize)
+{
+	const tcu::UVec3 gridSize = getShaderGridSize(imageType, imageSize);
+
+	return gridSize.x() * gridSize.y() * gridSize.z();
+}
+
+deUint32 getDimensions (const ImageType imageType)
+{
+	switch (imageType)
+	{
+		case IMAGE_TYPE_1D:
+		case IMAGE_TYPE_BUFFER:
+			return 1u;
+
+		case IMAGE_TYPE_1D_ARRAY:
+		case IMAGE_TYPE_2D:
+			return 2u;
+
+		case IMAGE_TYPE_2D_ARRAY:
+		case IMAGE_TYPE_CUBE:
+		case IMAGE_TYPE_CUBE_ARRAY:
+		case IMAGE_TYPE_3D:
+			return 3u;
+
+		default:
+			DE_FATAL("Unknown image type");
+			return 0u;
+	}
+}
+
+deUint32 getLayerDimensions (const ImageType imageType)
+{
+	switch (imageType)
+	{
+		case IMAGE_TYPE_1D:
+		case IMAGE_TYPE_BUFFER:
+		case IMAGE_TYPE_1D_ARRAY:
+			return 1u;
+
+		case IMAGE_TYPE_2D:
+		case IMAGE_TYPE_2D_ARRAY:
+		case IMAGE_TYPE_CUBE:
+		case IMAGE_TYPE_CUBE_ARRAY:
+			return 2u;
+
+		case IMAGE_TYPE_3D:
+			return 3u;
+
+		default:
+			DE_FATAL("Unknown image type");
+			return 0u;
+	}
+}
+
 VkBufferCreateInfo makeBufferCreateInfo (const VkDeviceSize			bufferSize,
 										 const VkBufferUsageFlags	usage)
 {
diff --git a/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.hpp b/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.hpp
index 45e7932..d4dc254 100644
--- a/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.hpp
+++ b/external/vulkancts/modules/vulkan/image/vktImageTestsUtil.hpp
@@ -105,6 +105,13 @@
 	Image&							operator=		(const Image&);
 };
 
+tcu::UVec3	getShaderGridSize	(const ImageType imageType, const tcu::UVec3& imageSize);	//!< Size used for addressing image in a shader
+tcu::UVec3	getLayerSize		(const ImageType imageType, const tcu::UVec3& imageSize);	//!< Size of a single layer
+deUint32	getNumLayers		(const ImageType imageType, const tcu::UVec3& imageSize);	//!< Number of array layers (for array and cube types)
+deUint32	getNumPixels		(const ImageType imageType, const tcu::UVec3& imageSize);	//!< Number of texels in an image
+deUint32	getDimensions		(const ImageType imageType);								//!< Coordinate dimension used for addressing (e.g. 3 (x,y,z) for 2d array)
+deUint32	getLayerDimensions	(const ImageType imageType);								//!< Coordinate dimension used for addressing a single layer (e.g. 2 (x,y) for 2d array)
+
 vk::Move<vk::VkCommandPool>		makeCommandPool					(const vk::DeviceInterface&			vk,
 																 const vk::VkDevice					device,
 																 const deUint32						queueFamilyIndex);
@@ -171,11 +178,6 @@
 																 const vk::VkQueue					queue,
 																 const vk::VkCommandBuffer			cmdBuffer);
 
-inline vk::VkExtent3D makeExtent3D (const tcu::IVec3& vec)
-{
-	return vk::makeExtent3D(vec.x(), vec.y(), vec.z());
-}
-
 inline vk::VkDeviceSize getImageSizeBytes (const tcu::IVec3& imageSize, const vk::VkFormat format)
 {
 	return tcu::getPixelSize(vk::mapVkFormat(format)) * imageSize.x() * imageSize.y() * imageSize.z();
diff --git a/external/vulkancts/mustpass/1.0.0/vk-default.txt b/external/vulkancts/mustpass/1.0.0/vk-default.txt
index 8a87b29..59b581c 100644
--- a/external/vulkancts/mustpass/1.0.0/vk-default.txt
+++ b/external/vulkancts/mustpass/1.0.0/vk-default.txt
@@ -80301,3 +80301,199 @@
 dEQP-VK.image.image_size.buffer.readonly_writeonly_12
 dEQP-VK.image.image_size.buffer.readonly_writeonly_1
 dEQP-VK.image.image_size.buffer.readonly_writeonly_7
+dEQP-VK.image.atomic_operations.add.1d_r32ui_end_result
+dEQP-VK.image.atomic_operations.add.1d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_r32i_end_result
+dEQP-VK.image.atomic_operations.add.1d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.add.1d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.1d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.add.1d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_r32ui_end_result
+dEQP-VK.image.atomic_operations.add.2d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_r32i_end_result
+dEQP-VK.image.atomic_operations.add.2d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.add.2d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.2d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.add.2d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d_r32ui_end_result
+dEQP-VK.image.atomic_operations.add.3d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.3d_r32i_end_result
+dEQP-VK.image.atomic_operations.add.3d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_r32ui_end_result
+dEQP-VK.image.atomic_operations.add.cube_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_r32i_end_result
+dEQP-VK.image.atomic_operations.add.cube_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.add.cube_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.add.cube_array_r32i_end_result
+dEQP-VK.image.atomic_operations.add.cube_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_r32ui_end_result
+dEQP-VK.image.atomic_operations.min.1d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_r32i_end_result
+dEQP-VK.image.atomic_operations.min.1d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.min.1d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.1d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.min.1d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_r32ui_end_result
+dEQP-VK.image.atomic_operations.min.2d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_r32i_end_result
+dEQP-VK.image.atomic_operations.min.2d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.min.2d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.2d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.min.2d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d_r32ui_end_result
+dEQP-VK.image.atomic_operations.min.3d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.3d_r32i_end_result
+dEQP-VK.image.atomic_operations.min.3d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_r32ui_end_result
+dEQP-VK.image.atomic_operations.min.cube_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_r32i_end_result
+dEQP-VK.image.atomic_operations.min.cube_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.min.cube_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.min.cube_array_r32i_end_result
+dEQP-VK.image.atomic_operations.min.cube_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_r32ui_end_result
+dEQP-VK.image.atomic_operations.max.1d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_r32i_end_result
+dEQP-VK.image.atomic_operations.max.1d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.max.1d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.1d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.max.1d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_r32ui_end_result
+dEQP-VK.image.atomic_operations.max.2d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_r32i_end_result
+dEQP-VK.image.atomic_operations.max.2d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.max.2d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.2d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.max.2d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d_r32ui_end_result
+dEQP-VK.image.atomic_operations.max.3d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.3d_r32i_end_result
+dEQP-VK.image.atomic_operations.max.3d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_r32ui_end_result
+dEQP-VK.image.atomic_operations.max.cube_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_r32i_end_result
+dEQP-VK.image.atomic_operations.max.cube_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.max.cube_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.max.cube_array_r32i_end_result
+dEQP-VK.image.atomic_operations.max.cube_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_r32ui_end_result
+dEQP-VK.image.atomic_operations.and.1d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_r32i_end_result
+dEQP-VK.image.atomic_operations.and.1d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.and.1d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.1d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.and.1d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_r32ui_end_result
+dEQP-VK.image.atomic_operations.and.2d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_r32i_end_result
+dEQP-VK.image.atomic_operations.and.2d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.and.2d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.2d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.and.2d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d_r32ui_end_result
+dEQP-VK.image.atomic_operations.and.3d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.3d_r32i_end_result
+dEQP-VK.image.atomic_operations.and.3d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_r32ui_end_result
+dEQP-VK.image.atomic_operations.and.cube_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_r32i_end_result
+dEQP-VK.image.atomic_operations.and.cube_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.and.cube_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.and.cube_array_r32i_end_result
+dEQP-VK.image.atomic_operations.and.cube_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_r32ui_end_result
+dEQP-VK.image.atomic_operations.or.1d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_r32i_end_result
+dEQP-VK.image.atomic_operations.or.1d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.or.1d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.1d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.or.1d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_r32ui_end_result
+dEQP-VK.image.atomic_operations.or.2d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_r32i_end_result
+dEQP-VK.image.atomic_operations.or.2d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.or.2d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.2d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.or.2d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d_r32ui_end_result
+dEQP-VK.image.atomic_operations.or.3d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.3d_r32i_end_result
+dEQP-VK.image.atomic_operations.or.3d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_r32ui_end_result
+dEQP-VK.image.atomic_operations.or.cube_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_r32i_end_result
+dEQP-VK.image.atomic_operations.or.cube_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.or.cube_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.or.cube_array_r32i_end_result
+dEQP-VK.image.atomic_operations.or.cube_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_r32ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_r32i_end_result
+dEQP-VK.image.atomic_operations.xor.1d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.1d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.xor.1d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_r32ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_r32i_end_result
+dEQP-VK.image.atomic_operations.xor.2d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.2d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.xor.2d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d_r32ui_end_result
+dEQP-VK.image.atomic_operations.xor.3d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.3d_r32i_end_result
+dEQP-VK.image.atomic_operations.xor.3d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_r32ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_r32i_end_result
+dEQP-VK.image.atomic_operations.xor.cube_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.xor.cube_array_r32i_end_result
+dEQP-VK.image.atomic_operations.xor.cube_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_r32ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_r32i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.1d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.exchange.1d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_r32ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_r32i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.2d_array_r32i_end_result
+dEQP-VK.image.atomic_operations.exchange.2d_array_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d_r32ui_end_result
+dEQP-VK.image.atomic_operations.exchange.3d_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.3d_r32i_end_result
+dEQP-VK.image.atomic_operations.exchange.3d_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_r32ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_r32i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_r32i_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array_r32ui_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array_r32ui_intermediate_values
+dEQP-VK.image.atomic_operations.exchange.cube_array_r32i_end_result
+dEQP-VK.image.atomic_operations.exchange.cube_array_r32i_intermediate_values