Use int instead of uint32_t for the max image units per shader in Vulkan, for consistency with the other APIs

PiperOrigin-RevId: 359279965
Change-Id: I8bbd5df3f2dd413478e075068f22eefc4a0c4d8b
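
For context, a minimal sketch (illustration only, not part of this change) of
the kind of call-site friction a consistently signed field avoids. The struct
and the RemainingImageSlots helper below are hypothetical, written only to
show the signed/unsigned mixing issue; only the field name comes from
gpu_info.h:

    #include <cstdint>

    struct GpuInfoSketch {
      // After this change the Vulkan limit is a signed int, matching how
      // comparable limits are exposed for the other backends.
      int max_per_stage_descriptor_sampled_images = 0;
    };

    // Hypothetical caller computing remaining image slots as a signed value.
    int RemainingImageSlots(const GpuInfoSketch& info, int used) {
      // If the field were uint32_t, `info.max_... - used` would be evaluated
      // as an unsigned expression and wrap to a huge value whenever `used`
      // exceeded the limit; with int on both sides the subtraction can go
      // negative as expected, and -Wsign-compare warnings at comparison
      // sites disappear.
      return info.max_per_stage_descriptor_sampled_images - used;
    }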
diff --git a/tensorflow/lite/delegates/gpu/common/gpu_info.h b/tensorflow/lite/delegates/gpu/common/gpu_info.h
index 77162e3..cdcbd64 100644
--- a/tensorflow/lite/delegates/gpu/common/gpu_info.h
+++ b/tensorflow/lite/delegates/gpu/common/gpu_info.h
@@ -230,7 +230,7 @@
   uint32_t api_version_minor = -1;
   uint32_t api_version_patch = -1;
 
-  uint32_t max_per_stage_descriptor_sampled_images = 0;
+  int max_per_stage_descriptor_sampled_images = 0;
   uint32_t max_compute_work_group_invocations;
   uint32_t max_image_dimension_2d;
   uint32_t max_image_array_layers;