Fix format specifiers in quantize_model error reporting.

The errors reported for non-quantizable ops used a bare "%" instead of
"%s", so the name of the offending operator was never printed. Use
"%s" and terminate both messages with a newline.
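
For illustration, a minimal standalone sketch of the failure mode,
using std::printf in place of the printf-style error reporter that
TF_LITE_REPORT_ERROR forwards to; the op name CONV_2D is a
hypothetical example, not taken from the patched code:

    #include <cstdio>

    int main() {
      // Stand-in for EnumNameBuiltinOperator(op_code); the op name
      // here is hypothetical, for illustration only.
      const char* op_name = "CONV_2D";

      // Before the fix the format string ended in a bare '%':
      //   "Quantization not yet supported for op: %"
      // A '%' with no conversion character is undefined in
      // printf-style formatting, so the op name argument was
      // silently dropped from the reported message.

      // After the fix, '%s' consumes the argument and the message
      // names the op that blocked quantization:
      std::printf("Quantization not yet supported for op: '%s'.\n",
                  op_name);
      return 0;
    }
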
Change-Id: Icffdece7804a60beb4b59eb347738457c81b9986
diff --git a/tensorflow/lite/tools/optimize/quantize_model.cc b/tensorflow/lite/tools/optimize/quantize_model.cc
index bb1deb6..5cca49e 100644
--- a/tensorflow/lite/tools/optimize/quantize_model.cc
+++ b/tensorflow/lite/tools/optimize/quantize_model.cc
@@ -950,12 +950,12 @@
           !allow_float) {
         TF_LITE_REPORT_ERROR(
             error_reporter,
-            "Quantization to 16x8-bit not yet supported for op: %",
+            "Quantization to 16x8-bit not yet supported for op: '%s'.\n",
             EnumNameBuiltinOperator(op_code));
         return kTfLiteError;
       } else if (!property.quantizable && !allow_float) {
         TF_LITE_REPORT_ERROR(error_reporter,
-                             "Quantization not yet supported for op: %",
+                             "Quantization not yet supported for op: '%s'.\n",
                              EnumNameBuiltinOperator(op_code));
         return kTfLiteError;
       }