drm/i915/selftests: Calculate maximum ring size for preemption chain

32 is too many for the likes of kbl, and inserting that many requests
into the ring requires us to declare the first few hung --
understandably a slow and unexpected process. Instead, measure the size
of a single request and use that to estimate the upper bound on the
chain length we can use for our test, remembering to flush the previous
chain between tests for safety.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: "Yokoyama, Caz" <caz.yokoyama@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321194031.20240-1-chris@chris-wilson.co.uk
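
For reference, a minimal userspace sketch (not part of the patch) of the
estimate introduced below: the footprint of one request is the distance
from its head to its wa_tail in the circular ring, and the ring size
divided by that footprint bounds the chain length. The byte values here
are hypothetical; the selftest reads rq->head, rq->wa_tail and
rq->ring->size from the request it has just submitted.

    #include <stdio.h>

    static int max_chain_length(int head, int wa_tail, int ring_bytes)
    {
    	int request_bytes = wa_tail - head;

    	/* The ring is a circular buffer, so the tail may have wrapped. */
    	if (request_bytes < 0)
    		request_bytes += ring_bytes;

    	/* How many requests of that size fit in the ring at once. */
    	return ring_bytes / request_bytes;
    }

    int main(void)
    {
    	/* e.g. a 16 KiB ring and a request occupying 192 bytes -> 85 */
    	printf("max chain length: %d\n",
    	       max_chain_length(0x40, 0x100, 16384));
    	return 0;
    }
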
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 9e871eb..0afebbe 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -615,14 +615,33 @@
 		struct i915_sched_attr attr = {
 			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
 		};
-		int count, i;
+		struct i915_request *rq;
+		int ring_size, count, i;
 
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
-			struct i915_request *rq;
+		rq = igt_spinner_create_request(&lo.spin,
+						lo.ctx, engine,
+						MI_ARB_CHECK);
+		if (IS_ERR(rq))
+			goto err_wedged;
+		i915_request_add(rq);
 
+		ring_size = rq->wa_tail - rq->head;
+		if (ring_size < 0)
+			ring_size += rq->ring->size;
+		ring_size = rq->ring->size / ring_size;
+		pr_debug("%s(%s): Using maximum of %d requests\n",
+			 __func__, engine->name, ring_size);
+
+		igt_spinner_end(&lo.spin);
+		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+			pr_err("Timed out waiting to flush %s\n", engine->name);
+			goto err_wedged;
+		}
+
+		for_each_prime_number_from(count, 1, ring_size) {
 			rq = igt_spinner_create_request(&hi.spin,
 							hi.ctx, engine,
 							MI_ARB_CHECK);
@@ -664,6 +683,21 @@
 				goto err_wedged;
 			}
 			igt_spinner_end(&lo.spin);
+
+			rq = i915_request_alloc(engine, lo.ctx);
+			if (IS_ERR(rq))
+				goto err_wedged;
+			i915_request_add(rq);
+			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+				struct drm_printer p =
+					drm_info_printer(i915->drm.dev);
+
+				pr_err("Failed to flush low priority chain of %d requests\n",
+				       count);
+				intel_engine_dump(engine, &p,
+						  "%s\n", engine->name);
+				goto err_wedged;
+			}
 		}
 	}