Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h |   2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h |   2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_timeline.c       | 845
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c     |   6
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.c       |  29
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.h       |  15
6 files changed, 5 insertions, 894 deletions
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d5dc4427d664..2b31a4ee0b4c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -12,7 +12,7 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, i915_timeline_live_selftests)
+selftest(timelines, intel_timeline_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 510eb176bb2c..b55da4d9ccba 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -15,7 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
-selftest(timelines, i915_timeline_mock_selftests)
+selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
deleted file mode 100644
index 44d031446f08..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ /dev/null
@@ -1,845 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#include <linux/prime_numbers.h>
-
-#include "gem/i915_gem_pm.h"
-
-#include "i915_random.h"
-#include "i915_selftest.h"
-
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
-#include "mock_timeline.h"
-
-static struct page *hwsp_page(struct i915_timeline *tl)
-{
- struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- return sg_page(obj->mm.pages->sgl);
-}
-
-static unsigned long hwsp_cacheline(struct i915_timeline *tl)
-{
- unsigned long address = (unsigned long)page_address(hwsp_page(tl));
-
- return (address + tl->hwsp_offset) / CACHELINE_BYTES;
-}
-
-#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
-
-struct mock_hwsp_freelist {
- struct drm_i915_private *i915;
- struct radix_tree_root cachelines;
- struct i915_timeline **history;
- unsigned long count, max;
- struct rnd_state prng;
-};
-
-enum {
- SHUFFLE = BIT(0),
-};
-
-static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
- unsigned int idx,
- struct i915_timeline *tl)
-{
- tl = xchg(&state->history[idx], tl);
- if (tl) {
- radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
- i915_timeline_put(tl);
- }
-}
-
-static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
- unsigned int count,
- unsigned int flags)
-{
- struct i915_timeline *tl;
- unsigned int idx;
-
- while (count--) {
- unsigned long cacheline;
- int err;
-
- tl = i915_timeline_create(&state->i915->gt, NULL);
- if (IS_ERR(tl))
- return PTR_ERR(tl);
-
- cacheline = hwsp_cacheline(tl);
- err = radix_tree_insert(&state->cachelines, cacheline, tl);
- if (err) {
- if (err == -EEXIST) {
- pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
- cacheline);
- }
- i915_timeline_put(tl);
- return err;
- }
-
- idx = state->count++ % state->max;
- __mock_hwsp_record(state, idx, tl);
- }
-
- if (flags & SHUFFLE)
- i915_prandom_shuffle(state->history,
- sizeof(*state->history),
- min(state->count, state->max),
- &state->prng);
-
- count = i915_prandom_u32_max_state(min(state->count, state->max),
- &state->prng);
- while (count--) {
- idx = --state->count % state->max;
- __mock_hwsp_record(state, idx, NULL);
- }
-
- return 0;
-}
-
-static int mock_hwsp_freelist(void *arg)
-{
- struct mock_hwsp_freelist state;
- const struct {
- const char *name;
- unsigned int flags;
- } phases[] = {
- { "linear", 0 },
- { "shuffled", SHUFFLE },
- { },
- }, *p;
- unsigned int na;
- int err = 0;
-
- INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
- state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
-
- state.i915 = mock_gem_device();
- if (!state.i915)
- return -ENOMEM;
-
- /*
- * Create a bunch of timelines and check that their HWSP do not overlap.
- * Free some, and try again.
- */
-
- state.max = PAGE_SIZE / sizeof(*state.history);
- state.count = 0;
- state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
- if (!state.history) {
- err = -ENOMEM;
- goto err_put;
- }
-
- mutex_lock(&state.i915->drm.struct_mutex);
- for (p = phases; p->name; p++) {
- pr_debug("%s(%s)\n", __func__, p->name);
- for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
- err = __mock_hwsp_timeline(&state, na, p->flags);
- if (err)
- goto out;
- }
- }
-
-out:
- for (na = 0; na < state.max; na++)
- __mock_hwsp_record(&state, na, NULL);
- mutex_unlock(&state.i915->drm.struct_mutex);
- kfree(state.history);
-err_put:
- drm_dev_put(&state.i915->drm);
- return err;
-}
-
-struct __igt_sync {
- const char *name;
- u32 seqno;
- bool expected;
- bool set;
-};
-
-static int __igt_sync(struct i915_timeline *tl,
- u64 ctx,
- const struct __igt_sync *p,
- const char *name)
-{
- int ret;
-
- if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
- pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
- name, p->name, ctx, p->seqno, yesno(p->expected));
- return -EINVAL;
- }
-
- if (p->set) {
- ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int igt_sync(void *arg)
-{
- const struct __igt_sync pass[] = {
- { "unset", 0, false, false },
- { "new", 0, false, true },
- { "0a", 0, true, true },
- { "1a", 1, false, true },
- { "1b", 1, true, true },
- { "0b", 0, true, false },
- { "2a", 2, false, true },
- { "4", 4, false, true },
- { "INT_MAX", INT_MAX, false, true },
- { "INT_MAX-1", INT_MAX-1, true, false },
- { "INT_MAX+1", (u32)INT_MAX+1, false, true },
- { "INT_MAX", INT_MAX, true, false },
- { "UINT_MAX", UINT_MAX, false, true },
- { "wrap", 0, false, true },
- { "unwrap", UINT_MAX, true, false },
- {},
- }, *p;
- struct i915_timeline tl;
- int order, offset;
- int ret = -ENODEV;
-
- mock_timeline_init(&tl, 0);
- for (p = pass; p->name; p++) {
- for (order = 1; order < 64; order++) {
- for (offset = -1; offset <= (order > 1); offset++) {
- u64 ctx = BIT_ULL(order) + offset;
-
- ret = __igt_sync(&tl, ctx, p, "1");
- if (ret)
- goto out;
- }
- }
- }
- mock_timeline_fini(&tl);
-
- mock_timeline_init(&tl, 0);
- for (order = 1; order < 64; order++) {
- for (offset = -1; offset <= (order > 1); offset++) {
- u64 ctx = BIT_ULL(order) + offset;
-
- for (p = pass; p->name; p++) {
- ret = __igt_sync(&tl, ctx, p, "2");
- if (ret)
- goto out;
- }
- }
- }
-
-out:
- mock_timeline_fini(&tl);
- return ret;
-}
-
-static unsigned int random_engine(struct rnd_state *rnd)
-{
- return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
-}
-
-static int bench_sync(void *arg)
-{
- struct rnd_state prng;
- struct i915_timeline tl;
- unsigned long end_time, count;
- u64 prng32_1M;
- ktime_t kt;
- int order, last_order;
-
- mock_timeline_init(&tl, 0);
-
- /* Lookups from cache are very fast and so the random number generation
- * and the loop itself becomes a significant factor in the per-iteration
- * timings. We try to compensate the results by measuring the overhead
- * of the prng and subtract it from the reported results.
- */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- u32 x;
-
- /* Make sure the compiler doesn't optimise away the prng call */
- WRITE_ONCE(x, prandom_u32_state(&prng));
-
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- pr_debug("%s: %lu random evaluations, %lluns/prng\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
- prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
-
- /* Benchmark (only) setting random context ids */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- u64 id = i915_prandom_u64_state(&prng);
-
- __i915_timeline_sync_set(&tl, id, 0);
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
- pr_info("%s: %lu random insertions, %lluns/insert\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- /* Benchmark looking up the exact same context ids as we just set */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- end_time = count;
- kt = ktime_get();
- while (end_time--) {
- u64 id = i915_prandom_u64_state(&prng);
-
- if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
- mock_timeline_fini(&tl);
- pr_err("Lookup of %llu failed\n", id);
- return -EINVAL;
- }
- }
- kt = ktime_sub(ktime_get(), kt);
- kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
- pr_info("%s: %lu random lookups, %lluns/lookup\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- mock_timeline_fini(&tl);
- cond_resched();
-
- mock_timeline_init(&tl, 0);
-
- /* Benchmark setting the first N (in order) contexts */
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- __i915_timeline_sync_set(&tl, count++, 0);
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- pr_info("%s: %lu in-order insertions, %lluns/insert\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- /* Benchmark looking up the exact same context ids as we just set */
- end_time = count;
- kt = ktime_get();
- while (end_time--) {
- if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
- pr_err("Lookup of %lu failed\n", end_time);
- mock_timeline_fini(&tl);
- return -EINVAL;
- }
- }
- kt = ktime_sub(ktime_get(), kt);
- pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- mock_timeline_fini(&tl);
- cond_resched();
-
- mock_timeline_init(&tl, 0);
-
- /* Benchmark searching for a random context id and maybe changing it */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- u32 id = random_engine(&prng);
- u32 seqno = prandom_u32_state(&prng);
-
- if (!__i915_timeline_sync_is_later(&tl, id, seqno))
- __i915_timeline_sync_set(&tl, id, seqno);
-
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
- pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_fini(&tl);
- cond_resched();
-
- /* Benchmark searching for a known context id and changing the seqno */
- for (last_order = 1, order = 1; order < 32;
- ({ int tmp = last_order; last_order = order; order += tmp; })) {
- unsigned int mask = BIT(order) - 1;
-
- mock_timeline_init(&tl, 0);
-
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- /* Without assuming too many details of the underlying
- * implementation, try to identify its phase-changes
- * (if any)!
- */
- u64 id = (u64)(count & mask) << order;
-
- __i915_timeline_sync_is_later(&tl, id, 0);
- __i915_timeline_sync_set(&tl, id, 0);
-
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
- __func__, count, order,
- (long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_fini(&tl);
- cond_resched();
- }
-
- return 0;
-}
-
-int i915_timeline_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(mock_hwsp_freelist),
- SUBTEST(igt_sync),
- SUBTEST(bench_sync),
- };
-
- return i915_subtests(tests, NULL);
-}
-
-static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (INTEL_GEN(rq->i915) >= 8) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = addr;
- *cs++ = 0;
- *cs++ = value;
- } else if (INTEL_GEN(rq->i915) >= 4) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = 0;
- *cs++ = addr;
- *cs++ = value;
- } else {
- *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = addr;
- *cs++ = value;
- *cs++ = MI_NOOP;
- }
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static struct i915_request *
-tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
-{
- struct i915_request *rq;
- int err;
-
- lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
-
- err = i915_timeline_pin(tl);
- if (err) {
- rq = ERR_PTR(err);
- goto out;
- }
-
- rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- goto out_unpin;
-
- err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
- i915_request_add(rq);
- if (err)
- rq = ERR_PTR(err);
-
-out_unpin:
- i915_timeline_unpin(tl);
-out:
- if (IS_ERR(rq))
- pr_err("Failed to write to timeline!\n");
- return rq;
-}
-
-static struct i915_timeline *
-checked_i915_timeline_create(struct drm_i915_private *i915)
-{
- struct i915_timeline *tl;
-
- tl = i915_timeline_create(&i915->gt, NULL);
- if (IS_ERR(tl))
- return tl;
-
- if (*tl->hwsp_seqno != tl->seqno) {
- pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
- *tl->hwsp_seqno, tl->seqno);
- i915_timeline_put(tl);
- return ERR_PTR(-EINVAL);
- }
-
- return tl;
-}
-
-static int live_hwsp_engine(void *arg)
-{
-#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long count, n;
- int err = 0;
-
- /*
- * Create a bunch of timelines and check we can write
- * independently to each of their breadcrumb slots.
- */
-
- timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
- sizeof(*timelines),
- GFP_KERNEL);
- if (!timelines)
- return -ENOMEM;
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- count = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- for (n = 0; n < NUM_TIMELINES; n++) {
- struct i915_timeline *tl;
- struct i915_request *rq;
-
- tl = checked_i915_timeline_create(i915);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out;
- }
-
- rq = tl_write(tl, engine, count);
- if (IS_ERR(rq)) {
- i915_timeline_put(tl);
- err = PTR_ERR(rq);
- goto out;
- }
-
- timelines[count++] = tl;
- }
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
-
- for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
-
- if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
- err = -EINVAL;
- }
- i915_timeline_put(tl);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kvfree(timelines);
-
- return err;
-#undef NUM_TIMELINES
-}
-
-static int live_hwsp_alternate(void *arg)
-{
-#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long count, n;
- int err = 0;
-
- /*
- * Create a bunch of timelines and check we can write
- * independently to each of their breadcrumb slots with adjacent
- * engines.
- */
-
- timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
- sizeof(*timelines),
- GFP_KERNEL);
- if (!timelines)
- return -ENOMEM;
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- count = 0;
- for (n = 0; n < NUM_TIMELINES; n++) {
- for_each_engine(engine, i915, id) {
- struct i915_timeline *tl;
- struct i915_request *rq;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- tl = checked_i915_timeline_create(i915);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out;
- }
-
- rq = tl_write(tl, engine, count);
- if (IS_ERR(rq)) {
- i915_timeline_put(tl);
- err = PTR_ERR(rq);
- goto out;
- }
-
- timelines[count++] = tl;
- }
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
-
- for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
-
- if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
- err = -EINVAL;
- }
- i915_timeline_put(tl);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kvfree(timelines);
-
- return err;
-#undef NUM_TIMELINES
-}
-
-static int live_hwsp_wrap(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- struct i915_timeline *tl;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- int err = 0;
-
- /*
- * Across a seqno wrap, we need to keep the old cacheline alive for
- * foreign GPU references.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- tl = i915_timeline_create(&i915->gt, NULL);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out_rpm;
- }
- if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
- goto out_free;
-
- err = i915_timeline_pin(tl);
- if (err)
- goto out_free;
-
- for_each_engine(engine, i915, id) {
- const u32 *hwsp_seqno[2];
- struct i915_request *rq;
- u32 seqno[2];
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- tl->seqno = -4u;
-
- err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
- seqno[0], tl->hwsp_offset);
-
- err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- hwsp_seqno[0] = tl->hwsp_seqno;
-
- err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
- seqno[1], tl->hwsp_offset);
-
- err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- hwsp_seqno[1] = tl->hwsp_seqno;
-
- /* With wrap should come a new hwsp */
- GEM_BUG_ON(seqno[1] >= seqno[0]);
- GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
-
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("Wait for timeline writes timed out!\n");
- err = -EIO;
- goto out;
- }
-
- if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
- pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
- *hwsp_seqno[0], *hwsp_seqno[1],
- seqno[0], seqno[1]);
- err = -EINVAL;
- goto out;
- }
-
- i915_retire_requests(i915); /* recycle HWSP */
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
-
- i915_timeline_unpin(tl);
-out_free:
- i915_timeline_put(tl);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
-}
-
-static int live_hwsp_recycle(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long count;
- int err = 0;
-
- /*
- * Check seqno writes into one timeline at a time. We expect to
- * recycle the breadcrumb slot between iterations and neither
- * want to confuse ourselves or the GPU.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- count = 0;
- for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- do {
- struct i915_timeline *tl;
- struct i915_request *rq;
-
- tl = checked_i915_timeline_create(i915);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out;
- }
-
- rq = tl_write(tl, engine, count);
- if (IS_ERR(rq)) {
- i915_timeline_put(tl);
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("Wait for timeline writes timed out!\n");
- i915_timeline_put(tl);
- err = -EIO;
- goto out;
- }
-
- if (*tl->hwsp_seqno != count) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- count, *tl->hwsp_seqno);
- err = -EINVAL;
- }
-
- i915_timeline_put(tl);
- count++;
-
- if (err)
- goto out;
-
- i915_timelines_park(i915); /* Encourage recycling! */
- } while (!__igt_timeout(end_time, NULL));
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
-}
-
-int i915_timeline_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(live_hwsp_recycle),
- SUBTEST(live_hwsp_engine),
- SUBTEST(live_hwsp_alternate),
- SUBTEST(live_hwsp_wrap),
- };
-
- if (i915_terminally_wedged(i915))
- return 0;
-
- return i915_subtests(tests, i915);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 021ba42a3a00..2741805b56c2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -68,7 +68,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
@@ -199,7 +199,7 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915_timelines_init(i915);
+ intel_timelines_init(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -230,7 +230,7 @@ err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
deleted file mode 100644
index c80ac0fbdd3b..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#include "../i915_timeline.h"
-
-#include "mock_timeline.h"
-
-void mock_timeline_init(struct i915_timeline *timeline, u64 context)
-{
- timeline->gt = NULL;
- timeline->fence_context = context;
-
- mutex_init(&timeline->mutex);
-
- INIT_ACTIVE_REQUEST(&timeline->last_request);
- INIT_LIST_HEAD(&timeline->requests);
-
- i915_syncmap_init(&timeline->sync);
-
- INIT_LIST_HEAD(&timeline->link);
-}
-
-void mock_timeline_fini(struct i915_timeline *timeline)
-{
- i915_syncmap_free(&timeline->sync);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
deleted file mode 100644
index b6deaa61110d..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#ifndef __MOCK_TIMELINE__
-#define __MOCK_TIMELINE__
-
-struct i915_timeline;
-
-void mock_timeline_init(struct i915_timeline *timeline, u64 context);
-void mock_timeline_fini(struct i915_timeline *timeline);
-
-#endif /* !__MOCK_TIMELINE__ */