Skip to content
This repository was archived by the owner on Oct 3, 2024. It is now read-only.

Commit 3a534c3

Browse files
icklezhenyw
authored and committed
drm/i915/gvt: Parse default state to update reg whitelist
Rather than break existing context objects by incorrectly forcing them to rogue cache coherency and trying to assert a new mapping, read the reg whitelist from the default context image. And use gvt->gt, never &dev_priv->gt. Fixes: 493f30c ("drm/i915/gvt: parse init context to update cmd accessible reg whitelist") Acked-by: Zhenyu Wang <[email protected]> Signed-off-by: Chris Wilson <[email protected]> Cc: Joonas Lahtinen <[email protected]> Cc: Kevin Tian <[email protected]> Cc: Wang Zhi <[email protected]> Cc: Yan Zhao <[email protected]> Cc: Zhenyu Wang <[email protected]> Cc: Zhi Wang <[email protected]> Signed-off-by: Zhenyu Wang <[email protected]> Link: http://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent ab07fea commit 3a534c3

File tree

1 file changed

+20
-73
lines changed

1 file changed

+20
-73
lines changed

drivers/gpu/drm/i915/gvt/cmd_parser.c

Lines changed: 20 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@
4141
#include "gt/intel_lrc.h"
4242
#include "gt/intel_ring.h"
4343
#include "gt/intel_gt_requests.h"
44+
#include "gt/shmem_utils.h"
4445
#include "gvt.h"
4546
#include "i915_pvinfo.h"
4647
#include "trace.h"
@@ -3094,111 +3095,57 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
30943095
*/
30953096
void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
30963097
{
3098+
const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
30973099
struct intel_gvt *gvt = vgpu->gvt;
3098-
struct drm_i915_private *dev_priv = gvt->gt->i915;
30993100
struct intel_engine_cs *engine;
31003101
enum intel_engine_id id;
3101-
const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
3102-
struct i915_request *rq;
3103-
struct intel_vgpu_submission *s = &vgpu->submission;
3104-
struct i915_request *requests[I915_NUM_ENGINES] = {};
3105-
bool is_ctx_pinned[I915_NUM_ENGINES] = {};
3106-
int ret = 0;
31073102

31083103
if (gvt->is_reg_whitelist_updated)
31093104
return;
31103105

3111-
for_each_engine(engine, &dev_priv->gt, id) {
3112-
ret = intel_context_pin(s->shadow[id]);
3113-
if (ret) {
3114-
gvt_vgpu_err("fail to pin shadow ctx\n");
3115-
goto out;
3116-
}
3117-
is_ctx_pinned[id] = true;
3118-
3119-
rq = i915_request_create(s->shadow[id]);
3120-
if (IS_ERR(rq)) {
3121-
gvt_vgpu_err("fail to alloc default request\n");
3122-
ret = -EIO;
3123-
goto out;
3124-
}
3125-
requests[id] = i915_request_get(rq);
3126-
i915_request_add(rq);
3127-
}
3128-
3129-
if (intel_gt_wait_for_idle(&dev_priv->gt,
3130-
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
3131-
ret = -EIO;
3132-
goto out;
3133-
}
3134-
31353106
/* scan init ctx to update cmd accessible list */
3136-
for_each_engine(engine, &dev_priv->gt, id) {
3137-
int size = engine->context_size - PAGE_SIZE;
3138-
void *vaddr;
3107+
for_each_engine(engine, gvt->gt, id) {
31393108
struct parser_exec_state s;
3140-
struct drm_i915_gem_object *obj;
3141-
struct i915_request *rq;
3142-
3143-
rq = requests[id];
3144-
GEM_BUG_ON(!i915_request_completed(rq));
3145-
GEM_BUG_ON(!intel_context_is_pinned(rq->context));
3146-
obj = rq->context->state->obj;
3147-
3148-
if (!obj) {
3149-
ret = -EIO;
3150-
goto out;
3151-
}
3109+
void *vaddr;
3110+
int ret;
31523111

3153-
i915_gem_object_set_cache_coherency(obj,
3154-
I915_CACHE_LLC);
3112+
if (!engine->default_state)
3113+
continue;
31553114

3156-
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
3115+
vaddr = shmem_pin_map(engine->default_state);
31573116
if (IS_ERR(vaddr)) {
3158-
gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
3159-
id, PTR_ERR(vaddr));
3160-
ret = PTR_ERR(vaddr);
3161-
goto out;
3117+
gvt_err("failed to map %s->default state, err:%zd\n",
3118+
engine->name, PTR_ERR(vaddr));
3119+
return;
31623120
}
31633121

31643122
s.buf_type = RING_BUFFER_CTX;
31653123
s.buf_addr_type = GTT_BUFFER;
31663124
s.vgpu = vgpu;
31673125
s.engine = engine;
31683126
s.ring_start = 0;
3169-
s.ring_size = size;
3127+
s.ring_size = engine->context_size - start;
31703128
s.ring_head = 0;
3171-
s.ring_tail = size;
3129+
s.ring_tail = s.ring_size;
31723130
s.rb_va = vaddr + start;
31733131
s.workload = NULL;
31743132
s.is_ctx_wa = false;
31753133
s.is_init_ctx = true;
31763134

31773135
/* skipping the first RING_CTX_SIZE(0x50) dwords */
31783136
ret = ip_gma_set(&s, RING_CTX_SIZE);
3179-
if (ret) {
3180-
i915_gem_object_unpin_map(obj);
3181-
goto out;
3137+
if (ret == 0) {
3138+
ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
3139+
if (ret)
3140+
gvt_err("Scan init ctx error\n");
31823141
}
31833142

3184-
ret = command_scan(&s, 0, size, 0, size);
3143+
shmem_unpin_map(engine->default_state, vaddr);
31853144
if (ret)
3186-
gvt_err("Scan init ctx error\n");
3187-
3188-
i915_gem_object_unpin_map(obj);
3145+
return;
31893146
}
31903147

3191-
out:
3192-
if (!ret)
3193-
gvt->is_reg_whitelist_updated = true;
3194-
3195-
for (id = 0; id < I915_NUM_ENGINES ; id++) {
3196-
if (requests[id])
3197-
i915_request_put(requests[id]);
3198-
3199-
if (is_ctx_pinned[id])
3200-
intel_context_unpin(s->shadow[id]);
3201-
}
3148+
gvt->is_reg_whitelist_updated = true;
32023149
}
32033150

32043151
int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)

0 commit comments

Comments (0)