
Commit 5503983

unerlige authored and ashutoshx committed
xe/oa: Fix query mode of operation for OAR/OAC
This is a set of squashed commits to facilitate smooth applying to
stable. Each commit message is retained for reference.

1) Allow a GGTT mapped batch to be submitted to user exec queue

For an OA use case, one of the HW registers needs to be modified by
submitting an MI_LOAD_REGISTER_IMM command to the user's exec queue, so
that the register is modified in the user's hardware context. To do
this, a batch that is mapped in GGTT needs to be submitted to the user
exec queue. Since all user submissions use q->vm and hence PPGTT, add
some plumbing to enable submission of batches mapped in GGTT.

v2: ggtt is zero-initialized, so no need to set it false (Matt Brost)

2) xe/oa: Use MI_LOAD_REGISTER_IMMEDIATE to enable OAR/OAC

To enable OAR/OAC, a bit in RING_CONTEXT_CONTROL needs to be set.
Setting this bit causes the context image size to change and, if not
done correctly, can cause undesired hangs. The current code uses a
separate exec_queue to modify this bit and is error-prone. As per HW
recommendation, submit MI_LOAD_REGISTER_IMM to the target hardware
context to modify the relevant bit.

In v2, an attempt to submit everything to the user queue was made, but
it failed the unprivileged-single-ctx-counters test. It appears that
OACTXCONTROL must be modified from a remote context.

In v3, all context-specific register configurations were moved to use
LOAD_REGISTER_IMMEDIATE, and that seems to work well. This is a cleaner
way, since we can now submit all configuration to the user exec_queue
and the fence handling is simplified.

v2:
(Matt)
- Set job->ggtt to true if create job is successful
- Unlock vm on job error
(Ashutosh)
- Don't wait on job submission
- Use kernel exec queue where possible

v3:
(Ashutosh)
- Fix checkpatch issues
- Remove extra spaces/new-lines
- Add Fixes: and Cc: tags
- Reset context control bit when OA stream is closed
- Submit all config via MI_LOAD_REGISTER_IMMEDIATE
(Umesh)
- Update commit message for v3 experiment
- Squash patches for easier port to stable

v4:
(Ashutosh)
- No need to pass q to xe_oa_submit_bb
- Do not support exec queues with width > 1
- Fix disabling of CTX_CTRL_OAC_CONTEXT_ENABLE

v5:
(Ashutosh)
- Drop reg_lri related comments
- Use XE_OA_SUBMIT_NO_DEPS in xe_oa_load_with_lri

Fixes: 8135f1c ("drm/xe/oa: Don't reset OAC_CONTEXT_ENABLE on OA stream close")
Signed-off-by: Umesh Nerlige Ramappa <[email protected]>
Reviewed-by: Matthew Brost <[email protected]> # commit 1
Reviewed-by: Ashutosh Dixit <[email protected]>
Cc: [email protected]
Reviewed-by: Jonathan Cavitt <[email protected]>
Signed-off-by: Ashutosh Dixit <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
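
As an aside on the LRI mechanism the commit message describes: an
MI_LOAD_REGISTER_IMM packet is one header dword followed by
(register offset, value) pairs, which is why xe_oa_load_with_lri()
in the diff below sizes its batch as 2 * count + 1 dwords. The
following standalone sketch models that layout; the opcode encoding
and the emit_lri()/reg_write names are illustrative assumptions, not
the driver's actual write_cs_mi_lri() helper or register definitions.

/*
 * Illustrative only: models the conventional MI_LOAD_REGISTER_IMM
 * layout (header dword, then register/value pairs). The length-field
 * encoding below is an assumption; the real helper in xe is
 * write_cs_mi_lri(), shown in the diff.
 */
#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(opcode, flags)  (((uint32_t)(opcode) << 23) | (flags))
/* Assumed: length field counts the addr/value dwords minus one. */
#define MI_LOAD_REGISTER_IMM(n)  MI_INSTR(0x22, 2 * (n) - 1)

struct reg_write { uint32_t addr, value; };

/* Emit one LRI packet: 2 * count + 1 dwords, as sized in the patch. */
static uint32_t emit_lri(uint32_t *cs, const struct reg_write *regs,
                         uint32_t count)
{
        uint32_t len = 0;

        cs[len++] = MI_LOAD_REGISTER_IMM(count);
        for (uint32_t i = 0; i < count; i++) {
                cs[len++] = regs[i].addr;
                cs[len++] = regs[i].value;
        }
        return len;
}

int main(void)
{
        const struct reg_write regs[] = {
                { 0x2360, 0x1 }, /* hypothetical register offset/value */
                { 0x02b0, 0x1 }, /* hypothetical second pair */
        };
        uint32_t cs[8];
        uint32_t len = emit_lri(cs, regs, 2);

        for (uint32_t i = 0; i < len; i++)
                printf("dw[%u] = 0x%08x\n", (unsigned)i, (unsigned)cs[i]);
        return 0;
}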
1 parent: 0708908

3 files changed (+51, -92 lines)

drivers/gpu/drm/xe/xe_oa.c
45 additions, 91 deletions

@@ -74,12 +74,6 @@ struct xe_oa_config {
         struct rcu_head rcu;
 };
 
-struct flex {
-        struct xe_reg reg;
-        u32 offset;
-        u32 value;
-};
-
 struct xe_oa_open_param {
         struct xe_file *xef;
         u32 oa_unit_id;
@@ -605,19 +599,38 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
         return ret;
 }
 
+static void xe_oa_lock_vma(struct xe_exec_queue *q)
+{
+        if (q->vm) {
+                down_read(&q->vm->lock);
+                xe_vm_lock(q->vm, false);
+        }
+}
+
+static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+{
+        if (q->vm) {
+                xe_vm_unlock(q->vm);
+                up_read(&q->vm->lock);
+        }
+}
+
 static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
                                          struct xe_bb *bb)
 {
+        struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
         struct xe_sched_job *job;
         struct dma_fence *fence;
         int err = 0;
 
-        /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
-        job = xe_bb_create_job(stream->k_exec_q, bb);
+        xe_oa_lock_vma(q);
+
+        job = xe_bb_create_job(q, bb);
         if (IS_ERR(job)) {
                 err = PTR_ERR(job);
                 goto exit;
         }
+        job->ggtt = true;
 
         if (deps == XE_OA_SUBMIT_ADD_DEPS) {
                 for (int i = 0; i < stream->num_syncs && !err; i++)
@@ -632,10 +645,13 @@ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa
         fence = dma_fence_get(&job->drm.s_fence->finished);
         xe_sched_job_push(job);
 
+        xe_oa_unlock_vma(q);
+
         return fence;
 err_put_job:
         xe_sched_job_put(job);
 exit:
+        xe_oa_unlock_vma(q);
         return ERR_PTR(err);
 }
 
@@ -684,65 +700,19 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
         dma_fence_put(stream->last_fence);
 }
 
-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
-                             struct xe_bb *bb, const struct flex *flex, u32 count)
-{
-        u32 offset = xe_bo_ggtt_addr(lrc->bo);
-
-        do {
-                bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT |
-                                    MI_FORCE_WRITE_COMPLETION_CHECK |
-                                    MI_SDI_NUM_DW(1);
-                bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
-                bb->cs[bb->len++] = 0;
-                bb->cs[bb->len++] = flex->value;
-
-        } while (flex++, --count);
-}
-
-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
-                                  const struct flex *flex, u32 count)
-{
-        struct dma_fence *fence;
-        struct xe_bb *bb;
-        int err;
-
-        bb = xe_bb_new(stream->gt, 4 * count, false);
-        if (IS_ERR(bb)) {
-                err = PTR_ERR(bb);
-                goto exit;
-        }
-
-        xe_oa_store_flex(stream, lrc, bb, flex, count);
-
-        fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
-        if (IS_ERR(fence)) {
-                err = PTR_ERR(fence);
-                goto free_bb;
-        }
-        xe_bb_free(bb, fence);
-        dma_fence_put(fence);
-
-        return 0;
-free_bb:
-        xe_bb_free(bb, NULL);
-exit:
-        return err;
-}
-
-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
+static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
 {
         struct dma_fence *fence;
         struct xe_bb *bb;
         int err;
 
-        bb = xe_bb_new(stream->gt, 3, false);
+        bb = xe_bb_new(stream->gt, 2 * count + 1, false);
         if (IS_ERR(bb)) {
                 err = PTR_ERR(bb);
                 goto exit;
         }
 
-        write_cs_mi_lri(bb, reg_lri, 1);
+        write_cs_mi_lri(bb, reg_lri, count);
 
         fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
         if (IS_ERR(fence)) {
@@ -762,71 +732,55 @@ static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *re
 static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
 {
         const struct xe_oa_format *format = stream->oa_buffer.format;
-        struct xe_lrc *lrc = stream->exec_q->lrc[0];
-        u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
         u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
                 (enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
 
-        struct flex regs_context[] = {
+        struct xe_oa_reg reg_lri[] = {
                 {
                         OACTXCONTROL(stream->hwe->mmio_base),
-                        stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
                         enable ? OA_COUNTER_RESUME : 0,
                 },
+                {
+                        OAR_OACONTROL,
+                        oacontrol,
+                },
                 {
                         RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
-                        regs_offset + CTX_CONTEXT_CONTROL,
-                        _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
+                        _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+                                      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
                 },
         };
-        struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
-        int err;
-
-        /* Modify stream hwe context image with regs_context */
-        err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
-                                     regs_context, ARRAY_SIZE(regs_context));
-        if (err)
-                return err;
 
-        /* Apply reg_lri using LRI */
-        return xe_oa_load_with_lri(stream, &reg_lri);
+        return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
 }
 
 static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
 {
         const struct xe_oa_format *format = stream->oa_buffer.format;
-        struct xe_lrc *lrc = stream->exec_q->lrc[0];
-        u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
         u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
                 (enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
-        struct flex regs_context[] = {
+        struct xe_oa_reg reg_lri[] = {
                 {
                         OACTXCONTROL(stream->hwe->mmio_base),
-                        stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
                         enable ? OA_COUNTER_RESUME : 0,
                 },
+                {
+                        OAC_OACONTROL,
+                        oacontrol
+                },
                 {
                         RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
-                        regs_offset + CTX_CONTEXT_CONTROL,
-                        _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+                        _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+                                      enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
                         _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
                 },
         };
-        struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
-        int err;
 
         /* Set ccs select to enable programming of OAC_OACONTROL */
         xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
                         __oa_ccs_select(stream));
 
-        /* Modify stream hwe context image with regs_context */
-        err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
-                                     regs_context, ARRAY_SIZE(regs_context));
-        if (err)
-                return err;
-
-        /* Apply reg_lri using LRI */
-        return xe_oa_load_with_lri(stream, &reg_lri);
+        return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
 }
 
 static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
@@ -2110,8 +2064,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
                 if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
                         return -ENOENT;
 
-                if (param.exec_q->width > 1)
-                        drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
+                if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
+                        return -EOPNOTSUPP;
         }
 
         /*
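A note on the _MASKED_FIELD() writes in the reg_lri tables above:
RING_CONTEXT_CONTROL is a masked register, where the upper 16 bits of
the written value select which of the lower 16 bits are updated. That
is what lets a single LRI both set the bit (enable) and clear it
(disable) without a read-modify-write, and it is why the v4 fix for
disabling CTX_CTRL_OAC_CONTEXT_ENABLE writes _MASKED_FIELD(bit, 0)
rather than plain 0. A standalone model, assuming the conventional
(mask << 16) | value encoding; the bit position chosen here for
CTX_CTRL_OAC_CONTEXT_ENABLE is a placeholder, not the real definition:

#include <stdint.h>
#include <stdio.h>

#define _MASKED_FIELD(mask, value)  (((uint32_t)(mask) << 16) | (value))

#define CTX_CTRL_OAC_CONTEXT_ENABLE  (1u << 8)  /* placeholder bit */

/* What the hardware conceptually does with a masked-register write:
 * only bits whose mask half is set are updated. */
static uint32_t apply_masked_write(uint32_t reg, uint32_t masked)
{
        uint32_t mask = masked >> 16;

        return (reg & ~mask) | (masked & mask);
}

int main(void)
{
        uint32_t reg = 0;

        /* enable: mask bit and value bit both set */
        reg = apply_masked_write(reg,
                        _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
                                      CTX_CTRL_OAC_CONTEXT_ENABLE));
        printf("after enable:  0x%08x\n", (unsigned)reg); /* bit 8 set */

        /* disable: mask bit set, value clear; other bits untouched */
        reg = apply_masked_write(reg,
                        _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE, 0));
        printf("after disable: 0x%08x\n", (unsigned)reg); /* bit 8 clear */
        return 0;
}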
drivers/gpu/drm/xe/xe_ring_ops.c
4 additions, 1 deletion

@@ -223,7 +223,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
 
 static u32 get_ppgtt_flag(struct xe_sched_job *job)
 {
-        return job->q->vm ? BIT(8) : 0;
+        if (job->q->vm && !job->ggtt)
+                return BIT(8);
+
+        return 0;
 }
 
 static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
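
The job->ggtt flag added by this commit is consumed here:
get_ppgtt_flag() supplies the address-space bit (BIT(8)) for the
batch-buffer start, and suppressing it makes the ring execute the OA
batch with GGTT addressing even on a VM-backed user queue. A
standalone model of that decision, with the xe structs reduced to
illustrative stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Minimal stand-ins for the xe structures involved. */
struct vm { int dummy; };
struct exec_queue { struct vm *vm; };
struct sched_job { struct exec_queue *q; bool ggtt; };

/* Mirrors the updated get_ppgtt_flag(): a GGTT-mapped batch must not
 * set the PPGTT address-space bit, even on a VM-backed user queue. */
static uint32_t get_ppgtt_flag(struct sched_job *job)
{
        if (job->q->vm && !job->ggtt)
                return BIT(8);

        return 0;
}

int main(void)
{
        struct vm vm;
        struct exec_queue user_q = { .vm = &vm };
        struct sched_job oa_job = { .q = &user_q, .ggtt = true };
        struct sched_job normal_job = { .q = &user_q, .ggtt = false };

        printf("OA batch flag:   0x%x\n", (unsigned)get_ppgtt_flag(&oa_job));     /* 0x0   */
        printf("user batch flag: 0x%x\n", (unsigned)get_ppgtt_flag(&normal_job)); /* 0x100 */
        return 0;
}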

drivers/gpu/drm/xe/xe_sched_job_types.h
2 additions, 0 deletions

@@ -56,6 +56,8 @@ struct xe_sched_job {
         u32 migrate_flush_flags;
         /** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
         bool ring_ops_flush_tlb;
+        /** @ggtt: mapped in ggtt. */
+        bool ggtt;
         /** @ptrs: per instance pointers. */
         struct xe_job_ptrs ptrs[];
 };
