drm/sched: Cleanup event names
All events now start with the same prefix (drm_sched_job_). drm_sched_job_wait_dep was misleading because it wasn't waiting at all; it is now replaced by trace_drm_sched_job_unschedulable, which is traced only if the job cannot be scheduled. For moot dependencies, nothing is traced.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://lore.kernel.org/r/20250526125505.2360-8-pierre-eric.pelloux-prayer@amd.com
This commit is contained in:
parent
0a41e1e914
commit
a5db7581f2
3 changed files with 10 additions and 10 deletions
|
|
@@ -63,17 +63,17 @@ DECLARE_EVENT_CLASS(drm_sched_job,
|
|||
__entry->job_count, __entry->hw_job_count, __entry->client_id)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(drm_sched_job, drm_sched_job,
|
||||
DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
|
||||
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
|
||||
TP_ARGS(sched_job, entity)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(drm_sched_job, drm_run_job,
|
||||
DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
|
||||
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
|
||||
TP_ARGS(sched_job, entity)
|
||||
);
|
||||
|
||||
TRACE_EVENT(drm_sched_process_job,
|
||||
TRACE_EVENT(drm_sched_job_done,
|
||||
TP_PROTO(struct drm_sched_fence *fence),
|
||||
TP_ARGS(fence),
|
||||
TP_STRUCT__entry(
|
||||
|
|
@@ -112,7 +112,7 @@ TRACE_EVENT(drm_sched_job_add_dep,
|
|||
__entry->ctx, __entry->seqno)
|
||||
);
|
||||
|
||||
TRACE_EVENT(drm_sched_job_wait_dep,
|
||||
TRACE_EVENT(drm_sched_job_unschedulable,
|
||||
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
|
||||
TP_ARGS(sched_job, fence),
|
||||
TP_STRUCT__entry(
|
||||
|
|
|
|||
|
|
@@ -477,10 +477,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
|
|||
|
||||
while ((entity->dependency =
|
||||
drm_sched_job_dependency(sched_job, entity))) {
|
||||
trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
|
||||
|
||||
if (drm_sched_entity_add_dependency_cb(entity))
|
||||
if (drm_sched_entity_add_dependency_cb(entity)) {
|
||||
trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* skip jobs from entity that marked guilty */
|
||||
|
|
@@ -586,7 +586,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
|
|||
bool first;
|
||||
ktime_t submit_ts;
|
||||
|
||||
trace_drm_sched_job(sched_job, entity);
|
||||
trace_drm_sched_job_queue(sched_job, entity);
|
||||
|
||||
if (trace_drm_sched_job_add_dep_enabled()) {
|
||||
struct dma_fence *entry;
|
||||
|
|
|
|||
|
|
@@ -401,7 +401,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
|
|||
atomic_sub(s_job->credits, &sched->credit_count);
|
||||
atomic_dec(sched->score);
|
||||
|
||||
trace_drm_sched_process_job(s_fence);
|
||||
trace_drm_sched_job_done(s_fence);
|
||||
|
||||
dma_fence_get(&s_fence->finished);
|
||||
drm_sched_fence_finished(s_fence, result);
|
||||
|
|
@@ -1234,7 +1234,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
|
|||
atomic_add(sched_job->credits, &sched->credit_count);
|
||||
drm_sched_job_begin(sched_job);
|
||||
|
||||
trace_drm_run_job(sched_job, entity);
|
||||
trace_drm_sched_job_run(sched_job, entity);
|
||||
/*
|
||||
* The run_job() callback must by definition return a fence whose
|
||||
* refcount has been incremented for the scheduler already.
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue