LCOV - code coverage report
Current view: top level - include/trace/events - sched.h (source / functions) Hit Total Coverage
Test: landlock.info Lines: 20 42 47.6 %
Date: 2021-04-22 12:43:58 Functions: 20 115 17.4 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #undef TRACE_SYSTEM
       3             : #define TRACE_SYSTEM sched
       4             : 
       5             : #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
       6             : #define _TRACE_SCHED_H
       7             : 
       8             : #include <linux/kthread.h>
       9             : #include <linux/sched/numa_balancing.h>
      10             : #include <linux/tracepoint.h>
      11             : #include <linux/binfmts.h>
      12             : 
       13             : /*
       14             :  * Tracepoint for calling kthread_stop, performed to end a kthread:
                      :  * records the comm and pid of the kthread being stopped.
       15             :  */
       16           0 : TRACE_EVENT(sched_kthread_stop,
       17             : 
       18             :         TP_PROTO(struct task_struct *t),
       19             : 
       20             :         TP_ARGS(t),
       21             : 
       22             :         TP_STRUCT__entry(
       23             :                 __array(        char,   comm,   TASK_COMM_LEN   )
       24             :                 __field(        pid_t,  pid                     )
       25             :         ),
       26             : 
       27             :         TP_fast_assign(
       28             :                 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
       29             :                 __entry->pid = t->pid;
       30             :         ),
       31             : 
       32             :         TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
       33             : );
      34             : 
       35             : /*
       36             :  * Tracepoint for the return value of the kthread stopping:
                      :  * only the int result is recorded.
       37             :  */
       38           0 : TRACE_EVENT(sched_kthread_stop_ret,
       39             : 
       40             :         TP_PROTO(int ret),
       41             : 
       42             :         TP_ARGS(ret),
       43             : 
       44             :         TP_STRUCT__entry(
       45             :                 __field(        int,    ret     )
       46             :         ),
       47             : 
       48             :         TP_fast_assign(
       49             :                 __entry->ret = ret;
       50             :         ),
       51             : 
       52             :         TP_printk("ret=%d", __entry->ret)
       53             : );
      54             : 
       55             : /**
       56             :  * sched_kthread_work_queue_work - called when a work gets queued
       57             :  * @worker:     pointer to the kthread_worker
       58             :  * @work:       pointer to struct kthread_work
       59             :  *
       60             :  * This event occurs when a work is queued immediately or once a
       61             :  * delayed work is actually queued (ie: once the delay has been
       62             :  * reached).
                      :  * The work, its callback (work->func) and the worker are recorded
                      :  * as raw pointers.
       63             :  */
       64           0 : TRACE_EVENT(sched_kthread_work_queue_work,
       65             : 
       66             :         TP_PROTO(struct kthread_worker *worker,
       67             :                  struct kthread_work *work),
       68             : 
       69             :         TP_ARGS(worker, work),
       70             : 
       71             :         TP_STRUCT__entry(
       72             :                 __field( void *,        work    )
       73             :                 __field( void *,        function)
       74             :                 __field( void *,        worker)
       75             :         ),
       76             : 
       77             :         TP_fast_assign(
       78             :                 __entry->work                = work;
       79             :                 __entry->function    = work->func;
       80             :                 __entry->worker              = worker;
       81             :         ),
       82             : 
       83             :         TP_printk("work struct=%p function=%ps worker=%p",
       84             :                   __entry->work, __entry->function, __entry->worker)
       85             : );
      86             : 
       87             : /**
       88             :  * sched_kthread_work_execute_start - called immediately before the work callback
       89             :  * @work:       pointer to struct kthread_work
       90             :  *
       91             :  * Allows to track kthread work execution.
                      :  * Pairs with sched_kthread_work_execute_end below.
       92             :  */
       93           0 : TRACE_EVENT(sched_kthread_work_execute_start,
       94             : 
       95             :         TP_PROTO(struct kthread_work *work),
       96             : 
       97             :         TP_ARGS(work),
       98             : 
       99             :         TP_STRUCT__entry(
      100             :                 __field( void *,        work    )
      101             :                 __field( void *,        function)
      102             :         ),
      103             : 
      104             :         TP_fast_assign(
      105             :                 __entry->work                = work;
      106             :                 __entry->function    = work->func;
      107             :         ),
      108             : 
      109             :         TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
      110             : );
     111             : 
      112             : /**
      113             :  * sched_kthread_work_execute_end - called immediately after the work callback
      114             :  * @work:       pointer to struct kthread_work
      115             :  * @function:   pointer to worker function
      116             :  *
      117             :  * Allows to track kthread work execution.
                      :  * NOTE(review): @function is passed explicitly rather than read from
                      :  * @work — presumably because @work may no longer be valid here; confirm
                      :  * against the caller in kthread.c.
      118             :  */
      119           0 : TRACE_EVENT(sched_kthread_work_execute_end,
      120             : 
      121             :         TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
      122             : 
      123             :         TP_ARGS(work, function),
      124             : 
      125             :         TP_STRUCT__entry(
      126             :                 __field( void *,        work    )
      127             :                 __field( void *,        function)
      128             :         ),
      129             : 
      130             :         TP_fast_assign(
      131             :                 __entry->work                = work;
      132             :                 __entry->function    = function;
      133             :         ),
      134             : 
      135             :         TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
      136             : );
     137             : 
      138             : /*
      139             :  * Tracepoint for waking up a task:
                      :  * the 'success' field is a historical rudiment, always recorded as 1
                      :  * and not printed.
      140             :  */
      141           0 : DECLARE_EVENT_CLASS(sched_wakeup_template,
      142             : 
      143             :         TP_PROTO(struct task_struct *p),
      144             : 
      145             :         TP_ARGS(__perf_task(p)),
      146             : 
      147             :         TP_STRUCT__entry(
      148             :                 __array(        char,   comm,   TASK_COMM_LEN   )
      149             :                 __field(        pid_t,  pid                     )
      150             :                 __field(        int,    prio                    )
      151             :                 __field(        int,    success                 )
      152             :                 __field(        int,    target_cpu              )
      153             :         ),
      154             : 
      155             :         TP_fast_assign(
      156             :                 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
      157             :                 __entry->pid         = p->pid;
      158             :                 __entry->prio                = p->prio; /* XXX SCHED_DEADLINE */
      159             :                 __entry->success     = 1; /* rudiment, kill when possible */
      160             :                 __entry->target_cpu  = task_cpu(p);
      161             :         ),
      162             : 
      163             :         TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
      164             :                   __entry->comm, __entry->pid, __entry->prio,
      165             :                   __entry->target_cpu)
      166             : );
     167             : 
      168             : /*
      169             :  * Tracepoint called when waking a task; this tracepoint is guaranteed to be
      170             :  * called from the waking context.
                      :  * (Instance of sched_wakeup_template.)
      171             :  */
      172       28056 : DEFINE_EVENT(sched_wakeup_template, sched_waking,
      173             :              TP_PROTO(struct task_struct *p),
      174             :              TP_ARGS(p));
     175             : 
      176             : /*
      177             :  * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
      178             :  * It is not always called from the waking context.
      179             :  */
      180       28050 : DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
      181             :              TP_PROTO(struct task_struct *p),
      182             :              TP_ARGS(p));
     183             : 
      184             : /*
      185             :  * Tracepoint for waking up a new task:
                      :  * (Instance of sched_wakeup_template.)
      186             :  */
      187        1954 : DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
      188             :              TP_PROTO(struct task_struct *p),
      189             :              TP_ARGS(p));
     190             : 
      191             : #ifdef CREATE_TRACE_POINTS
                      : /*
                      :  * Compute the prev_state value recorded by sched_switch below:
                      :  * TASK_REPORT_MAX for a preempted (still runnable) task, otherwise
                      :  * the compacted task_state_index() expanded back into a state bitmask.
                      :  */
      192           0 : static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
      193             : {
      194           0 :         unsigned int state;
      195             : 
      196             : #ifdef CONFIG_SCHED_DEBUG
      197             :         BUG_ON(p != current);
      198             : #endif /* CONFIG_SCHED_DEBUG */
      199             : 
      200             :         /*
      201             :          * Preemption ignores task state, therefore preempted tasks are always
      202             :          * RUNNING (we will not have dequeued if state != RUNNING).
      203             :          */
      204           0 :         if (preempt)
      205             :                 return TASK_REPORT_MAX;
      206             : 
      207             :         /*
      208             :          * task_state_index() uses fls() and returns a value from 0-8 range.
      209             :          * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
      210             :          * it for left shift operation to get the correct task->state
      211             :          * mapping.
      212             :          */
      213           0 :         state = task_state_index(p);
      214             : 
      215           0 :         return state ? (1 << (state - 1)) : state;
      216             : }
      217             : #endif /* CREATE_TRACE_POINTS */
     218             : 
      219             : /*
      220             :  * Tracepoint for task switches, performed by the scheduler:
                      :  * prev_state holds the bitmask produced by __trace_sched_switch_state();
                      :  * a '+' suffix in the printk marks a preempted (TASK_REPORT_MAX) task.
      221             :  */
      222       53033 : TRACE_EVENT(sched_switch,
      223             : 
      224             :         TP_PROTO(bool preempt,
      225             :                  struct task_struct *prev,
      226             :                  struct task_struct *next),
      227             : 
      228             :         TP_ARGS(preempt, prev, next),
      229             : 
      230             :         TP_STRUCT__entry(
      231             :                 __array(        char,   prev_comm,      TASK_COMM_LEN   )
      232             :                 __field(        pid_t,  prev_pid                        )
      233             :                 __field(        int,    prev_prio                       )
      234             :                 __field(        long,   prev_state                      )
      235             :                 __array(        char,   next_comm,      TASK_COMM_LEN   )
      236             :                 __field(        pid_t,  next_pid                        )
      237             :                 __field(        int,    next_prio                       )
      238             :         ),
      239             : 
      240             :         TP_fast_assign(
      241             :                 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
      242             :                 __entry->prev_pid    = prev->pid;
      243             :                 __entry->prev_prio   = prev->prio;
      244             :                 __entry->prev_state  = __trace_sched_switch_state(preempt, prev);
      245             :                 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
      246             :                 __entry->next_pid    = next->pid;
      247             :                 __entry->next_prio   = next->prio;
      248             :                 /* XXX SCHED_DEADLINE */
      249             :         ),
      250             : 
      251             :         TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
      252             :                 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
      253             : 
      254             :                 (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
      255             :                   __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
      256             :                                 { TASK_INTERRUPTIBLE, "S" },
      257             :                                 { TASK_UNINTERRUPTIBLE, "D" },
      258             :                                 { __TASK_STOPPED, "T" },
      259             :                                 { __TASK_TRACED, "t" },
      260             :                                 { EXIT_DEAD, "X" },
      261             :                                 { EXIT_ZOMBIE, "Z" },
      262             :                                 { TASK_PARKED, "P" },
      263             :                                 { TASK_DEAD, "I" }) :
      264             :                   "R",
      265             : 
      266             :                 __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
      267             :                 __entry->next_comm, __entry->next_pid, __entry->next_prio)
      268             : );
     269             : 
      270             : /*
      271             :  * Tracepoint for a task being migrated:
                      :  * orig_cpu is sampled with task_cpu() at the time the event fires.
      272             :  */
      273        1988 : TRACE_EVENT(sched_migrate_task,
      274             : 
      275             :         TP_PROTO(struct task_struct *p, int dest_cpu),
      276             : 
      277             :         TP_ARGS(p, dest_cpu),
      278             : 
      279             :         TP_STRUCT__entry(
      280             :                 __array(        char,   comm,   TASK_COMM_LEN   )
      281             :                 __field(        pid_t,  pid                     )
      282             :                 __field(        int,    prio                    )
      283             :                 __field(        int,    orig_cpu                )
      284             :                 __field(        int,    dest_cpu                )
      285             :         ),
      286             : 
      287             :         TP_fast_assign(
      288             :                 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
      289             :                 __entry->pid         = p->pid;
      290             :                 __entry->prio                = p->prio; /* XXX SCHED_DEADLINE */
      291             :                 __entry->orig_cpu    = task_cpu(p);
      292             :                 __entry->dest_cpu    = dest_cpu;
      293             :         ),
      294             : 
      295             :         TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
      296             :                   __entry->comm, __entry->pid, __entry->prio,
      297             :                   __entry->orig_cpu, __entry->dest_cpu)
      298             : );
     299             : 
                      : /*
                      :  * Common event class recording comm, pid and prio of a task; used by
                      :  * the process lifecycle tracepoints below.
                      :  */
      300           0 : DECLARE_EVENT_CLASS(sched_process_template,
      301             : 
      302             :         TP_PROTO(struct task_struct *p),
      303             : 
      304             :         TP_ARGS(p),
      305             : 
      306             :         TP_STRUCT__entry(
      307             :                 __array(        char,   comm,   TASK_COMM_LEN   )
      308             :                 __field(        pid_t,  pid                     )
      309             :                 __field(        int,    prio                    )
      310             :         ),
      311             : 
      312             :         TP_fast_assign(
      313             :                 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
      314             :                 __entry->pid         = p->pid;
      315             :                 __entry->prio                = p->prio; /* XXX SCHED_DEADLINE */
      316             :         ),
      317             : 
      318             :         TP_printk("comm=%s pid=%d prio=%d",
      319             :                   __entry->comm, __entry->pid, __entry->prio)
      320             : );
     321             : 
      322             : /*
      323             :  * Tracepoint for freeing a task:
                      :  * (Instance of sched_process_template.)
      324             :  */
      325           0 : DEFINE_EVENT(sched_process_template, sched_process_free,
      326             :              TP_PROTO(struct task_struct *p),
      327             :              TP_ARGS(p));
     328             : 
      329             : /*
      330             :  * Tracepoint for a task exiting:
                      :  * (Instance of sched_process_template.)
      331             :  */
      332           0 : DEFINE_EVENT(sched_process_template, sched_process_exit,
      333             :              TP_PROTO(struct task_struct *p),
      334             :              TP_ARGS(p));
     335             : 
      336             : /*
      337             :  * Tracepoint for waiting on task to unschedule:
                      :  * (Instance of sched_process_template.)
      338             :  */
      339         168 : DEFINE_EVENT(sched_process_template, sched_wait_task,
      340             :         TP_PROTO(struct task_struct *p),
      341             :         TP_ARGS(p));
     342             : 
      343             : /*
      344             :  * Tracepoint for a waiting task:
                      :  * comm and prio come from 'current' (the waiter); pid is the waited-on
                      :  * pid (via pid_nr()).
      345             :  */
      346           0 : TRACE_EVENT(sched_process_wait,
      347             : 
      348             :         TP_PROTO(struct pid *pid),
      349             : 
      350             :         TP_ARGS(pid),
      351             : 
      352             :         TP_STRUCT__entry(
      353             :                 __array(        char,   comm,   TASK_COMM_LEN   )
      354             :                 __field(        pid_t,  pid                     )
      355             :                 __field(        int,    prio                    )
      356             :         ),
      357             : 
      358             :         TP_fast_assign(
      359             :                 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
      360             :                 __entry->pid         = pid_nr(pid);
      361             :                 __entry->prio                = current->prio; /* XXX SCHED_DEADLINE */
      362             :         ),
      363             : 
      364             :         TP_printk("comm=%s pid=%d prio=%d",
      365             :                   __entry->comm, __entry->pid, __entry->prio)
      366             : );
     367             : 
      368             : /*
      369             :  * Tracepoint for kernel_clone:
                      :  * records parent and child comm/pid; note the printk labels the parent
                      :  * fields plainly as comm=/pid=.
      370             :  */
      371        1826 : TRACE_EVENT(sched_process_fork,
      372             : 
      373             :         TP_PROTO(struct task_struct *parent, struct task_struct *child),
      374             : 
      375             :         TP_ARGS(parent, child),
      376             : 
      377             :         TP_STRUCT__entry(
      378             :                 __array(        char,   parent_comm,    TASK_COMM_LEN   )
      379             :                 __field(        pid_t,  parent_pid                      )
      380             :                 __array(        char,   child_comm,     TASK_COMM_LEN   )
      381             :                 __field(        pid_t,  child_pid                       )
      382             :         ),
      383             : 
      384             :         TP_fast_assign(
      385             :                 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
      386             :                 __entry->parent_pid  = parent->pid;
      387             :                 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
      388             :                 __entry->child_pid   = child->pid;
      389             :         ),
      390             : 
      391             :         TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
      392             :                 __entry->parent_comm, __entry->parent_pid,
      393             :                 __entry->child_comm, __entry->child_pid)
      394             : );
     395             : 
      396             : /*
      397             :  * Tracepoint for exec:
                      :  * records bprm->filename plus the task's pid and old_pid.
                      :  * NOTE(review): old_pid is supplied by the caller — presumably the pid
                      :  * the task had before the exec; confirm against the exec path.
      398             :  */
      399        2042 : TRACE_EVENT(sched_process_exec,
      400             : 
      401             :         TP_PROTO(struct task_struct *p, pid_t old_pid,
      402             :                  struct linux_binprm *bprm),
      403             : 
      404             :         TP_ARGS(p, old_pid, bprm),
      405             : 
      406             :         TP_STRUCT__entry(
      407             :                 __string(       filename,       bprm->filename       )
      408             :                 __field(        pid_t,          pid             )
      409             :                 __field(        pid_t,          old_pid         )
      410             :         ),
      411             : 
      412             :         TP_fast_assign(
      413             :                 __assign_str(filename, bprm->filename);
      414             :                 __entry->pid         = p->pid;
      415             :                 __entry->old_pid     = old_pid;
      416             :         ),
      417             : 
      418             :         TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
      419             :                   __entry->pid, __entry->old_pid)
      420             : );
     421             : 
     422             : 
                      : /* Stub out schedstat-only tracepoints when CONFIG_SCHEDSTATS is unset. */
      423             : #ifdef CONFIG_SCHEDSTATS
      424             : #define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
      425             : #define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
      426             : #else
      427             : #define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
      428             : #define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
      429             : #endif
     430             : 
      431             : /*
      432             :  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
      433             :  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
                      :  *
                      :  * Common class for the sched_stat_* events; delay is in nanoseconds.
      434             :  */
      435             : DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
      436             : 
      437             :         TP_PROTO(struct task_struct *tsk, u64 delay),
      438             : 
      439             :         TP_ARGS(__perf_task(tsk), __perf_count(delay)),
      440             : 
      441             :         TP_STRUCT__entry(
      442             :                 __array( char,  comm,   TASK_COMM_LEN   )
      443             :                 __field( pid_t, pid                     )
      444             :                 __field( u64,   delay                   )
      445             :         ),
      446             : 
      447             :         TP_fast_assign(
      448             :                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
      449             :                 __entry->pid = tsk->pid;
      450             :                 __entry->delay       = delay;
      451             :         ),
      452             : 
      453             :         TP_printk("comm=%s pid=%d delay=%Lu [ns]",
      454             :                         __entry->comm, __entry->pid,
      455             :                         (unsigned long long)__entry->delay)
      456             : );
     457             : 
      458             : /*
      459             :  * Tracepoint for accounting wait time (time the task is runnable
      460             :  * but not actually running due to scheduler contention).
                      :  * Compiled out when CONFIG_SCHEDSTATS is unset.
      461             :  */
      462             : DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
      463             :              TP_PROTO(struct task_struct *tsk, u64 delay),
      464             :              TP_ARGS(tsk, delay));
     465             : 
      466             : /*
      467             :  * Tracepoint for accounting sleep time (time the task is not runnable,
      468             :  * including iowait, see below).
                      :  * Compiled out when CONFIG_SCHEDSTATS is unset.
      469             :  */
      470             : DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
      471             :              TP_PROTO(struct task_struct *tsk, u64 delay),
      472             :              TP_ARGS(tsk, delay));
     473             : 
      474             : /*
      475             :  * Tracepoint for accounting iowait time (time the task is not runnable
      476             :  * due to waiting on IO to complete).
                      :  * Compiled out when CONFIG_SCHEDSTATS is unset.
      477             :  */
      478             : DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
      479             :              TP_PROTO(struct task_struct *tsk, u64 delay),
      480             :              TP_ARGS(tsk, delay));
     481             : 
      482             : /*
      483             :  * Tracepoint for accounting blocked time (time the task is in uninterruptible sleep).
                      :  * Compiled out when CONFIG_SCHEDSTATS is unset.
      484             :  */
      485             : DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
      486             :              TP_PROTO(struct task_struct *tsk, u64 delay),
      487             :              TP_ARGS(tsk, delay));
     488             : 
      489             : /*
      490             :  * Tracepoint for accounting runtime (time the task is executing
      491             :  * on a CPU).
                      :  * Note: this class shares its name with the event instance defined below.
      492             :  */
      493           0 : DECLARE_EVENT_CLASS(sched_stat_runtime,
      494             : 
      495             :         TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
      496             : 
      497             :         TP_ARGS(tsk, __perf_count(runtime), vruntime),
      498             : 
      499             :         TP_STRUCT__entry(
      500             :                 __array( char,  comm,   TASK_COMM_LEN   )
      501             :                 __field( pid_t, pid                     )
      502             :                 __field( u64,   runtime                 )
      503             :                 __field( u64,   vruntime                        )
      504             :         ),
      505             : 
      506             :         TP_fast_assign(
      507             :                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
      508             :                 __entry->pid         = tsk->pid;
      509             :                 __entry->runtime     = runtime;
      510             :                 __entry->vruntime    = vruntime;
      511             :         ),
      512             : 
      513             :         TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
      514             :                         __entry->comm, __entry->pid,
      515             :                         (unsigned long long)__entry->runtime,
      516             :                         (unsigned long long)__entry->vruntime)
      517             : );
     518             : 
                      : /* Event instance of the sched_stat_runtime class above. */
      519       82108 : DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
      520             :              TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
      521             :              TP_ARGS(tsk, runtime, vruntime));
     522             : 
      523             : /*
      524             :  * Tracepoint for showing priority inheritance modifying a task's
      525             :  * priority.
                      :  * newprio is tsk->normal_prio, boosted by pi_task->prio when a donor
                      :  * task exists.
      526             :  */
      527           0 : TRACE_EVENT(sched_pi_setprio,
      528             : 
      529             :         TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
      530             : 
      531             :         TP_ARGS(tsk, pi_task),
      532             : 
      533             :         TP_STRUCT__entry(
      534             :                 __array( char,  comm,   TASK_COMM_LEN   )
      535             :                 __field( pid_t, pid                     )
      536             :                 __field( int,   oldprio                 )
      537             :                 __field( int,   newprio                 )
      538             :         ),
      539             : 
      540             :         TP_fast_assign(
      541             :                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
      542             :                 __entry->pid         = tsk->pid;
      543             :                 __entry->oldprio     = tsk->prio;
      544             :                 __entry->newprio     = pi_task ?
      545             :                                 min(tsk->normal_prio, pi_task->prio) :
      546             :                                 tsk->normal_prio;
      547             :                 /* XXX SCHED_DEADLINE bits missing */
      548             :         ),
      549             : 
      550             :         TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
      551             :                         __entry->comm, __entry->pid,
      552             :                         __entry->oldprio, __entry->newprio)
      553             : );
     554             : 
      555             : #ifdef CONFIG_DETECT_HUNG_TASK
                      : /*
                      :  * Tracepoint for a task detected as hung; only available with
                      :  * CONFIG_DETECT_HUNG_TASK.
                      :  */
      556             : TRACE_EVENT(sched_process_hang,
      557             :         TP_PROTO(struct task_struct *tsk),
      558             :         TP_ARGS(tsk),
      559             : 
      560             :         TP_STRUCT__entry(
      561             :                 __array( char,  comm,   TASK_COMM_LEN   )
      562             :                 __field( pid_t, pid                     )
      563             :         ),
      564             : 
      565             :         TP_fast_assign(
      566             :                 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
      567             :                 __entry->pid = tsk->pid;
      568             :         ),
      569             : 
      570             :         TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
      571             : );
      572             : #endif /* CONFIG_DETECT_HUNG_TASK */
     573             : 
      574             : /*
      575             :  * Tracks migration of tasks from one runqueue to another. Can be used to
      576             :  * detect if automatic NUMA balancing is bouncing between nodes.
                      :  * NUMA node ids are derived from the cpus with cpu_to_node().
      577             :  */
      578           0 : TRACE_EVENT(sched_move_numa,
      579             : 
      580             :         TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
      581             : 
      582             :         TP_ARGS(tsk, src_cpu, dst_cpu),
      583             : 
      584             :         TP_STRUCT__entry(
      585             :                 __field( pid_t, pid                     )
      586             :                 __field( pid_t, tgid                    )
      587             :                 __field( pid_t, ngid                    )
      588             :                 __field( int,   src_cpu                 )
      589             :                 __field( int,   src_nid                 )
      590             :                 __field( int,   dst_cpu                 )
      591             :                 __field( int,   dst_nid                 )
      592             :         ),
      593             : 
      594             :         TP_fast_assign(
      595             :                 __entry->pid         = task_pid_nr(tsk);
      596             :                 __entry->tgid                = task_tgid_nr(tsk);
      597             :                 __entry->ngid                = task_numa_group_id(tsk);
      598             :                 __entry->src_cpu     = src_cpu;
      599             :                 __entry->src_nid     = cpu_to_node(src_cpu);
      600             :                 __entry->dst_cpu     = dst_cpu;
      601             :                 __entry->dst_nid     = cpu_to_node(dst_cpu);
      602             :         ),
      603             : 
      604             :         TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
      605             :                         __entry->pid, __entry->tgid, __entry->ngid,
      606             :                         __entry->src_cpu, __entry->src_nid,
      607             :                         __entry->dst_cpu, __entry->dst_nid)
      608             : );
     609             : 
                      : /*
                      :  * Shared event layout for NUMA balancing events that pair a source
                      :  * task/cpu with a destination task/cpu. The destination side is
                      :  * optional: dst_tsk may be NULL (dst pid/tgid/ngid are then logged
                      :  * as 0) and dst_cpu may be negative (dst_nid is then logged as -1).
                      :  */
      610           0 : DECLARE_EVENT_CLASS(sched_numa_pair_template,
      611             : 
      612             :         TP_PROTO(struct task_struct *src_tsk, int src_cpu,
      613             :                  struct task_struct *dst_tsk, int dst_cpu),
      614             : 
      615             :         TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
      616             : 
      617             :         TP_STRUCT__entry(
      618             :                 __field( pid_t, src_pid                 )
      619             :                 __field( pid_t, src_tgid                )
      620             :                 __field( pid_t, src_ngid                )
      621             :                 __field( int,   src_cpu                 )
      622             :                 __field( int,   src_nid                 )
      623             :                 __field( pid_t, dst_pid                 )
      624             :                 __field( pid_t, dst_tgid                )
      625             :                 __field( pid_t, dst_ngid                )
      626             :                 __field( int,   dst_cpu                 )
      627             :                 __field( int,   dst_nid                 )
      628             :         ),
      629             : 
      630             :         TP_fast_assign(
      631             :                 __entry->src_pid     = task_pid_nr(src_tsk);
      632             :                 __entry->src_tgid    = task_tgid_nr(src_tsk);
      633             :                 __entry->src_ngid    = task_numa_group_id(src_tsk);
      634             :                 __entry->src_cpu     = src_cpu;
      635             :                 __entry->src_nid     = cpu_to_node(src_cpu);
                      :                 /* Destination may be absent; log neutral values then. */
      636             :                 __entry->dst_pid     = dst_tsk ? task_pid_nr(dst_tsk) : 0;
      637             :                 __entry->dst_tgid    = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
      638             :                 __entry->dst_ngid    = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
      639             :                 __entry->dst_cpu     = dst_cpu;
      640             :                 __entry->dst_nid     = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
      641             :         ),
      642             : 
      643             :         TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
      644             :                         __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
      645             :                         __entry->src_cpu, __entry->src_nid,
      646             :                         __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
      647             :                         __entry->dst_cpu, __entry->dst_nid)
      648             : );
     649             : 
                      : /*
                      :  * Instance of sched_numa_pair_template; see the event class for the
                      :  * field semantics. NOTE(review): the name suggests a NUMA migration
                      :  * attempt where the task "sticks" to its current placement — confirm
                      :  * against the caller in kernel/sched/.
                      :  */
      650           0 : DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,
      651             : 
      652             :         TP_PROTO(struct task_struct *src_tsk, int src_cpu,
      653             :                  struct task_struct *dst_tsk, int dst_cpu),
      654             : 
      655             :         TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
      656             : );
     657             : 
                      : /*
                      :  * Instance of sched_numa_pair_template; see the event class for the
                      :  * field semantics. NOTE(review): the name suggests two tasks being
                      :  * swapped across nodes by NUMA balancing — confirm against the
                      :  * caller in kernel/sched/.
                      :  */
      658           0 : DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
      659             : 
      660             :         TP_PROTO(struct task_struct *src_tsk, int src_cpu,
      661             :                  struct task_struct *dst_tsk, int dst_cpu),
      662             : 
      663             :         TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
      664             : );
     665             : 
     666             : 
     667             : /*
     668             :  * Tracepoint for waking a polling cpu without an IPI.
     669             :  */
      670          56 : TRACE_EVENT(sched_wake_idle_without_ipi,
      671             : 
      672             :         TP_PROTO(int cpu),
      673             : 
      674             :         TP_ARGS(cpu),
      675             : 
                      :         /* Single payload field: the polling cpu that was woken. */
      676             :         TP_STRUCT__entry(
      677             :                 __field(        int,    cpu     )
      678             :         ),
      679             : 
      680             :         TP_fast_assign(
      681             :                 __entry->cpu = cpu;
      682             :         ),
      683             : 
      684             :         TP_printk("cpu=%d", __entry->cpu)
      685             : );
     686             : 
     687             : /*
     688             :  * Following tracepoints are not exported in tracefs and provide hooking
     689             :  * mechanisms only for testing and debugging purposes.
     690             :  *
     691             :  * Postfixed with _tp to make them easily identifiable in the code.
     692             :  */
                      : /*
                      :  * NOTE(review): the pelt_* hooks below presumably fire when the
                      :  * corresponding per-entity load-tracking (PELT) signal is updated —
                      :  * confirm against kernel/sched/pelt.c. Each passes the object whose
                      :  * signal changed.
                      :  */
      693       72332 : DECLARE_TRACE(pelt_cfs_tp,
      694             :         TP_PROTO(struct cfs_rq *cfs_rq),
      695             :         TP_ARGS(cfs_rq));
      696             : 
      697       23150 : DECLARE_TRACE(pelt_rt_tp,
      698             :         TP_PROTO(struct rq *rq),
      699             :         TP_ARGS(rq));
      700             : 
      701       23242 : DECLARE_TRACE(pelt_dl_tp,
      702             :         TP_PROTO(struct rq *rq),
      703             :         TP_ARGS(rq));
      704             : 
      705           0 : DECLARE_TRACE(pelt_thermal_tp,
      706             :         TP_PROTO(struct rq *rq),
      707             :         TP_ARGS(rq));
      708             : 
      709       32840 : DECLARE_TRACE(pelt_irq_tp,
      710             :         TP_PROTO(struct rq *rq),
      711             :         TP_ARGS(rq));
      712             : 
      713       77384 : DECLARE_TRACE(pelt_se_tp,
      714             :         TP_PROTO(struct sched_entity *se),
      715             :         TP_ARGS(se));
      716             : 
                      : /* Hook passing a runqueue; name suggests a cpu capacity update. */
      717       15858 : DECLARE_TRACE(sched_cpu_capacity_tp,
      718             :         TP_PROTO(struct rq *rq),
      719             :         TP_ARGS(rq));
      720             : 
                      : /* Reports the overutilized flag for a root domain. */
      721       19132 : DECLARE_TRACE(sched_overutilized_tp,
      722             :         TP_PROTO(struct root_domain *rd, bool overutilized),
      723             :         TP_ARGS(rd, overutilized));
      724             : 
                      : /* util_est hooks: one for a cfs_rq, one for a sched_entity. */
      725       63068 : DECLARE_TRACE(sched_util_est_cfs_tp,
      726             :         TP_PROTO(struct cfs_rq *cfs_rq),
      727             :         TP_ARGS(cfs_rq));
      728             : 
      729       19335 : DECLARE_TRACE(sched_util_est_se_tp,
      730             :         TP_PROTO(struct sched_entity *se),
      731             :         TP_ARGS(se));
      732             : 
                      : /* Passes the runqueue and the signed delta applied to nr_running. */
      733       31614 : DECLARE_TRACE(sched_update_nr_running_tp,
      734             :         TP_PROTO(struct rq *rq, int change),
      735             :         TP_ARGS(rq, change));
     736             : 
     737             : #endif /* _TRACE_SCHED_H */
     738             : 
     739             : /* This part must be outside protection */
     740             : #include <trace/define_trace.h>

Generated by: LCOV version 1.14