LCOV - code coverage report
Current view: top level - kernel - kthread.c (source / functions)
Test: landlock.info                          Date: 2021-04-22 12:43:58
                    Hit     Total    Coverage
Lines:              162       479      33.8 %
Functions:           20        50      40.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /* Kernel thread helper functions.
       3             :  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
       4             :  *   Copyright (C) 2009 Red Hat, Inc.
       5             :  *
       6             :  * Creation is done via kthreadd, so that we get a clean environment
       7             :  * even if we're invoked from userspace (think modprobe, hotplug cpu,
       8             :  * etc.).
       9             :  */
      10             : #include <uapi/linux/sched/types.h>
      11             : #include <linux/mm.h>
      12             : #include <linux/mmu_context.h>
      13             : #include <linux/sched.h>
      14             : #include <linux/sched/mm.h>
      15             : #include <linux/sched/task.h>
      16             : #include <linux/kthread.h>
      17             : #include <linux/completion.h>
      18             : #include <linux/err.h>
      19             : #include <linux/cgroup.h>
      20             : #include <linux/cpuset.h>
      21             : #include <linux/unistd.h>
      22             : #include <linux/file.h>
      23             : #include <linux/export.h>
      24             : #include <linux/mutex.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/freezer.h>
      27             : #include <linux/ptrace.h>
      28             : #include <linux/uaccess.h>
      29             : #include <linux/numa.h>
      30             : #include <linux/sched/isolation.h>
      31             : #include <trace/events/sched.h>
      32             : 
      33             : 
      34             : static DEFINE_SPINLOCK(kthread_create_lock);
      35             : static LIST_HEAD(kthread_create_list);
      36             : struct task_struct *kthreadd_task;
      37             : 
      38             : struct kthread_create_info
      39             : {
      40             :         /* Information passed to kthread() from kthreadd. */
      41             :         int (*threadfn)(void *data);
      42             :         void *data;
      43             :         int node;
      44             : 
      45             :         /* Result passed back to kthread_create() from kthreadd. */
      46             :         struct task_struct *result;
      47             :         struct completion *done;
      48             : 
      49             :         struct list_head list;
      50             : };
      51             : 
      52             : struct kthread {
      53             :         unsigned long flags;
      54             :         unsigned int cpu;
      55             :         int (*threadfn)(void *);
      56             :         void *data;
      57             :         mm_segment_t oldfs;
      58             :         struct completion parked;
      59             :         struct completion exited;
      60             : #ifdef CONFIG_BLK_CGROUP
      61             :         struct cgroup_subsys_state *blkcg_css;
      62             : #endif
      63             : };
      64             : 
      65             : enum KTHREAD_BITS {
      66             :         KTHREAD_IS_PER_CPU = 0,
      67             :         KTHREAD_SHOULD_STOP,
      68             :         KTHREAD_SHOULD_PARK,
      69             : };
      70             : 
      71          48 : static inline void set_kthread_struct(void *kthread)
      72             : {
      73             :         /*
      74             :          * We abuse ->set_child_tid to avoid the new member and because it
       75             :          * can't be wrongly copied by copy_process(). We also rely on the
       76             :          * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
      77             :          */
      78          48 :         current->set_child_tid = (__force void __user *)kthread;
      79             : }
      80             : 
      81       29437 : static inline struct kthread *to_kthread(struct task_struct *k)
      82             : {
      83       29437 :         WARN_ON(!(k->flags & PF_KTHREAD));
      84       29437 :         return (__force void *)k->set_child_tid;
      85             : }
      86             : 
      87           0 : void free_kthread_struct(struct task_struct *k)
      88             : {
      89           0 :         struct kthread *kthread;
      90             : 
      91             :         /*
      92             :          * Can be NULL if this kthread was created by kernel_thread()
      93             :          * or if kmalloc() in kthread() failed.
      94             :          */
      95           0 :         kthread = to_kthread(k);
      96             : #ifdef CONFIG_BLK_CGROUP
      97             :         WARN_ON_ONCE(kthread && kthread->blkcg_css);
      98             : #endif
      99           0 :         kfree(kthread);
     100           0 : }
     101             : 
     102             : /**
     103             :  * kthread_should_stop - should this kthread return now?
     104             :  *
     105             :  * When someone calls kthread_stop() on your kthread, it will be woken
     106             :  * and this will return true.  You should then return, and your return
     107             :  * value will be passed through to kthread_stop().
     108             :  */
     109        8062 : bool kthread_should_stop(void)
     110             : {
     111        8062 :         return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
     112             : }
     113             : EXPORT_SYMBOL(kthread_should_stop);
     114             : 
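In practice, a kthread's main function loops until kthread_should_stop() returns true and then returns a value that kthread_stop() hands back to the caller. A minimal, hedged sketch of such a loop (the function name and one-second sleep are illustrative, not taken from this file):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int example_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    /* do one unit of work, then doze until woken or stopped */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;       /* becomes the return value of kthread_stop() */
    }
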
     115        9370 : bool __kthread_should_park(struct task_struct *k)
     116             : {
     117        9370 :         return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
     118             : }
     119             : EXPORT_SYMBOL_GPL(__kthread_should_park);
     120             : 
     121             : /**
     122             :  * kthread_should_park - should this kthread park now?
     123             :  *
     124             :  * When someone calls kthread_park() on your kthread, it will be woken
     125             :  * and this will return true.  You should then do the necessary
     126             :  * cleanup and call kthread_parkme()
     127             :  *
     128             :  * Similar to kthread_should_stop(), but this keeps the thread alive
     129             :  * and in a park position. kthread_unpark() "restarts" the thread and
     130             :  * calls the thread function again.
     131             :  */
     132        7788 : bool kthread_should_park(void)
     133             : {
     134        7788 :         return __kthread_should_park(current);
     135             : }
     136             : EXPORT_SYMBOL_GPL(kthread_should_park);
     137             : 
     138             : /**
     139             :  * kthread_freezable_should_stop - should this freezable kthread return now?
     140             :  * @was_frozen: optional out parameter, indicates whether %current was frozen
     141             :  *
      142             :  * kthread_should_stop() for freezable kthreads, which will enter the
      143             :  * refrigerator if necessary.  This function is safe from kthread_stop() /
     144             :  * freezer deadlock and freezable kthreads should use this function instead
     145             :  * of calling try_to_freeze() directly.
     146             :  */
     147           0 : bool kthread_freezable_should_stop(bool *was_frozen)
     148             : {
     149           0 :         bool frozen = false;
     150             : 
     151           0 :         might_sleep();
     152             : 
     153           0 :         if (unlikely(freezing(current)))
     154             :                 frozen = __refrigerator(true);
     155             : 
     156           0 :         if (was_frozen)
     157           0 :                 *was_frozen = frozen;
     158             : 
     159           0 :         return kthread_should_stop();
     160             : }
     161             : EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
     162             : 
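A freezable kthread uses the same shape of loop, but routes its stop check through kthread_freezable_should_stop() so the thread also enters the refrigerator while the system is frozen. A sketch under the same illustrative naming:

    #include <linux/freezer.h>

    static int example_freezable_fn(void *data)
    {
            bool was_frozen;

            set_freezable();        /* allow the freezer to freeze this kthread */
            while (!kthread_freezable_should_stop(&was_frozen)) {
                    if (was_frozen) {
                            /* revalidate any state that went stale while frozen */
                    }
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }
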
     163             : /**
     164             :  * kthread_func - return the function specified on kthread creation
     165             :  * @task: kthread task in question
     166             :  *
     167             :  * Returns NULL if the task is not a kthread.
     168             :  */
     169           0 : void *kthread_func(struct task_struct *task)
     170             : {
     171           0 :         if (task->flags & PF_KTHREAD)
     172           0 :                 return to_kthread(task)->threadfn;
     173             :         return NULL;
     174             : }
     175             : EXPORT_SYMBOL_GPL(kthread_func);
     176             : 
     177             : /**
     178             :  * kthread_data - return data value specified on kthread creation
     179             :  * @task: kthread task in question
     180             :  *
     181             :  * Return the data value specified when kthread @task was created.
     182             :  * The caller is responsible for ensuring the validity of @task when
     183             :  * calling this function.
     184             :  */
     185        3803 : void *kthread_data(struct task_struct *task)
     186             : {
     187        3803 :         return to_kthread(task)->data;
     188             : }
     189             : EXPORT_SYMBOL_GPL(kthread_data);
     190             : 
     191             : /**
     192             :  * kthread_probe_data - speculative version of kthread_data()
     193             :  * @task: possible kthread task in question
     194             :  *
     195             :  * @task could be a kthread task.  Return the data value specified when it
     196             :  * was created if accessible.  If @task isn't a kthread task or its data is
     197             :  * inaccessible for any reason, %NULL is returned.  This function requires
     198             :  * that @task itself is safe to dereference.
     199             :  */
     200           0 : void *kthread_probe_data(struct task_struct *task)
     201             : {
     202           0 :         struct kthread *kthread = to_kthread(task);
     203           0 :         void *data = NULL;
     204             : 
     205           0 :         copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
     206           0 :         return data;
     207             : }
     208             : 
     209          48 : static void __kthread_parkme(struct kthread *self)
     210             : {
     211          60 :         for (;;) {
     212             :                 /*
     213             :                  * TASK_PARKED is a special state; we must serialize against
     214             :                  * possible pending wakeups to avoid store-store collisions on
     215             :                  * task->state.
     216             :                  *
     217             :                  * Such a collision might possibly result in the task state
      218             :                  * changing from TASK_PARKED and us failing the
     219             :                  * wait_task_inactive() in kthread_park().
     220             :                  */
     221          60 :                 set_special_state(TASK_PARKED);
     222          60 :                 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
     223             :                         break;
     224             : 
     225             :                 /*
     226             :                  * Thread is going to call schedule(), do not preempt it,
     227             :                  * or the caller of kthread_park() may spend more time in
     228             :                  * wait_task_inactive().
     229             :                  */
     230          12 :                 preempt_disable();
     231          12 :                 complete(&self->parked);
     232          12 :                 schedule_preempt_disabled();
     233          60 :                 preempt_enable();
     234             :         }
     235          48 :         __set_current_state(TASK_RUNNING);
     236          48 : }
     237             : 
     238           0 : void kthread_parkme(void)
     239             : {
     240           0 :         __kthread_parkme(to_kthread(current));
     241           0 : }
     242             : EXPORT_SYMBOL_GPL(kthread_parkme);
     243             : 
     244          48 : static int kthread(void *_create)
     245             : {
     246             :         /* Copy data: it's on kthread's stack */
     247          48 :         struct kthread_create_info *create = _create;
     248          48 :         int (*threadfn)(void *data) = create->threadfn;
     249          48 :         void *data = create->data;
     250          48 :         struct completion *done;
     251          48 :         struct kthread *self;
     252          48 :         int ret;
     253             : 
     254          48 :         self = kzalloc(sizeof(*self), GFP_KERNEL);
     255          48 :         set_kthread_struct(self);
     256             : 
     257             :         /* If user was SIGKILLed, I release the structure. */
     258          48 :         done = xchg(&create->done, NULL);
     259          48 :         if (!done) {
     260           0 :                 kfree(create);
     261           0 :                 do_exit(-EINTR);
     262             :         }
     263             : 
     264          48 :         if (!self) {
     265           0 :                 create->result = ERR_PTR(-ENOMEM);
     266           0 :                 complete(done);
     267           0 :                 do_exit(-ENOMEM);
     268             :         }
     269             : 
     270          48 :         self->threadfn = threadfn;
     271          48 :         self->data = data;
     272          48 :         init_completion(&self->exited);
     273          48 :         init_completion(&self->parked);
     274          48 :         current->vfork_done = &self->exited;
     275             : 
     276             :         /* OK, tell user we're spawned, wait for stop or wakeup */
     277          48 :         __set_current_state(TASK_UNINTERRUPTIBLE);
     278          48 :         create->result = current;
     279             :         /*
     280             :          * Thread is going to call schedule(), do not preempt it,
     281             :          * or the creator may spend more time in wait_task_inactive().
     282             :          */
     283          48 :         preempt_disable();
     284          48 :         complete(done);
     285          48 :         schedule_preempt_disabled();
     286          48 :         preempt_enable();
     287             : 
     288          48 :         ret = -EINTR;
     289          48 :         if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
     290          48 :                 cgroup_kthread_ready();
     291          48 :                 __kthread_parkme(self);
     292          48 :                 ret = threadfn(data);
     293             :         }
     294           0 :         do_exit(ret);
     295             : }
     296             : 
      297             : /* called from kernel_clone() to get node information for the task about to be created */
     298         951 : int tsk_fork_get_node(struct task_struct *tsk)
     299             : {
     300             : #ifdef CONFIG_NUMA
     301         951 :         if (tsk == kthreadd_task)
     302          48 :                 return tsk->pref_node_fork;
     303             : #endif
     304             :         return NUMA_NO_NODE;
     305             : }
     306             : 
     307          48 : static void create_kthread(struct kthread_create_info *create)
     308             : {
     309          48 :         int pid;
     310             : 
     311             : #ifdef CONFIG_NUMA
     312          48 :         current->pref_node_fork = create->node;
     313             : #endif
     314             :         /* We want our own signal handler (we take no signals by default). */
     315          48 :         pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
     316          48 :         if (pid < 0) {
     317             :                 /* If user was SIGKILLed, I release the structure. */
     318           0 :                 struct completion *done = xchg(&create->done, NULL);
     319             : 
     320           0 :                 if (!done) {
     321           0 :                         kfree(create);
     322           0 :                         return;
     323             :                 }
     324           0 :                 create->result = ERR_PTR(pid);
     325           0 :                 complete(done);
     326             :         }
     327             : }
     328             : 
     329             : static __printf(4, 0)
     330          48 : struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
     331             :                                                     void *data, int node,
     332             :                                                     const char namefmt[],
     333             :                                                     va_list args)
     334             : {
     335          48 :         DECLARE_COMPLETION_ONSTACK(done);
     336          48 :         struct task_struct *task;
     337          48 :         struct kthread_create_info *create = kmalloc(sizeof(*create),
     338             :                                                      GFP_KERNEL);
     339             : 
     340          48 :         if (!create)
     341          48 :                 return ERR_PTR(-ENOMEM);
     342          48 :         create->threadfn = threadfn;
     343          48 :         create->data = data;
     344          48 :         create->node = node;
     345          48 :         create->done = &done;
     346             : 
     347          48 :         spin_lock(&kthread_create_lock);
     348          48 :         list_add_tail(&create->list, &kthread_create_list);
     349          48 :         spin_unlock(&kthread_create_lock);
     350             : 
     351          48 :         wake_up_process(kthreadd_task);
     352             :         /*
     353             :          * Wait for completion in killable state, for I might be chosen by
     354             :          * the OOM killer while kthreadd is trying to allocate memory for
     355             :          * new kernel thread.
     356             :          */
     357          48 :         if (unlikely(wait_for_completion_killable(&done))) {
     358             :                 /*
     359             :                  * If I was SIGKILLed before kthreadd (or new kernel thread)
     360             :                  * calls complete(), leave the cleanup of this structure to
     361             :                  * that thread.
     362             :                  */
     363           0 :                 if (xchg(&create->done, NULL))
     364          48 :                         return ERR_PTR(-EINTR);
     365             :                 /*
     366             :                  * kthreadd (or new kernel thread) will call complete()
     367             :                  * shortly.
     368             :                  */
     369           0 :                 wait_for_completion(&done);
     370             :         }
     371          48 :         task = create->result;
     372          48 :         if (!IS_ERR(task)) {
     373          48 :                 static const struct sched_param param = { .sched_priority = 0 };
     374          48 :                 char name[TASK_COMM_LEN];
     375             : 
     376             :                 /*
     377             :                  * task is already visible to other tasks, so updating
     378             :                  * COMM must be protected.
     379             :                  */
     380          48 :                 vsnprintf(name, sizeof(name), namefmt, args);
     381          48 :                 set_task_comm(task, name);
     382             :                 /*
     383             :                  * root may have changed our (kthreadd's) priority or CPU mask.
     384             :                  * The kernel thread should not inherit these properties.
     385             :                  */
     386          48 :                 sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
     387          48 :                 set_cpus_allowed_ptr(task,
     388             :                                      housekeeping_cpumask(HK_FLAG_KTHREAD));
     389             :         }
     390          48 :         kfree(create);
     391          48 :         return task;
     392             : }
     393             : 
     394             : /**
     395             :  * kthread_create_on_node - create a kthread.
     396             :  * @threadfn: the function to run until signal_pending(current).
     397             :  * @data: data ptr for @threadfn.
     398             :  * @node: task and thread structures for the thread are allocated on this node
     399             :  * @namefmt: printf-style name for the thread.
     400             :  *
     401             :  * Description: This helper function creates and names a kernel
     402             :  * thread.  The thread will be stopped: use wake_up_process() to start
     403             :  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
     404             :  * is affine to all CPUs.
     405             :  *
      406             :  * If the thread is going to be bound to a particular cpu, give its node
      407             :  * in @node to get NUMA affinity for the kthread stack; otherwise give NUMA_NO_NODE.
     408             :  * When woken, the thread will run @threadfn() with @data as its
     409             :  * argument. @threadfn() can either call do_exit() directly if it is a
     410             :  * standalone thread for which no one will call kthread_stop(), or
     411             :  * return when 'kthread_should_stop()' is true (which means
     412             :  * kthread_stop() has been called).  The return value should be zero
     413             :  * or a negative error number; it will be passed to kthread_stop().
     414             :  *
     415             :  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
     416             :  */
     417          48 : struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
     418             :                                            void *data, int node,
     419             :                                            const char namefmt[],
     420             :                                            ...)
     421             : {
     422          48 :         struct task_struct *task;
     423          48 :         va_list args;
     424             : 
     425          48 :         va_start(args, namefmt);
     426          48 :         task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
     427          48 :         va_end(args);
     428             : 
     429          48 :         return task;
     430             : }
     431             : EXPORT_SYMBOL(kthread_create_on_node);
     432             : 
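A typical caller creates the thread stopped, names it, wakes it, and eventually tears it down with kthread_stop(); the kthread_run() macro wraps the create-and-wake steps. A hedged sketch reusing the illustrative example_thread_fn() above:

    static int example_start_and_stop(void)
    {
            struct task_struct *tsk;
            int ret;

            tsk = kthread_create_on_node(example_thread_fn, NULL, NUMA_NO_NODE,
                                         "example");
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);
            wake_up_process(tsk);

            /* ... later ... */
            ret = kthread_stop(tsk);        /* -EINTR if it was never woken */
            return ret;
    }
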
     433          52 : static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
     434             : {
     435          52 :         unsigned long flags;
     436             : 
     437          52 :         if (!wait_task_inactive(p, state)) {
     438           0 :                 WARN_ON(1);
     439           0 :                 return;
     440             :         }
     441             : 
     442             :         /* It's safe because the task is inactive. */
     443          52 :         raw_spin_lock_irqsave(&p->pi_lock, flags);
     444          52 :         do_set_cpus_allowed(p, mask);
     445          52 :         p->flags |= PF_NO_SETAFFINITY;
     446          52 :         raw_spin_unlock_irqrestore(&p->pi_lock, flags);
     447             : }
     448             : 
     449          24 : static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
     450             : {
     451          24 :         __kthread_bind_mask(p, cpumask_of(cpu), state);
     452          24 : }
     453             : 
     454          28 : void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
     455             : {
     456          28 :         __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
     457          28 : }
     458             : 
     459             : /**
     460             :  * kthread_bind - bind a just-created kthread to a cpu.
     461             :  * @p: thread created by kthread_create().
      462             :  * @cpu: cpu (might not be online, must be possible) for @p to run on.
     463             :  *
     464             :  * Description: This function is equivalent to set_cpus_allowed(),
     465             :  * except that @cpu doesn't need to be online, and the thread must be
     466             :  * stopped (i.e., just returned from kthread_create()).
     467             :  */
     468          12 : void kthread_bind(struct task_struct *p, unsigned int cpu)
     469             : {
     470           0 :         __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
     471           0 : }
     472             : EXPORT_SYMBOL(kthread_bind);
     473             : 
     474             : /**
     475             :  * kthread_create_on_cpu - Create a cpu bound kthread
     476             :  * @threadfn: the function to run until signal_pending(current).
     477             :  * @data: data ptr for @threadfn.
      478             :  * @cpu: The cpu on which the thread should be bound.
     479             :  * @namefmt: printf-style name for the thread. Format is restricted
     480             :  *           to "name.*%u". Code fills in cpu number.
     481             :  *
      482             :  * Description: This helper function creates and names a kernel thread.
     483             :  */
     484          12 : struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
     485             :                                           void *data, unsigned int cpu,
     486             :                                           const char *namefmt)
     487             : {
     488          12 :         struct task_struct *p;
     489             : 
     490          12 :         p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
     491             :                                    cpu);
     492          12 :         if (IS_ERR(p))
     493             :                 return p;
     494          12 :         kthread_bind(p, cpu);
     495             :         /* CPU hotplug need to bind once again when unparking the thread. */
     496          12 :         to_kthread(p)->cpu = cpu;
     497          12 :         return p;
     498             : }
     499             : 
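As the description notes, @namefmt is expected to contain a single "%u" that is filled in with the CPU number, and the new thread comes back bound but not yet running. A hedged sketch of how such a thread is usually set up (names illustrative; the smpboot infrastructure is the main in-tree user of this pattern):

    static int example_setup_percpu_thread(unsigned int cpu)
    {
            struct task_struct *tsk;

            tsk = kthread_create_on_cpu(example_thread_fn, NULL, cpu, "example/%u");
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);

            kthread_set_per_cpu(tsk, cpu);
            kthread_park(tsk);      /* parks it without ever calling threadfn() */
            /* kthread_unpark(tsk) later rebinds it to @cpu and lets it run */
            return 0;
    }
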
     500          30 : void kthread_set_per_cpu(struct task_struct *k, int cpu)
     501             : {
     502          30 :         struct kthread *kthread = to_kthread(k);
     503          30 :         if (!kthread)
     504             :                 return;
     505             : 
     506          30 :         WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
     507             : 
     508          30 :         if (cpu < 0) {
     509           0 :                 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     510           0 :                 return;
     511             :         }
     512             : 
     513          30 :         kthread->cpu = cpu;
     514          30 :         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     515             : }
     516             : 
     517        8137 : bool kthread_is_per_cpu(struct task_struct *k)
     518             : {
     519        8137 :         struct kthread *kthread = to_kthread(k);
     520        8142 :         if (!kthread)
     521             :                 return false;
     522             : 
     523        7855 :         return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
     524             : }
     525             : 
     526             : /**
     527             :  * kthread_unpark - unpark a thread created by kthread_create().
     528             :  * @k:          thread created by kthread_create().
     529             :  *
     530             :  * Sets kthread_should_park() for @k to return false, wakes it, and
      531             :  * waits for it to return. If the thread is marked percpu then it is
     532             :  * bound to the cpu again.
     533             :  */
     534          12 : void kthread_unpark(struct task_struct *k)
     535             : {
     536          12 :         struct kthread *kthread = to_kthread(k);
     537             : 
     538             :         /*
     539             :          * Newly created kthread was parked when the CPU was offline.
     540             :          * The binding was lost and we need to set it again.
     541             :          */
     542          12 :         if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
     543          12 :                 __kthread_bind(k, kthread->cpu, TASK_PARKED);
     544             : 
     545          12 :         clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     546             :         /*
     547             :          * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
     548             :          */
     549          12 :         wake_up_state(k, TASK_PARKED);
     550          12 : }
     551             : EXPORT_SYMBOL_GPL(kthread_unpark);
     552             : 
     553             : /**
     554             :  * kthread_park - park a thread created by kthread_create().
     555             :  * @k: thread created by kthread_create().
     556             :  *
     557             :  * Sets kthread_should_park() for @k to return true, wakes it, and
     558             :  * waits for it to return. This can also be called after kthread_create()
     559             :  * instead of calling wake_up_process(): the thread will park without
     560             :  * calling threadfn().
     561             :  *
     562             :  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
     563             :  * If called by the kthread itself just the park bit is set.
     564             :  */
     565          12 : int kthread_park(struct task_struct *k)
     566             : {
     567          12 :         struct kthread *kthread = to_kthread(k);
     568             : 
     569          12 :         if (WARN_ON(k->flags & PF_EXITING))
     570             :                 return -ENOSYS;
     571             : 
     572          12 :         if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
     573             :                 return -EBUSY;
     574             : 
     575          12 :         set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     576          12 :         if (k != current) {
     577          12 :                 wake_up_process(k);
     578             :                 /*
     579             :                  * Wait for __kthread_parkme() to complete(), this means we
     580             :                  * _will_ have TASK_PARKED and are about to call schedule().
     581             :                  */
     582          12 :                 wait_for_completion(&kthread->parked);
     583             :                 /*
     584             :                  * Now wait for that schedule() to complete and the task to
     585             :                  * get scheduled out.
     586             :                  */
     587          12 :                 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
     588             :         }
     589             : 
     590             :         return 0;
     591             : }
     592             : EXPORT_SYMBOL_GPL(kthread_park);
     593             : 
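From the thread's side, parking is cooperative: the main loop checks kthread_should_park() and calls kthread_parkme(), which blocks in TASK_PARKED until kthread_unpark() is called. A hedged sketch of a parkable loop (names illustrative):

    static int example_parkable_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    if (kthread_should_park()) {
                            /* drop per-CPU or other shared state, then park */
                            kthread_parkme();
                            continue;
                    }
                    /* do one unit of work */
                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }
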
     594             : /**
     595             :  * kthread_stop - stop a thread created by kthread_create().
     596             :  * @k: thread created by kthread_create().
     597             :  *
     598             :  * Sets kthread_should_stop() for @k to return true, wakes it, and
     599             :  * waits for it to exit. This can also be called after kthread_create()
     600             :  * instead of calling wake_up_process(): the thread will exit without
     601             :  * calling threadfn().
     602             :  *
     603             :  * If threadfn() may call do_exit() itself, the caller must ensure
     604             :  * task_struct can't go away.
     605             :  *
     606             :  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
     607             :  * was never called.
     608             :  */
     609           0 : int kthread_stop(struct task_struct *k)
     610             : {
     611           0 :         struct kthread *kthread;
     612           0 :         int ret;
     613             : 
     614           0 :         trace_sched_kthread_stop(k);
     615             : 
     616           0 :         get_task_struct(k);
     617           0 :         kthread = to_kthread(k);
     618           0 :         set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
     619           0 :         kthread_unpark(k);
     620           0 :         wake_up_process(k);
     621           0 :         wait_for_completion(&kthread->exited);
     622           0 :         ret = k->exit_code;
     623           0 :         put_task_struct(k);
     624             : 
     625           0 :         trace_sched_kthread_stop_ret(ret);
     626           0 :         return ret;
     627             : }
     628             : EXPORT_SYMBOL(kthread_stop);
     629             : 
     630           1 : int kthreadd(void *unused)
     631             : {
     632           1 :         struct task_struct *tsk = current;
     633             : 
     634             :         /* Setup a clean context for our children to inherit. */
     635           1 :         set_task_comm(tsk, "kthreadd");
     636           1 :         ignore_signals(tsk);
     637           1 :         set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
     638           1 :         set_mems_allowed(node_states[N_MEMORY]);
     639             : 
     640           1 :         current->flags |= PF_NOFREEZE;
     641           1 :         cgroup_init_kthreadd();
     642             : 
     643          49 :         for (;;) {
     644          49 :                 set_current_state(TASK_INTERRUPTIBLE);
     645          49 :                 if (list_empty(&kthread_create_list))
     646          49 :                         schedule();
     647          48 :                 __set_current_state(TASK_RUNNING);
     648             : 
     649          48 :                 spin_lock(&kthread_create_lock);
     650          96 :                 while (!list_empty(&kthread_create_list)) {
     651          48 :                         struct kthread_create_info *create;
     652             : 
     653          48 :                         create = list_entry(kthread_create_list.next,
     654             :                                             struct kthread_create_info, list);
     655          48 :                         list_del_init(&create->list);
     656          48 :                         spin_unlock(&kthread_create_lock);
     657             : 
     658          48 :                         create_kthread(create);
     659             : 
     660         144 :                         spin_lock(&kthread_create_lock);
     661             :                 }
     662          97 :                 spin_unlock(&kthread_create_lock);
     663             :         }
     664             : 
     665             :         return 0;
     666             : }
     667             : 
     668           0 : void __kthread_init_worker(struct kthread_worker *worker,
     669             :                                 const char *name,
     670             :                                 struct lock_class_key *key)
     671             : {
     672           0 :         memset(worker, 0, sizeof(struct kthread_worker));
     673           0 :         raw_spin_lock_init(&worker->lock);
     674           0 :         lockdep_set_class_and_name(&worker->lock, key, name);
     675           0 :         INIT_LIST_HEAD(&worker->work_list);
     676           0 :         INIT_LIST_HEAD(&worker->delayed_work_list);
     677           0 : }
     678             : EXPORT_SYMBOL_GPL(__kthread_init_worker);
     679             : 
     680             : /**
     681             :  * kthread_worker_fn - kthread function to process kthread_worker
     682             :  * @worker_ptr: pointer to initialized kthread_worker
     683             :  *
     684             :  * This function implements the main cycle of kthread worker. It processes
     685             :  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
     686             :  * is empty.
     687             :  *
      688             :  * The works must not hold any locks or leave preemption or interrupts
      689             :  * disabled when they finish. A safe point for freezing is provided after
      690             :  * one work finishes and before the next one is started.
      691             :  *
      692             :  * Also, a work must not be handled by more than one worker at the same time;
      693             :  * see also kthread_queue_work().
     694             :  */
     695           0 : int kthread_worker_fn(void *worker_ptr)
     696             : {
     697           0 :         struct kthread_worker *worker = worker_ptr;
     698           0 :         struct kthread_work *work;
     699             : 
     700             :         /*
     701             :          * FIXME: Update the check and remove the assignment when all kthread
     702             :          * worker users are created using kthread_create_worker*() functions.
     703             :          */
     704           0 :         WARN_ON(worker->task && worker->task != current);
     705           0 :         worker->task = current;
     706             : 
     707           0 :         if (worker->flags & KTW_FREEZABLE)
     708             :                 set_freezable();
     709             : 
     710           0 : repeat:
     711           0 :         set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
     712             : 
     713           0 :         if (kthread_should_stop()) {
     714           0 :                 __set_current_state(TASK_RUNNING);
     715           0 :                 raw_spin_lock_irq(&worker->lock);
     716           0 :                 worker->task = NULL;
     717           0 :                 raw_spin_unlock_irq(&worker->lock);
     718           0 :                 return 0;
     719             :         }
     720             : 
     721           0 :         work = NULL;
     722           0 :         raw_spin_lock_irq(&worker->lock);
     723           0 :         if (!list_empty(&worker->work_list)) {
     724           0 :                 work = list_first_entry(&worker->work_list,
     725             :                                         struct kthread_work, node);
     726           0 :                 list_del_init(&work->node);
     727             :         }
     728           0 :         worker->current_work = work;
     729           0 :         raw_spin_unlock_irq(&worker->lock);
     730             : 
     731           0 :         if (work) {
     732           0 :                 kthread_work_func_t func = work->func;
     733           0 :                 __set_current_state(TASK_RUNNING);
     734           0 :                 trace_sched_kthread_work_execute_start(work);
     735           0 :                 work->func(work);
     736             :                 /*
     737             :                  * Avoid dereferencing work after this point.  The trace
     738             :                  * event only cares about the address.
     739             :                  */
     740           0 :                 trace_sched_kthread_work_execute_end(work, func);
     741           0 :         } else if (!freezing(current))
     742           0 :                 schedule();
     743             : 
     744           0 :         try_to_freeze();
     745           0 :         cond_resched();
     746           0 :         goto repeat;
     747             : }
     748             : EXPORT_SYMBOL_GPL(kthread_worker_fn);
     749             : 
     750             : static __printf(3, 0) struct kthread_worker *
     751           0 : __kthread_create_worker(int cpu, unsigned int flags,
     752             :                         const char namefmt[], va_list args)
     753             : {
     754           0 :         struct kthread_worker *worker;
     755           0 :         struct task_struct *task;
     756           0 :         int node = NUMA_NO_NODE;
     757             : 
     758           0 :         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
     759           0 :         if (!worker)
     760           0 :                 return ERR_PTR(-ENOMEM);
     761             : 
     762           0 :         kthread_init_worker(worker);
     763             : 
     764           0 :         if (cpu >= 0)
     765           0 :                 node = cpu_to_node(cpu);
     766             : 
     767           0 :         task = __kthread_create_on_node(kthread_worker_fn, worker,
     768             :                                                 node, namefmt, args);
     769           0 :         if (IS_ERR(task))
     770           0 :                 goto fail_task;
     771             : 
     772           0 :         if (cpu >= 0)
     773           0 :                 kthread_bind(task, cpu);
     774             : 
     775           0 :         worker->flags = flags;
     776           0 :         worker->task = task;
     777           0 :         wake_up_process(task);
     778           0 :         return worker;
     779             : 
     780           0 : fail_task:
     781           0 :         kfree(worker);
     782           0 :         return ERR_CAST(task);
     783             : }
     784             : 
     785             : /**
     786             :  * kthread_create_worker - create a kthread worker
     787             :  * @flags: flags modifying the default behavior of the worker
     788             :  * @namefmt: printf-style name for the kthread worker (task).
     789             :  *
     790             :  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     791             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     792             :  * when the worker was SIGKILLed.
     793             :  */
     794             : struct kthread_worker *
     795           0 : kthread_create_worker(unsigned int flags, const char namefmt[], ...)
     796             : {
     797           0 :         struct kthread_worker *worker;
     798           0 :         va_list args;
     799             : 
     800           0 :         va_start(args, namefmt);
     801           0 :         worker = __kthread_create_worker(-1, flags, namefmt, args);
     802           0 :         va_end(args);
     803             : 
     804           0 :         return worker;
     805             : }
     806             : EXPORT_SYMBOL(kthread_create_worker);
     807             : 
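The worker API above is normally used by creating a worker, initializing a kthread_work with a callback, and queuing it; the work then runs in the worker's thread. A minimal sketch (callback and worker names are illustrative):

    static void example_work_fn(struct kthread_work *work)
    {
            /* runs in the context of the worker's kthread */
    }

    static int example_use_worker(void)
    {
            struct kthread_worker *worker;
            struct kthread_work work;

            worker = kthread_create_worker(0, "example_worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&work, example_work_fn);
            kthread_queue_work(worker, &work);  /* returns false if already pending */

            kthread_flush_worker(worker);       /* wait until the work has run */
            kthread_destroy_worker(worker);
            return 0;
    }
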
     808             : /**
     809             :  * kthread_create_worker_on_cpu - create a kthread worker and bind it
     810             :  *      to a given CPU and the associated NUMA node.
     811             :  * @cpu: CPU number
     812             :  * @flags: flags modifying the default behavior of the worker
     813             :  * @namefmt: printf-style name for the kthread worker (task).
     814             :  *
     815             :  * Use a valid CPU number if you want to bind the kthread worker
     816             :  * to the given CPU and the associated NUMA node.
     817             :  *
      818             :  * It is good practice to also include the cpu number in the worker name.
     819             :  * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
     820             :  *
     821             :  * CPU hotplug:
     822             :  * The kthread worker API is simple and generic. It just provides a way
     823             :  * to create, use, and destroy workers.
     824             :  *
      825             :  * It is up to the API user to handle CPU hotplug. They have to decide
      826             :  * how to handle pending work items, how to prevent queuing of new ones,
      827             :  * and how to restore the functionality when the CPU goes offline and
      828             :  * comes back online. There are a few catches:
      829             :  *
      830             :  *    - CPU affinity is lost when the worker is scheduled on an offline CPU.
      831             :  *
      832             :  *    - The worker might not exist if the CPU was offline when the user
      833             :  *      created the workers.
     834             :  *
     835             :  * Good practice is to implement two CPU hotplug callbacks and to
     836             :  * destroy/create the worker when the CPU goes down/up.
     837             :  *
     838             :  * Return:
     839             :  * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
     840             :  * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
     841             :  * when the worker was SIGKILLed.
     842             :  */
     843             : struct kthread_worker *
     844           0 : kthread_create_worker_on_cpu(int cpu, unsigned int flags,
     845             :                              const char namefmt[], ...)
     846             : {
     847           0 :         struct kthread_worker *worker;
     848           0 :         va_list args;
     849             : 
     850           0 :         va_start(args, namefmt);
     851           0 :         worker = __kthread_create_worker(cpu, flags, namefmt, args);
     852           0 :         va_end(args);
     853             : 
     854           0 :         return worker;
     855             : }
     856             : EXPORT_SYMBOL(kthread_create_worker_on_cpu);
     857             : 
     858             : /*
      859             :  * Returns true when the work cannot be queued at the moment.
      860             :  * It happens when the work is already pending in a worker list
      861             :  * or when it is being cancelled.
     862             :  */
     863           0 : static inline bool queuing_blocked(struct kthread_worker *worker,
     864             :                                    struct kthread_work *work)
     865             : {
     866           0 :         lockdep_assert_held(&worker->lock);
     867             : 
     868           0 :         return !list_empty(&work->node) || work->canceling;
     869             : }
     870             : 
     871           0 : static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
     872             :                                              struct kthread_work *work)
     873             : {
     874           0 :         lockdep_assert_held(&worker->lock);
     875           0 :         WARN_ON_ONCE(!list_empty(&work->node));
     876             :         /* Do not use a work with >1 worker, see kthread_queue_work() */
     877           0 :         WARN_ON_ONCE(work->worker && work->worker != worker);
     878           0 : }
     879             : 
     880             : /* insert @work before @pos in @worker */
     881           0 : static void kthread_insert_work(struct kthread_worker *worker,
     882             :                                 struct kthread_work *work,
     883             :                                 struct list_head *pos)
     884             : {
     885           0 :         kthread_insert_work_sanity_check(worker, work);
     886             : 
     887           0 :         trace_sched_kthread_work_queue_work(worker, work);
     888             : 
     889           0 :         list_add_tail(&work->node, pos);
     890           0 :         work->worker = worker;
     891           0 :         if (!worker->current_work && likely(worker->task))
     892           0 :                 wake_up_process(worker->task);
     893           0 : }
     894             : 
     895             : /**
     896             :  * kthread_queue_work - queue a kthread_work
     897             :  * @worker: target kthread_worker
     898             :  * @work: kthread_work to queue
     899             :  *
      900             :  * Queue @work on @worker for async execution.  @worker
      901             :  * must have been created with kthread_create_worker().  Returns %true
     902             :  * if @work was successfully queued, %false if it was already pending.
     903             :  *
     904             :  * Reinitialize the work if it needs to be used by another worker.
     905             :  * For example, when the worker was stopped and started again.
     906             :  */
     907           0 : bool kthread_queue_work(struct kthread_worker *worker,
     908             :                         struct kthread_work *work)
     909             : {
     910           0 :         bool ret = false;
     911           0 :         unsigned long flags;
     912             : 
     913           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
     914           0 :         if (!queuing_blocked(worker, work)) {
     915           0 :                 kthread_insert_work(worker, work, &worker->work_list);
     916           0 :                 ret = true;
     917             :         }
     918           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
     919           0 :         return ret;
     920             : }
     921             : EXPORT_SYMBOL_GPL(kthread_queue_work);
     922             : 
     923             : /**
     924             :  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
     925             :  *      delayed work when the timer expires.
     926             :  * @t: pointer to the expired timer
     927             :  *
      928             :  * The signature of the function is defined by struct timer_list.
      929             :  * It must be called from an irqsafe timer with interrupts already disabled.
     930             :  */
     931           0 : void kthread_delayed_work_timer_fn(struct timer_list *t)
     932             : {
     933           0 :         struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
     934           0 :         struct kthread_work *work = &dwork->work;
     935           0 :         struct kthread_worker *worker = work->worker;
     936           0 :         unsigned long flags;
     937             : 
     938             :         /*
     939             :          * This might happen when a pending work is reinitialized.
      940             :  * It means that the work is being used in a wrong way.
     941             :          */
     942           0 :         if (WARN_ON_ONCE(!worker))
     943             :                 return;
     944             : 
     945           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
     946             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
     947           0 :         WARN_ON_ONCE(work->worker != worker);
     948             : 
     949             :         /* Move the work from worker->delayed_work_list. */
     950           0 :         WARN_ON_ONCE(list_empty(&work->node));
     951           0 :         list_del_init(&work->node);
     952           0 :         if (!work->canceling)
     953           0 :                 kthread_insert_work(worker, work, &worker->work_list);
     954             : 
     955           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
     956             : }
     957             : EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
     958             : 
     959           0 : static void __kthread_queue_delayed_work(struct kthread_worker *worker,
     960             :                                          struct kthread_delayed_work *dwork,
     961             :                                          unsigned long delay)
     962             : {
     963           0 :         struct timer_list *timer = &dwork->timer;
     964           0 :         struct kthread_work *work = &dwork->work;
     965             : 
     966           0 :         WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
     967             : 
     968             :         /*
     969             :          * If @delay is 0, queue @dwork->work immediately.  This is for
     970             :          * both optimization and correctness.  The earliest @timer can
     971             :          * expire is on the closest next tick and delayed_work users depend
     972             :          * on that there's no such delay when @delay is 0.
     973             :          */
     974           0 :         if (!delay) {
     975           0 :                 kthread_insert_work(worker, work, &worker->work_list);
     976           0 :                 return;
     977             :         }
     978             : 
     979             :         /* Be paranoid and try to detect possible races already now. */
     980           0 :         kthread_insert_work_sanity_check(worker, work);
     981             : 
     982           0 :         list_add(&work->node, &worker->delayed_work_list);
     983           0 :         work->worker = worker;
     984           0 :         timer->expires = jiffies + delay;
     985           0 :         add_timer(timer);
     986             : }
     987             : 
     988             : /**
     989             :  * kthread_queue_delayed_work - queue the associated kthread work
     990             :  *      after a delay.
     991             :  * @worker: target kthread_worker
     992             :  * @dwork: kthread_delayed_work to queue
     993             :  * @delay: number of jiffies to wait before queuing
     994             :  *
     995             :  * If the work has not been pending it starts a timer that will queue
     996             :  * the work after the given @delay. If @delay is zero, it queues the
     997             :  * work immediately.
     998             :  *
      999             :  * Return: %false if @work was already pending, meaning that either the
     1000             :  * timer was running or the work was already queued. It returns %true
     1001             :  * otherwise.
    1002             :  */
    1003           0 : bool kthread_queue_delayed_work(struct kthread_worker *worker,
    1004             :                                 struct kthread_delayed_work *dwork,
    1005             :                                 unsigned long delay)
    1006             : {
    1007           0 :         struct kthread_work *work = &dwork->work;
    1008           0 :         unsigned long flags;
    1009           0 :         bool ret = false;
    1010             : 
    1011           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1012             : 
    1013           0 :         if (!queuing_blocked(worker, work)) {
    1014           0 :                 __kthread_queue_delayed_work(worker, dwork, delay);
    1015           0 :                 ret = true;
    1016             :         }
    1017             : 
    1018           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1019           0 :         return ret;
    1020             : }
    1021             : EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
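
For context, a minimal usage sketch of this API (not part of kthread.c; the
my_dev and my_poll_fn names and the 100 ms period are assumptions): a driver
creates its own worker and arms a delayed work item that re-queues itself.

    #include <linux/kthread.h>
    #include <linux/jiffies.h>
    #include <linux/err.h>

    struct my_dev {
            struct kthread_worker *worker;
            struct kthread_delayed_work poll_work;
    };

    /* Runs in the worker thread; re-arms itself every 100 ms. */
    static void my_poll_fn(struct kthread_work *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev,
                                              poll_work.work);

            /* ... poll the hardware ... */

            kthread_queue_delayed_work(dev->worker, &dev->poll_work,
                                       msecs_to_jiffies(100));
    }

    static int my_dev_start(struct my_dev *dev)
    {
            dev->worker = kthread_create_worker(0, "my_dev_poller");
            if (IS_ERR(dev->worker))
                    return PTR_ERR(dev->worker);

            kthread_init_delayed_work(&dev->poll_work, my_poll_fn);

            /* Returns false only if the work was already pending. */
            kthread_queue_delayed_work(dev->worker, &dev->poll_work,
                                       msecs_to_jiffies(100));
            return 0;
    }

The later sketches below reuse this hypothetical my_dev structure.
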
    1022             : 
    1023             : struct kthread_flush_work {
    1024             :         struct kthread_work     work;
    1025             :         struct completion       done;
    1026             : };
    1027             : 
    1028           0 : static void kthread_flush_work_fn(struct kthread_work *work)
    1029             : {
    1030           0 :         struct kthread_flush_work *fwork =
    1031           0 :                 container_of(work, struct kthread_flush_work, work);
    1032           0 :         complete(&fwork->done);
    1033           0 : }
    1034             : 
    1035             : /**
    1036             :  * kthread_flush_work - flush a kthread_work
    1037             :  * @work: work to flush
    1038             :  *
    1039             :  * If @work is queued or executing, wait for it to finish execution.
    1040             :  */
    1041           0 : void kthread_flush_work(struct kthread_work *work)
    1042             : {
    1043           0 :         struct kthread_flush_work fwork = {
    1044             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1045           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1046             :         };
    1047           0 :         struct kthread_worker *worker;
    1048           0 :         bool noop = false;
    1049             : 
    1050           0 :         worker = work->worker;
    1051           0 :         if (!worker)
    1052           0 :                 return;
    1053             : 
    1054           0 :         raw_spin_lock_irq(&worker->lock);
    1055             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1056           0 :         WARN_ON_ONCE(work->worker != worker);
    1057             : 
    1058           0 :         if (!list_empty(&work->node))
    1059           0 :                 kthread_insert_work(worker, &fwork.work, work->node.next);
    1060           0 :         else if (worker->current_work == work)
    1061           0 :                 kthread_insert_work(worker, &fwork.work,
    1062             :                                     worker->work_list.next);
    1063             :         else
    1064             :                 noop = true;
    1065             : 
    1066           0 :         raw_spin_unlock_irq(&worker->lock);
    1067             : 
    1068           0 :         if (!noop)
    1069           0 :                 wait_for_completion(&fwork.done);
    1070             : }
    1071             : EXPORT_SYMBOL_GPL(kthread_flush_work);
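
Continuing the hypothetical my_dev sketch above, flushing a single item is how
a caller waits for an in-flight execution without canceling anything:

    /* Wait for poll_work to finish if it is queued or currently running. */
    static void my_dev_wait_for_poll(struct my_dev *dev)
    {
            kthread_flush_work(&dev->poll_work.work);
    }
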
    1072             : 
    1073             : /*
    1074             :  * This function removes the work from the worker queue. Also it makes sure
    1075             :  * that it won't get queued later via the delayed work's timer.
    1076             :  *
    1077             :  * The work might still be in use when this function finishes. See the
    1078             :  * current_work processed by the worker.
    1079             :  *
    1080             :  * Return: %true if @work was pending and successfully canceled,
    1081             :  *      %false if @work was not pending
    1082             :  */
    1083           0 : static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
    1084             :                                   unsigned long *flags)
    1085             : {
    1086             :         /* Try to cancel the timer if exists. */
    1087           0 :         if (is_dwork) {
    1088           0 :                 struct kthread_delayed_work *dwork =
    1089           0 :                         container_of(work, struct kthread_delayed_work, work);
    1090           0 :                 struct kthread_worker *worker = work->worker;
    1091             : 
    1092             :                 /*
    1093             :                  * del_timer_sync() must be called to make sure that the timer
    1094             :  * callback is not running. The lock must be temporarily released
    1095             :                  * to avoid a deadlock with the callback. In the meantime,
    1096             :                  * any queuing is blocked by setting the canceling counter.
    1097             :                  */
    1098           0 :                 work->canceling++;
    1099           0 :                 raw_spin_unlock_irqrestore(&worker->lock, *flags);
    1100           0 :                 del_timer_sync(&dwork->timer);
    1101           0 :                 raw_spin_lock_irqsave(&worker->lock, *flags);
    1102           0 :                 work->canceling--;
    1103             :         }
    1104             : 
    1105             :         /*
    1106             :          * Try to remove the work from a worker list. It might either
    1107             :          * be from worker->work_list or from worker->delayed_work_list.
    1108             :          */
    1109           0 :         if (!list_empty(&work->node)) {
    1110           0 :                 list_del_init(&work->node);
    1111           0 :                 return true;
    1112             :         }
    1113             : 
    1114             :         return false;
    1115             : }
    1116             : 
    1117             : /**
    1118             :  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
    1119             :  * @worker: kthread worker to use
    1120             :  * @dwork: kthread delayed work to queue
    1121             :  * @delay: number of jiffies to wait before queuing
    1122             :  *
    1123             :  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
    1124             :  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
    1125             :  * @work is guaranteed to be queued immediately.
    1126             :  *
    1127             :  * Return: %true if @dwork was pending and its timer was modified,
    1128             :  * %false otherwise.
    1129             :  *
    1130             :  * A special case is when the work is being canceled in parallel.
    1131             :  * It might be caused either by the real kthread_cancel_delayed_work_sync()
    1132             :  * or yet another kthread_mod_delayed_work() call. We let the other command
    1133             :  * win and return %false here. The caller is supposed to synchronize these
    1134             :  * operations in a reasonable way.
    1135             :  *
    1136             :  * This function is safe to call from any context including IRQ handler.
    1137             :  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
    1138             :  * for details.
    1139             :  */
    1140           0 : bool kthread_mod_delayed_work(struct kthread_worker *worker,
    1141             :                               struct kthread_delayed_work *dwork,
    1142             :                               unsigned long delay)
    1143             : {
    1144           0 :         struct kthread_work *work = &dwork->work;
    1145           0 :         unsigned long flags;
    1146           0 :         int ret = false;
    1147             : 
    1148           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1149             : 
    1150             :         /* Do not bother with canceling when never queued. */
    1151           0 :         if (!work->worker)
    1152           0 :                 goto fast_queue;
    1153             : 
    1154             :         /* Work must not be used with >1 worker, see kthread_queue_work() */
    1155           0 :         WARN_ON_ONCE(work->worker != worker);
    1156             : 
    1157             :         /* Do not fight with another command that is canceling this work. */
    1158           0 :         if (work->canceling)
    1159           0 :                 goto out;
    1160             : 
    1161           0 :         ret = __kthread_cancel_work(work, true, &flags);
    1162           0 : fast_queue:
    1163           0 :         __kthread_queue_delayed_work(worker, dwork, delay);
    1164           0 : out:
    1165           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1166           0 :         return ret;
    1167             : }
    1168             : EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
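
A hedged sketch of the re-arming pattern this helper is for, again reusing the
hypothetical my_dev: each call pushes the poll deadline out by one second,
whether or not the previous timer was still pending.

    static void my_dev_defer_poll(struct my_dev *dev)
    {
            /*
             * %true if the pending work's timer was modified, %false
             * otherwise (idle work is queued fresh; a work being
             * canceled in parallel is left to the canceling side).
             */
            kthread_mod_delayed_work(dev->worker, &dev->poll_work,
                                     msecs_to_jiffies(1000));
    }
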
    1169             : 
    1170           0 : static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
    1171             : {
    1172           0 :         struct kthread_worker *worker = work->worker;
    1173           0 :         unsigned long flags;
    1174           0 :         int ret = false;
    1175             : 
    1176           0 :         if (!worker)
    1177           0 :                 goto out;
    1178             : 
    1179           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1180             :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1181           0 :         WARN_ON_ONCE(work->worker != worker);
    1182             : 
    1183           0 :         ret = __kthread_cancel_work(work, is_dwork, &flags);
    1184             : 
    1185           0 :         if (worker->current_work != work)
    1186           0 :                 goto out_fast;
    1187             : 
    1188             :         /*
    1189             :          * The work is in progress and we need to wait with the lock released.
    1190             :          * In the meantime, block any queuing by setting the canceling counter.
    1191             :          */
    1192           0 :         work->canceling++;
    1193           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1194           0 :         kthread_flush_work(work);
    1195           0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1196           0 :         work->canceling--;
    1197             : 
    1198           0 : out_fast:
    1199           0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1200           0 : out:
    1201           0 :         return ret;
    1202             : }
    1203             : 
    1204             : /**
    1205             :  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
    1206             :  * @work: the kthread work to cancel
    1207             :  *
    1208             :  * Cancel @work and wait for its execution to finish.  This function
    1209             :  * can be used even if the work re-queues itself. On return from this
    1210             :  * function, @work is guaranteed to be not pending or executing on any CPU.
    1211             :  *
    1212             :  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
    1213             :  * delayed works. Use kthread_cancel_delayed_work_sync() instead.
    1214             :  *
    1215             :  * The caller must ensure that the worker on which @work was last
    1216             :  * queued can't be destroyed before this function returns.
    1217             :  *
    1218             :  * Return: %true if @work was pending, %false otherwise.
    1219             :  */
    1220           0 : bool kthread_cancel_work_sync(struct kthread_work *work)
    1221             : {
    1222           0 :         return __kthread_cancel_work_sync(work, false);
    1223             : }
    1224             : EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
    1225             : 
    1226             : /**
    1227             :  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
    1228             :  *      wait for it to finish.
    1229             :  * @dwork: the kthread delayed work to cancel
    1230             :  *
    1231             :  * This is kthread_cancel_work_sync() for delayed works.
    1232             :  *
    1233             :  * Return: %true if @dwork was pending, %false otherwise.
    1234             :  */
    1235           0 : bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
    1236             : {
    1237           0 :         return __kthread_cancel_work_sync(&dwork->work, true);
    1238             : }
    1239             : EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
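
Because my_poll_fn() in the sketch above re-queues itself, only the _sync
cancel reliably breaks that loop: queuing is blocked while the cancel is in
progress, so the callback cannot re-arm itself. A sketch under those
assumptions:

    static void my_dev_stop_polling(struct my_dev *dev)
    {
            /* After this returns, poll_work is neither pending nor running. */
            kthread_cancel_delayed_work_sync(&dev->poll_work);
    }
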
    1240             : 
    1241             : /**
    1242             :  * kthread_flush_worker - flush all current works on a kthread_worker
    1243             :  * @worker: worker to flush
    1244             :  *
    1245             :  * Wait until all currently executing or pending works on @worker are
    1246             :  * finished.
    1247             :  */
    1248           0 : void kthread_flush_worker(struct kthread_worker *worker)
    1249             : {
    1250           0 :         struct kthread_flush_work fwork = {
    1251             :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1252           0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1253             :         };
    1254             : 
    1255           0 :         kthread_queue_work(worker, &fwork.work);
    1256           0 :         wait_for_completion(&fwork.done);
    1257           0 : }
    1258             : EXPORT_SYMBOL_GPL(kthread_flush_worker);
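
Sketch: before a hypothetical reconfiguration, my_dev could drain everything
already queued on its worker without tearing the worker down.

    static void my_dev_drain(struct my_dev *dev)
    {
            /* Returns once all previously queued work has finished. */
            kthread_flush_worker(dev->worker);
    }
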
    1259             : 
    1260             : /**
    1261             :  * kthread_destroy_worker - destroy a kthread worker
    1262             :  * @worker: worker to be destroyed
    1263             :  *
    1264             :  * Flush and destroy @worker.  The simple flush is enough because the kthread
    1265             :  * worker API is used only in trivial scenarios.  There are no multi-step state
    1266             :  * machines needed.
    1267             :  */
    1268           0 : void kthread_destroy_worker(struct kthread_worker *worker)
    1269             : {
    1270           0 :         struct task_struct *task;
    1271             : 
    1272           0 :         task = worker->task;
    1273           0 :         if (WARN_ON(!task))
    1274             :                 return;
    1275             : 
    1276           0 :         kthread_flush_worker(worker);
    1277           0 :         kthread_stop(task);
    1278           0 :         WARN_ON(!list_empty(&worker->work_list));
    1279           0 :         kfree(worker);
    1280             : }
    1281             : EXPORT_SYMBOL(kthread_destroy_worker);
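
Pairing with the my_dev_start() sketch above, a hedged teardown sequence:
self-requeueing items are canceled first, since kthread_destroy_worker() only
flushes what is currently queued before stopping the worker thread.

    static void my_dev_exit(struct my_dev *dev)
    {
            kthread_cancel_delayed_work_sync(&dev->poll_work);
            kthread_destroy_worker(dev->worker);
            dev->worker = NULL;
    }
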
    1282             : 
    1283             : /**
    1284             :  * kthread_use_mm - make the calling kthread operate on an address space
    1285             :  * @mm: address space to operate on
    1286             :  */
    1287           0 : void kthread_use_mm(struct mm_struct *mm)
    1288             : {
    1289           0 :         struct mm_struct *active_mm;
    1290           0 :         struct task_struct *tsk = current;
    1291             : 
    1292           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1293           0 :         WARN_ON_ONCE(tsk->mm);
    1294             : 
    1295           0 :         task_lock(tsk);
    1296             :         /* Hold off tlb flush IPIs while switching mm's */
    1297           0 :         local_irq_disable();
    1298           0 :         active_mm = tsk->active_mm;
    1299           0 :         if (active_mm != mm) {
    1300           0 :                 mmgrab(mm);
    1301           0 :                 tsk->active_mm = mm;
    1302             :         }
    1303           0 :         tsk->mm = mm;
    1304           0 :         membarrier_update_current_mm(mm);
    1305           0 :         switch_mm_irqs_off(active_mm, mm, tsk);
    1306           0 :         local_irq_enable();
    1307           0 :         task_unlock(tsk);
    1308             : #ifdef finish_arch_post_lock_switch
    1309             :         finish_arch_post_lock_switch();
    1310             : #endif
    1311             : 
    1312             :         /*
    1313             :          * When a kthread starts operating on an address space, the loop
    1314             :  * in membarrier_{private,global}_expedited() may not observe that
    1315             :  * tsk->mm has been set, and thus not issue an IPI. Membarrier requires a
    1316             :          * memory barrier after storing to tsk->mm, before accessing
    1317             :          * user-space memory. A full memory barrier for membarrier
    1318             :          * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
    1319             :          * mmdrop(), or explicitly with smp_mb().
    1320             :          */
    1321           0 :         if (active_mm != mm)
    1322           0 :                 mmdrop(active_mm);
    1323             :         else
    1324           0 :                 smp_mb();
    1325             : 
    1326           0 :         to_kthread(tsk)->oldfs = force_uaccess_begin();
    1327           0 : }
    1328             : EXPORT_SYMBOL_GPL(kthread_use_mm);
    1329             : 
    1330             : /**
    1331             :  * kthread_unuse_mm - reverse the effect of kthread_use_mm()
    1332             :  * @mm: address space to operate on
    1333             :  */
    1334           0 : void kthread_unuse_mm(struct mm_struct *mm)
    1335             : {
    1336           0 :         struct task_struct *tsk = current;
    1337             : 
    1338           0 :         WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
    1339           0 :         WARN_ON_ONCE(!tsk->mm);
    1340             : 
    1341           0 :         force_uaccess_end(to_kthread(tsk)->oldfs);
    1342             : 
    1343           0 :         task_lock(tsk);
    1344             :         /*
    1345             :          * When a kthread stops operating on an address space, the loop
    1346             :  * in membarrier_{private,global}_expedited() may not observe that
    1347             :  * tsk->mm has been cleared, and thus not issue an IPI. Membarrier requires a
    1348             :          * memory barrier after accessing user-space memory, before
    1349             :          * clearing tsk->mm.
    1350             :          */
    1351           0 :         smp_mb__after_spinlock();
    1352           0 :         sync_mm_rss(mm);
    1353           0 :         local_irq_disable();
    1354           0 :         tsk->mm = NULL;
    1355           0 :         membarrier_update_current_mm(NULL);
    1356             :         /* active_mm is still 'mm' */
    1357           0 :         enter_lazy_tlb(mm, tsk);
    1358           0 :         local_irq_enable();
    1359           0 :         task_unlock(tsk);
    1360           0 : }
    1361             : EXPORT_SYMBOL_GPL(kthread_unuse_mm);
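
A hedged sketch of the borrow-an-mm pattern (the mm, user_ptr and buf
parameters are assumptions): a kthread that already holds a reference on @mm
adopts that user address space so ordinary uaccess works, then drops it again.

    #include <linux/kthread.h>
    #include <linux/uaccess.h>
    #include <linux/sched/mm.h>

    static int my_copy_to_user_from_kthread(struct mm_struct *mm,
                                            void __user *user_ptr,
                                            const void *buf, size_t len)
    {
            int ret = 0;

            kthread_use_mm(mm);             /* current->mm is now @mm */
            if (copy_to_user(user_ptr, buf, len))
                    ret = -EFAULT;
            kthread_unuse_mm(mm);           /* back to the lazy/anonymous mm */

            return ret;
    }
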
    1362             : 
    1363             : #ifdef CONFIG_BLK_CGROUP
    1364             : /**
    1365             :  * kthread_associate_blkcg - associate blkcg to current kthread
    1366             :  * @css: the cgroup info
    1367             :  *
    1368             :  * Current thread must be a kthread. The thread is running jobs on behalf of
    1369             :  * other threads. In some cases, we expect the jobs to carry the cgroup info
    1370             :  * of the original threads instead of that of the current thread. This
    1371             :  * function stores the original thread's cgroup info in the current kthread
    1372             :  * context for later retrieval.
    1373             :  */
    1374             : void kthread_associate_blkcg(struct cgroup_subsys_state *css)
    1375             : {
    1376             :         struct kthread *kthread;
    1377             : 
    1378             :         if (!(current->flags & PF_KTHREAD))
    1379             :                 return;
    1380             :         kthread = to_kthread(current);
    1381             :         if (!kthread)
    1382             :                 return;
    1383             : 
    1384             :         if (kthread->blkcg_css) {
    1385             :                 css_put(kthread->blkcg_css);
    1386             :                 kthread->blkcg_css = NULL;
    1387             :         }
    1388             :         if (css) {
    1389             :                 css_get(css);
    1390             :                 kthread->blkcg_css = css;
    1391             :         }
    1392             : }
    1393             : EXPORT_SYMBOL(kthread_associate_blkcg);
    1394             : 
    1395             : /**
    1396             :  * kthread_blkcg - get associated blkcg css of current kthread
    1397             :  *
    1398             :  * Current thread must be a kthread.
    1399             :  */
    1400             : struct cgroup_subsys_state *kthread_blkcg(void)
    1401             : {
    1402             :         struct kthread *kthread;
    1403             : 
    1404             :         if (current->flags & PF_KTHREAD) {
    1405             :                 kthread = to_kthread(current);
    1406             :                 if (kthread)
    1407             :                         return kthread->blkcg_css;
    1408             :         }
    1409             :         return NULL;
    1410             : }
    1411             : EXPORT_SYMBOL(kthread_blkcg);
    1412             : #endif
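
A sketch loosely modeled on the loop driver's use of this API (struct my_cmd,
its blkcg_css field and my_do_backing_io() are assumptions): the worker
kthread charges the backing I/O for one request to the submitting task's
blkcg, then drops the association.

    struct my_cmd {
            struct cgroup_subsys_state *blkcg_css; /* captured at submit time */
            /* ... request details ... */
    };

    /* Placeholder for the real backing-file I/O. */
    static void my_do_backing_io(struct my_cmd *cmd);

    static void my_handle_cmd(struct my_cmd *cmd)
    {
            kthread_associate_blkcg(cmd->blkcg_css);
            my_do_backing_io(cmd);          /* I/O accounted to that blkcg */
            kthread_associate_blkcg(NULL);  /* drop the reference taken above */
    }
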

Generated by: LCOV version 1.14