
Commit 85ba2d8

Roland McGrath authored and torvalds committed
tracehook: wait_task_inactive
This extends wait_task_inactive() with a new argument so it can be used in a "soft" mode where it will check for the task changing state unexpectedly and back off. There is no change to existing callers. This lays the groundwork to allow robust, noninvasive tracing that can try to sample a blocked thread but back off safely if it wakes up.

Signed-off-by: Roland McGrath <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Reviewed-by: Ingo Molnar <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 1f5a4ad commit 85ba2d8
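
For context, a minimal sketch (not part of this commit) of the "soft" usage pattern the message describes: a hypothetical tracer observes a thread's state, asks wait_task_inactive() to wait only while that state holds, and backs off if the thread wakes up. The function try_sample_blocked() and the do_sample() helper are assumptions for illustration, not kernel APIs.

/* Hypothetical caller sketch -- not part of this commit. */
static int try_sample_blocked(struct task_struct *p)
{
	long state = p->state;		/* the state we just observed */

	if (state == TASK_RUNNING)
		return -EAGAIN;		/* not blocked, nothing to sample */

	/* Wait for @p to get off its CPU, but back off if its state changes. */
	if (!wait_task_inactive(p, state))
		return -EAGAIN;		/* it woke up; try again later */

	do_sample(p);			/* assumed sampling helper */
	return 0;
}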

5 files changed: +37 −8 lines changed

arch/ia64/kernel/perfmon.c  (+2 −2)
@@ -2626,7 +2626,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	/*
 	 * make sure the task is off any CPU
 	 */
-	wait_task_inactive(task);
+	wait_task_inactive(task, 0);
 
 	/* more to come... */
 
@@ -4774,7 +4774,7 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
 
 	UNPROTECT_CTX(ctx, flags);
 
-	wait_task_inactive(task);
+	wait_task_inactive(task, 0);
 
 	PROTECT_CTX(ctx, flags);
 

include/linux/sched.h  (+6 −2)
@@ -1882,9 +1882,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-#define wait_task_inactive(p)	do { } while (0)
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+					       long match_state)
+{
+	return 1;
+}
 #endif
 
 #define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)

kernel/kthread.c  (+1 −1)
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
 		return;
 	}
 	/* Must have done schedule() in kthread() before we set_task_cpu */
-	wait_task_inactive(k);
+	wait_task_inactive(k, 0);
 	set_task_cpu(k, cpu);
 	k->cpus_allowed = cpumask_of_cpu(cpu);
 	k->rt.nr_cpus_allowed = 1;

kernel/ptrace.c  (+1 −1)
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	read_unlock(&tasklist_lock);
 
 	if (!ret && !kill)
-		wait_task_inactive(child);
+		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
 
 	/* All systems go.. */
 	return ret;

kernel/sched.c  (+27 −2)
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change.  If it changes, i.e. @p might have woken up,
+ * then return zero.  When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count).  If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
 	int running, on_rq;
+	unsigned long ncsw;
 	struct rq *rq;
 
 	for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
 		 * return false if the runqueue has changed and p
 		 * is actually now running somewhere else!
 		 */
-		while (task_running(rq, p))
+		while (task_running(rq, p)) {
+			if (match_state && unlikely(p->state != match_state))
+				return 0;
 			cpu_relax();
+		}
 
 		/*
 		 * Ok, time to look more closely! We need the rq
@@ -1910,8 +1921,20 @@ void wait_task_inactive(struct task_struct *p)
 		rq = task_rq_lock(p, &flags);
 		running = task_running(rq, p);
 		on_rq = p->se.on_rq;
+		ncsw = 0;
+		if (!match_state || p->state == match_state) {
+			ncsw = p->nivcsw + p->nvcsw;
+			if (unlikely(!ncsw))
+				ncsw = 1;
+		}
 		task_rq_unlock(rq, &flags);
 
+		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
 		/*
 		 * Was it really running after all now that we
 		 * checked with the proper locks actually held?
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		break;
 	}
+
+	return ncsw;
 }
 
 /***
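
A second illustration-only sketch of the double-call pattern described in the new comment above: the returned switch count lets a caller confirm that the thread never ran between two checks. The names sample_if_still_blocked() and snapshot_regs() are assumptions, not kernel APIs.

/* Hypothetical use of the returned context-switch count -- not in this commit. */
static int sample_if_still_blocked(struct task_struct *p, long state)
{
	unsigned long ncsw, ncsw_again;

	ncsw = wait_task_inactive(p, state);
	if (!ncsw)
		return -EAGAIN;			/* state changed; back off */

	snapshot_regs(p);			/* assumed helper: read the blocked thread */

	/* An unchanged count means @p stayed unscheduled while we looked at it. */
	ncsw_again = wait_task_inactive(p, state);
	if (ncsw_again != ncsw)
		return -EAGAIN;

	return 0;
}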
