patch-2.4.7 linux/kernel/sched.c
- Lines: 87
- Date: Tue Jul 17 18:30:50 2001
- Orig file: v2.4.6/linux/kernel/sched.c
- Orig date: Tue Jul 3 17:08:22 2001
diff -u --recursive --new-file v2.4.6/linux/kernel/sched.c linux/kernel/sched.c
@@ -25,6 +25,7 @@
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/completion.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -543,11 +544,6 @@
release_kernel_lock(prev, this_cpu);
- /* Do "administrative" work here while we don't hold any locks */
- if (softirq_pending(this_cpu))
- goto handle_softirq;
-handle_softirq_back:
-
/*
* 'sched_data' is protected by the fact that we can run
* only one process per CPU.
@@ -611,8 +607,11 @@
#endif
spin_unlock_irq(&runqueue_lock);
- if (prev == next)
+ if (prev == next) {
+ /* We won't go through the normal tail, so do this by hand */
+ prev->policy &= ~SCHED_YIELD;
goto same_process;
+ }
#ifdef CONFIG_SMP
/*
@@ -689,14 +688,12 @@
goto repeat_schedule;
still_running:
+ if (!(prev->cpus_allowed & (1UL << this_cpu)))
+ goto still_running_back;
c = goodness(prev, this_cpu, prev->active_mm);
next = prev;
goto still_running_back;
-handle_softirq:
- do_softirq();
- goto handle_softirq_back;
-
move_rr_last:
if (!prev->counter) {
prev->counter = NICE_TO_TICKS(prev->nice);
@@ -763,6 +760,36 @@
__wake_up_common(q, mode, nr, 1);
wq_read_unlock_irqrestore(&q->lock, flags);
}
+}
+
+void complete(struct completion *x)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+}
+
+void wait_for_completion(struct completion *x)
+{
+ spin_lock_irq(&x->wait.lock);
+ if (!x->done) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ wait.flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue_tail(&x->wait, &wait);
+ do {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&x->wait.lock);
+ schedule();
+ spin_lock_irq(&x->wait.lock);
+ } while (!x->done);
+ __remove_wait_queue(&x->wait, &wait);
+ }
+ x->done--;
+ spin_unlock_irq(&x->wait.lock);
}
#define SLEEP_ON_VAR \
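
The last hunk exports the new completion primitives, complete() and wait_for_completion(), built directly on the scheduler's wait queues. Below is a minimal usage sketch, assuming the DECLARE_COMPLETION() initializer from the new <linux/completion.h>; the thread function, stop flag, and shutdown routine are hypothetical names for illustration and are not part of this patch.

/*
 * Sketch only: a 2.4-style kernel thread that signals its exit through
 * the completion API added by this patch.  Only DECLARE_COMPLETION(),
 * complete() and wait_for_completion() come from <linux/completion.h>;
 * everything else here is a made-up example.
 */
#include <linux/sched.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(thread_done);	/* done = 0, empty wait queue */
static volatile int stop_thread;

static int example_thread(void *unused)
{
	while (!stop_thread) {
		/* ... do periodic work, then sleep for about a second ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	complete(&thread_done);		/* wake exactly one waiter */
	return 0;
}

static void example_shutdown(void)
{
	stop_thread = 1;
	/* Sleeps in TASK_UNINTERRUPTIBLE until the thread calls complete() */
	wait_for_completion(&thread_done);
}

Because complete() increments x->done under the wait-queue lock and wait_for_completion() only sleeps while !x->done, a completion posted before the waiter arrives is not lost; this closes the race that plain wake_up()/sleep_on() pairings are exposed to.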