@@ -6610,8 +6610,6 @@ static inline int should_resched(void)
 
 static void __cond_resched(void)
 {
-	__might_sleep(__FILE__, __LINE__, 0);
-
 	add_preempt_count(PREEMPT_ACTIVE);
 	schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
@@ -6628,14 +6626,14 @@ int __sched _cond_resched(void)
 EXPORT_SYMBOL(_cond_resched);
 
 /*
- * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
  *
  * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
-int cond_resched_lock(spinlock_t *lock)
+int __cond_resched_lock(spinlock_t *lock)
 {
 	int resched = should_resched();
 	int ret = 0;
@@ -6651,9 +6649,9 @@ int cond_resched_lock(spinlock_t *lock)
 	}
 	return ret;
 }
-EXPORT_SYMBOL(cond_resched_lock);
+EXPORT_SYMBOL(__cond_resched_lock);
 
-int __sched cond_resched_softirq(void)
+int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
@@ -6665,7 +6663,7 @@ int __sched cond_resched_softirq(void)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(cond_resched_softirq);
+EXPORT_SYMBOL(__cond_resched_softirq);
 
 /**
  * yield - yield the current processor to other threads.
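The comment in the hunk above describes the behaviour callers rely on: if a reschedule is pending, the given lock is dropped, schedule() runs, and the lock is reacquired before the function returns. As a minimal sketch of a typical caller, the snippet below walks a long list under a spinlock and yields periodically via the usual cond_resched_lock() interface; the prune_all_entries(), my_lock, my_list, and struct my_entry names are hypothetical and only stand in for whatever data a real caller protects.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

struct my_entry {
	struct list_head node;
};

/* Hypothetical example: free every entry, yielding between iterations. */
static void prune_all_entries(void)
{
	spin_lock(&my_lock);
	while (!list_empty(&my_list)) {
		struct my_entry *e;

		e = list_first_entry(&my_list, struct my_entry, node);
		list_del(&e->node);
		kfree(e);	/* kfree() does not sleep, so it is safe here */

		/*
		 * If a reschedule is pending, this drops my_lock, calls
		 * schedule(), and retakes my_lock before returning, so the
		 * list_empty() check above always runs with the lock held.
		 */
		cond_resched_lock(&my_lock);
	}
	spin_unlock(&my_lock);
}

Because cond_resched_lock() may drop the lock, the caller must not cache pointers into the protected structure across the call; the loop above re-reads the list head on every iteration for exactly that reason.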