
Merge branch 'pm-freezer' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into pm-freezer

* 'pm-freezer' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc: (24 commits)
  freezer: fix wait_event_freezable/__thaw_task races
  freezer: kill unused set_freezable_with_signal()
  dmatest: don't use set_freezable_with_signal()
  usb_storage: don't use set_freezable_with_signal()
  freezer: remove unused @sig_only from freeze_task()
  freezer: use lock_task_sighand() in fake_signal_wake_up()
  freezer: restructure __refrigerator()
  freezer: fix set_freezable[_with_signal]() race
  freezer: remove should_send_signal() and update frozen()
  freezer: remove now unused TIF_FREEZE
  freezer: make freezing() test freeze conditions in effect instead of TIF_FREEZE
  cgroup_freezer: prepare for removal of TIF_FREEZE
  freezer: clean up freeze_processes() failure path
  freezer: kill PF_FREEZING
  freezer: test freezable conditions while holding freezer_lock
  freezer: make freezing indicate freeze condition in effect
  freezer: use dedicated lock instead of task_lock() + memory barrier
  freezer: don't distinguish nosig tasks on thaw
  freezer: remove racy clear_freeze_flag() and set PF_NOFREEZE on dead tasks
  freezer: rename thaw_process() to __thaw_task() and simplify the implementation
  ...
Rafael J. Wysocki, 13 years ago
Commit 986b11c3ee
57 files changed, 303 insertions(+), 401 deletions(-)
  1. Documentation/power/freezing-of-tasks.txt (+7, -7)
  2. arch/alpha/include/asm/thread_info.h (+0, -2)
  3. arch/arm/include/asm/thread_info.h (+0, -2)
  4. arch/avr32/include/asm/thread_info.h (+0, -2)
  5. arch/blackfin/include/asm/thread_info.h (+0, -2)
  6. arch/cris/include/asm/thread_info.h (+0, -2)
  7. arch/frv/include/asm/thread_info.h (+0, -2)
  8. arch/h8300/include/asm/thread_info.h (+0, -2)
  9. arch/ia64/include/asm/thread_info.h (+0, -2)
  10. arch/m32r/include/asm/thread_info.h (+0, -2)
  11. arch/m68k/include/asm/thread_info.h (+0, -1)
  12. arch/microblaze/include/asm/thread_info.h (+0, -2)
  13. arch/mips/include/asm/thread_info.h (+0, -2)
  14. arch/mn10300/include/asm/thread_info.h (+0, -2)
  15. arch/parisc/include/asm/thread_info.h (+0, -2)
  16. arch/powerpc/include/asm/thread_info.h (+0, -2)
  17. arch/s390/include/asm/thread_info.h (+0, -2)
  18. arch/sh/include/asm/thread_info.h (+0, -2)
  19. arch/sparc/include/asm/thread_info_32.h (+0, -2)
  20. arch/sparc/include/asm/thread_info_64.h (+0, -2)
  21. arch/um/include/asm/thread_info.h (+0, -2)
  22. arch/unicore32/include/asm/thread_info.h (+0, -2)
  23. arch/x86/include/asm/thread_info.h (+0, -2)
  24. arch/xtensa/include/asm/thread_info.h (+0, -2)
  25. drivers/bluetooth/btmrvl_main.c (+0, -2)
  26. drivers/dma/dmatest.c (+27, -19)
  27. drivers/mfd/twl6030-irq.c (+0, -2)
  28. drivers/net/irda/stir4200.c (+1, -1)
  29. drivers/platform/x86/thinkpad_acpi.c (+6, -9)
  30. drivers/staging/rts_pstor/rtsx.c (+0, -2)
  31. drivers/usb/storage/usb.c (+7, -6)
  32. fs/btrfs/async-thread.c (+1, -1)
  33. fs/btrfs/disk-io.c (+2, -6)
  34. fs/ext4/super.c (+1, -2)
  35. fs/fs-writeback.c (+1, -3)
  36. fs/gfs2/log.c (+2, -2)
  37. fs/gfs2/quota.c (+2, -2)
  38. fs/jbd/journal.c (+1, -1)
  39. fs/jbd2/journal.c (+1, -1)
  40. fs/jfs/jfs_logmgr.c (+1, -1)
  41. fs/jfs/jfs_txnmgr.c (+2, -2)
  42. fs/nilfs2/segment.c (+1, -1)
  43. fs/xfs/xfs_buf.c (+1, -1)
  44. include/linux/freezer.h (+38, -70)
  45. include/linux/kthread.h (+1, -0)
  46. include/linux/sched.h (+1, -3)
  47. kernel/cgroup_freezer.c (+28, -35)
  48. kernel/exit.c (+1, -2)
  49. kernel/fork.c (+0, -1)
  50. kernel/freezer.c (+104, -99)
  51. kernel/kthread.c (+26, -1)
  52. kernel/power/hibernate.c (+2, -13)
  53. kernel/power/process.c (+31, -46)
  54. kernel/power/suspend.c (+3, -5)
  55. kernel/power/user.c (+1, -3)
  56. mm/backing-dev.c (+2, -6)
  57. mm/oom_kill.c (+1, -1)

+ 7 - 7
Documentation/power/freezing-of-tasks.txt

@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called.  It executes
 try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
 either wakes them up, if they are kernel threads, or sends fake signals to them,
 if they are user space processes.  A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
 kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
 to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
 Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
 defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
 User space processes are generally frozen before kernel threads.
 
-It is not recommended to call refrigerator() directly.  Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly.  Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
 After the system memory state has been restored from a hibernation image and
 devices have been reinitialized, the function thaw_processes() is called in
 order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
 
 III. Which kernel threads are freezable?
 
 Kernel threads are not freezable by default.  However, a kernel thread may clear
 PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged).  From this point it is regarded as freezable
+directly is not allowed).  From this point it is regarded as freezable
 and must call try_to_freeze() in a suitable place.
 
 IV. Why do we do that?
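
For reference, a minimal sketch (not part of this commit; the thread name and work details are hypothetical) of the contract the documentation above describes for a freezable kernel thread: opt in with set_freezable(), then call try_to_freeze() at a point where blocking is safe.

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int example_flush_thread(void *unused)
	{
		set_freezable();	/* clear PF_NOFREEZE, as section III describes */

		while (!kthread_should_stop()) {
			/* ... do one pass of periodic work ... */

			try_to_freeze();	/* enter __refrigerator() if a freeze is pending */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}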

+ 0 - 2
arch/alpha/include/asm/thread_info.h

@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TIF_UAC_SIGBUS		12	/* ! userspace part of 'osf_sysinfo' */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	14	/* restore signal mask in do_signal */
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 /* Work to do on interrupt/exception return.  */
 #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \

+ 0 - 2
arch/arm/include/asm/thread_info.h

@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
 
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 

+ 0 - 2
arch/avr32/include/asm/thread_info.h

@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK	7	/* restore signal mask in do_signal */
 #define TIF_CPU_GOING_TO_SLEEP	8	/* CPU is entering sleep 0 mode */
 #define TIF_NOTIFY_RESUME	9	/* callback before returning to user */
-#define TIF_FREEZE		29
 #define TIF_DEBUG		30	/* debugging enabled */
 #define TIF_USERSPACE		31      /* true if FS sets userspace */
 
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /* Note: The masks below must never span more than 16 bits! */
 

+ 0 - 2
arch/blackfin/include/asm/thread_info.h

@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
 					   TIF_NEED_RESCHED */
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
-#define TIF_FREEZE		6	/* is freezing for suspend */
 #define TIF_IRQ_SYNC		7	/* sync pipeline stage */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_IRQ_SYNC		(1<<TIF_IRQ_SYNC)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)

+ 0 - 2
arch/cris/include/asm/thread_info.h

@@ -86,7 +86,6 @@ struct thread_info {
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */

+ 0 - 2
arch/frv/include/asm/thread_info.h

@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */

+ 0 - 2
arch/h8300/include/asm/thread_info.h

@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		4	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME	6	/* callback before returning to user */
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 

+ 0 - 2
arch/ia64/include/asm/thread_info.h

@@ -113,7 +113,6 @@ struct thread_info {
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
 #define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
-#define TIF_FREEZE		20	/* is freezing for suspend */
 #define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)
 
 /* "work to do on user-return" bits */

+ 0 - 2
arch/m32r/include/asm/thread_info.h

@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */

+ 0 - 1
arch/m68k/include/asm/thread_info.h

@@ -103,7 +103,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_DELAYED_TRACE	14	/* single step a syscall */
 #define TIF_SYSCALL_TRACE	15	/* syscall trace active */
 #define TIF_MEMDIE		16	/* is terminating due to OOM killer */
-#define TIF_FREEZE		17	/* thread is freezing for suspend */
 #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal */
 
 #endif	/* _ASM_M68K_THREAD_INFO_H */

+ 0 - 2
arch/microblaze/include/asm/thread_info.h

@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		6	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	9       /* syscall auditing active */
 #define TIF_SECCOMP		10      /* secure computing */
-#define TIF_FREEZE		14	/* Freezing for suspend */
 
 /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_POLLING_NRFLAG	16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_IRET		(1 << TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 

+ 0 - 2
arch/mips/include/asm/thread_info.h

@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19
 #define TIF_FIXADE		20	/* Fix address errors in software */
 #define TIF_LOGADE		21	/* Log address errors to syslog */
 #define TIF_32BIT_REGS		22	/* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_FIXADE		(1<<TIF_FIXADE)
 #define _TIF_LOGADE		(1<<TIF_LOGADE)
 #define _TIF_32BIT_REGS		(1<<TIF_32BIT_REGS)

+ 0 - 2
arch/mn10300/include/asm/thread_info.h

@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
 #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
-#define TIF_FREEZE		18	/* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	+(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	+(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
 #define _TIF_SINGLESTEP		+(1 << TIF_SINGLESTEP)
 #define _TIF_RESTORE_SIGMASK	+(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	+(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		+(1 << TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */

+ 0 - 2
arch/parisc/include/asm/thread_info.h

@@ -58,7 +58,6 @@ struct thread_info {
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */
-#define TIF_FREEZE		7	/* is freezing for suspend */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9	/* single stepping? */
 #define TIF_BLOCKSTEP		10	/* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)

+ 0 - 2
arch/powerpc/include/asm/thread_info.h

@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
 #define TIF_NOERROR		12	/* Force successful syscall return */
 #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
-#define TIF_FREEZE		14	/* Freezing for suspend */
 #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 #define TIF_RUNLATCH		16	/* Is the runlatch enabled? */
 
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
 #define _TIF_NOERROR		(1<<TIF_NOERROR)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \

+ 0 - 2
arch/s390/include/asm/thread_info.h

@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP		20	/* This task is single stepped */
-#define TIF_FREEZE		21	/* thread is freezing for suspend */
 
 #define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #ifdef CONFIG_64BIT
 #define is_32bit_task()		(test_thread_flag(TIF_31BIT))

+ 0 - 2
arch/sh/include/asm/thread_info.h

@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_FREEZE		19	/* Freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 /*
  * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we

+ 0 - 2
arch/sparc/include/asm/thread_info_32.h

@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling
 					 * TIF_NEED_RESCHED */
 #define TIF_MEMDIE		10	/* is terminating due to OOM killer */
-#define TIF_FREEZE		11	/* is freezing for suspend */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \
 					 _TIF_SIGPENDING | \
 					 _TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #endif /* __KERNEL__ */
 

+ 0 - 2
arch/sparc/include/asm/thread_info_64.h

@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 /* flag bit 12 is available */
 #define TIF_MEMDIE		13	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	14
-#define TIF_FREEZE		15	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
 				 _TIF_DO_NOTIFY_RESUME_MASK | \

+ 0 - 2
arch/um/include/asm/thread_info.h

@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_AUDIT	6
 #define TIF_RESTORE_SIGMASK	7
-#define TIF_FREEZE		16	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 
 #endif

+ 0 - 2
arch/unicore32/include/asm/thread_info.h

@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_MEMDIE		18
-#define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 
 /*

+ 0 - 2
arch/x86/include/asm/thread_info.h

@@ -90,7 +90,6 @@ struct thread_info {
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_DEBUG		21	/* uses debug registers */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
-#define TIF_FREEZE		23	/* is freezing for suspend */
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
@@ -112,7 +111,6 @@ struct thread_info {
 #define _TIF_FORK		(1 << TIF_FORK)
 #define _TIF_DEBUG		(1 << TIF_DEBUG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE		(1 << TIF_FREEZE)
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)

+ 0 - 2
arch/xtensa/include/asm/thread_info.h

@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_FREEZE		17	/* is freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_IRET		(1<<TIF_IRET)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE		(1<<TIF_FREEZE)
 
 #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
 #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */

+ 0 - 2
drivers/bluetooth/btmrvl_main.c

@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
 
 	init_waitqueue_entry(&wait, current);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		add_wait_queue(&thread->wait_q, &wait);
 

+ 27 - 19
drivers/dma/dmatest.c

@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool			done;
+	wait_queue_head_t	*wait;
+};
+
+static void dmatest_callback(void *arg)
 {
-	complete(completion);
+	struct dmatest_done *done = arg;
+
+	done->done = true;
+	wake_up_all(done->wait);
 }
 
 /*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
  */
 static int dmatest_func(void *data)
 {
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread	*thread = data;
+	struct dmatest_done	done = { .wait = &done_wait };
 	struct dma_chan		*chan;
 	const char		*thread_name;
 	unsigned int		src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
 	int			i;
 
 	thread_name = current->comm;
-	set_freezable_with_signal();
+	set_freezable();
 
 	ret = -ENOMEM;
 
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t dma_srcs[src_cnt];
 		dma_addr_t dma_dsts[dst_cnt];
-		struct completion cmp;
-		unsigned long start, tmo, end = 0 /* compiler... */;
-		bool reload = true;
 		u8 align = 0;
 
 		total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		init_completion(&cmp);
+		done.done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &cmp;
+		tx->callback_param = &done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		do {
-			start = jiffies;
-			if (reload)
-				end = start + msecs_to_jiffies(timeout);
-			else if (end <= start)
-				end = start + 1;
-			tmo = wait_for_completion_interruptible_timeout(&cmp,
-								end - start);
-			reload = try_to_freeze();
-		} while (tmo == -ERESTARTSYS);
+		wait_event_freezable_timeout(done_wait, done.done,
+					     msecs_to_jiffies(timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (tmo == 0) {
+		if (!done.done) {
+			/*
+			 * We're leaving the timed out dma operation with
+			 * dangling pointer to done_wait.  To make this
+			 * correct, we'll need to allocate wait_done for
+			 * each test iteration and perform "who's gonna
+			 * free it this time?" dancing.  For now, just
+			 * leave it dangling.
+			 */
 			pr_warning("%s: #%u: test timed out\n",
 				   thread_name, total_tests - 1);
 			failed_tests++;
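
The conversion above replaces a struct completion with an open-coded "done" flag plus wait queue so the waiting thread can use wait_event_freezable_timeout(). A stripped-down sketch of the same pattern (the names here are hypothetical, not taken from dmatest):

	#include <linux/freezer.h>
	#include <linux/wait.h>
	#include <linux/jiffies.h>

	struct example_done {
		bool			done;
		wait_queue_head_t	*wait;
	};

	static void example_async_callback(void *arg)
	{
		struct example_done *done = arg;

		done->done = true;
		wake_up_all(done->wait);	/* wake the freezable waiter */
	}

	static void example_submit_and_wait(void)
	{
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
		struct example_done done = { .wait = &done_wait };

		/* ... submit async work, passing &done as the callback argument ... */

		/* Sleeps freezably; returns on completion, timeout or after a thaw. */
		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(3000));
	}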

+ 0 - 2
drivers/mfd/twl6030-irq.c

@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
 	static const unsigned max_i2c_errors = 100;
 	int ret;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (!kthread_should_stop()) {
 		int i;
 		union {

+ 1 - 1
drivers/net/irda/stir4200.c

@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
 
 			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
 
-			refrigerator();
+			try_to_freeze();
 
 			if (change_speed(stir, stir->speed))
 				break;

+ 6 - 9
drivers/platform/x86/thinkpad_acpi.c

@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
 	u32 poll_mask, event_mask;
 	unsigned int si, so;
 	unsigned long t;
-	unsigned int change_detector, must_reset;
+	unsigned int change_detector;
 	unsigned int poll_freq;
+	bool was_frozen;
 
 	mutex_lock(&hotkey_thread_mutex);
 
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
 				t = 100;	/* should never happen... */
 		}
 		t = msleep_interruptible(t);
-		if (unlikely(kthread_should_stop()))
+		if (unlikely(kthread_freezable_should_stop(&was_frozen)))
 			break;
-		must_reset = try_to_freeze();
-		if (t > 0 && !must_reset)
+
+		if (t > 0 && !was_frozen)
 			continue;
 
 		mutex_lock(&hotkey_thread_data_mutex);
-		if (must_reset || hotkey_config_change != change_detector) {
+		if (was_frozen || hotkey_config_change != change_detector) {
 			/* forget old state on thaw or config change */
 			si = so;
 			t = 0;
@@ -2528,10 +2529,6 @@ exit:
 static void hotkey_poll_stop_sync(void)
 {
 	if (tpacpi_hotkey_task) {
-		if (frozen(tpacpi_hotkey_task) ||
-		    freezing(tpacpi_hotkey_task))
-			thaw_process(tpacpi_hotkey_task);
-
 		kthread_stop(tpacpi_hotkey_task);
 		tpacpi_hotkey_task = NULL;
 		mutex_lock(&hotkey_thread_mutex);
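
kthread_freezable_should_stop() is added by this series (see include/linux/kthread.h and kernel/kthread.c in the file list): it freezes the calling kthread when required and, through the optional bool pointer, reports whether a freeze happened so the thread can discard stale state, as hotkey_kthread() does above. A minimal sketch with hypothetical names:

	#include <linux/freezer.h>
	#include <linux/kthread.h>
	#include <linux/delay.h>

	static int example_poll_thread(void *data)
	{
		bool was_frozen;

		set_freezable();

		for (;;) {
			msleep_interruptible(1000);

			/* Freezes here if needed; returns true once kthread_stop() is pending. */
			if (kthread_freezable_should_stop(&was_frozen))
				break;

			if (was_frozen)
				continue;	/* cached hardware state is stale after a thaw */

			/* ... poll the hardware and report changes ... */
		}
		return 0;
	}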

+ 0 - 2
drivers/staging/rts_pstor/rtsx.c

@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
 	struct rtsx_chip *chip = dev->chip;
 	struct Scsi_Host *host = rtsx_to_host(dev);
 
-	current->flags |= PF_NOFREEZE;
-
 	for (;;) {
 		if (wait_for_completion_interruptible(&dev->cmnd_ready))
 			break;

+ 7 - 6
drivers/usb/storage/usb.c

@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
 
 	dev_dbg(dev, "device found\n");
 
-	set_freezable_with_signal();
+	set_freezable();
+
 	/*
 	 * Wait for the timeout to expire or for a disconnect
 	 *
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
 	 * fail to freeze, but we can't be non-freezable either. Nor can
 	 * khubd freeze while waiting for scanning to complete as it may
 	 * hold the device lock, causing a hang when suspending devices.
-	 * So we request a fake signal when freezing and use
-	 * interruptible sleep to kick us out of our wait early when
-	 * freezing happens.
+	 * So instead of using wait_event_freezable(), explicitly test
+	 * for (DONT_SCAN || freezing) in interruptible wait and proceed
+	 * if any of DONT_SCAN, freezing or timeout has happened.
	 */
 	if (delay_use > 0) {
 		dev_dbg(dev, "waiting for device to settle "
 				"before scanning\n");
 		wait_event_interruptible_timeout(us->delay_wait,
-				test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
-				delay_use * HZ);
+				test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+				freezing(current), delay_use * HZ);
 	}
 
 	/* If the device is still connected, perform the scanning */
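
As the comment above explains, usb-storage cannot simply use wait_event_freezable() here, so it keeps an interruptible wait and folds freezing(current) into the wake-up condition. A generic sketch of that alternative (function and parameter names are hypothetical):

	#include <linux/freezer.h>
	#include <linux/wait.h>

	/* Wake early on a stop flag, on a freeze request, or when the delay expires. */
	static void example_settle_delay(wait_queue_head_t *wq, unsigned long *flags,
					 int stop_bit, unsigned int delay_secs)
	{
		wait_event_interruptible_timeout(*wq,
				test_bit(stop_bit, flags) || freezing(current),
				delay_secs * HZ);
	}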

+ 1 - 1
fs/btrfs/async-thread.c

@@ -340,7 +340,7 @@ again:
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			spin_unlock_irq(&worker->lock);
 			if (!kthread_should_stop()) {

+ 2 - 6
fs/btrfs/disk-io.c

@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
 			btrfs_run_defrag_inodes(root->fs_info);
 		}
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
 				schedule();
@@ -1635,9 +1633,7 @@ sleep:
 		wake_up_process(root->fs_info->cleaner_kthread);
 		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
 
-		if (freezing(current)) {
-			refrigerator();
-		} else {
+		if (!try_to_freeze()) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop() &&
 			    !btrfs_transaction_blocked(root->fs_info))
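
The btrfs loops above show the idiom that replaces the old "if (freezing(current)) refrigerator(); else ..." pair: try_to_freeze() now both checks for and performs the freeze, and its return value tells the caller whether it just came back from the refrigerator. A generic sketch (hypothetical thread, not btrfs code):

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int example_cleaner_thread(void *arg)
	{
		set_freezable();

		do {
			/* ... perform one cleaning pass ... */

			if (!try_to_freeze()) {
				/* Not freezing: sleep until the next wake-up. */
				set_current_state(TASK_INTERRUPTIBLE);
				if (!kthread_should_stop())
					schedule();
				__set_current_state(TASK_RUNNING);
			}
		} while (!kthread_should_stop());

		return 0;
	}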

+ 1 - 2
fs/ext4/super.c

@@ -2884,8 +2884,7 @@ cont_thread:
 		}
 		mutex_unlock(&eli->li_list_mtx);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
 
 		cur = jiffies;
 		if ((time_after_eq(cur, next_wakeup)) ||

+ 1 - 3
fs/fs-writeback.c

@@ -947,7 +947,7 @@ int bdi_writeback_thread(void *data)
 
 	trace_writeback_thread_start(bdi);
 
-	while (!kthread_should_stop()) {
+	while (!kthread_freezable_should_stop(NULL)) {
 		/*
 		 * Remove own delayed wake-up timer, since we are already awake
 		 * and we'll take care of the preriodic write-back.
@@ -977,8 +977,6 @@ int bdi_writeback_thread(void *data)
 			 */
 			schedule();
 		}
-
-		try_to_freeze();
 	}
 
 	/* Flush any work that raced with us exiting */

+ 2 - 2
fs/gfs2/log.c

@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
 			wake_up(&sdp->sd_log_waitq);
 
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
+
+		try_to_freeze();
 
 		do {
 			prepare_to_wait(&sdp->sd_logd_waitq, &wait,

+ 2 - 2
fs/gfs2/quota.c

@@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data)
 		/* Check for & recover partially truncated inodes */
 		quotad_check_trunc_list(sdp);
 
-		if (freezing(current))
-			refrigerator();
+		try_to_freeze();
+
 		t = min(quotad_timeo, statfs_timeo);
 
 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);

+ 1 - 1
fs/jbd/journal.c

@@ -166,7 +166,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald\n");
 		spin_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&journal->j_state_lock);
 	} else {
 		/*

+ 1 - 1
fs/jbd2/journal.c

@@ -173,7 +173,7 @@ loop:
 		 */
 		jbd_debug(1, "Now suspending kjournald2\n");
 		write_unlock(&journal->j_state_lock);
-		refrigerator();
+		try_to_freeze();
 		write_lock(&journal->j_state_lock);
 	} else {
 		/*

+ 1 - 1
fs/jfs/jfs_logmgr.c

@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
 
 		if (freezing(current)) {
 			spin_unlock_irq(&log_redrive_lock);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&log_redrive_lock);

+ 2 - 2
fs/jfs/jfs_txnmgr.c

@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
 
 		if (freezing(current)) {
 			LAZY_UNLOCK(flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			DECLARE_WAITQUEUE(wq, current);
 
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
 
 		if (freezing(current)) {
 			TXN_UNLOCK();
-			refrigerator();
+			try_to_freeze();
 		} else {
 			set_current_state(TASK_INTERRUPTIBLE);
 			TXN_UNLOCK();

+ 1 - 1
fs/nilfs2/segment.c

@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
 
 	if (freezing(current)) {
 		spin_unlock(&sci->sc_state_lock);
-		refrigerator();
+		try_to_freeze();
 		spin_lock(&sci->sc_state_lock);
 	} else {
 		DEFINE_WAIT(wait);

+ 1 - 1
fs/xfs/xfs_buf.c

@@ -1703,7 +1703,7 @@ xfsbufd(
 
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
-			refrigerator();
+			try_to_freeze();
 		} else {
 			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
 		}

+ 38 - 70
include/linux/freezer.h

@@ -5,71 +5,58 @@
 
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
+extern bool pm_freezing;		/* PM freezing in effect */
+extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
+
 /*
  * Check if a process has been frozen
  */
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
 {
 	return p->flags & PF_FROZEN;
 }
 
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-	return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
-	set_tsk_thread_flag(p, TIF_FREEZE);
-}
+extern bool freezing_slow_path(struct task_struct *p);
 
 /*
- * Sometimes we may need to cancel the previous 'freeze' request
+ * Check if there is a request to freeze a process
  */
-static inline void clear_freeze_flag(struct task_struct *p)
-{
-	clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
 {
-	return !(p->flags & PF_FREEZER_NOSIG);
+	if (likely(!atomic_read(&system_freezing_cnt)))
+		return false;
+	return freezing_slow_path(p);
 }
 
 /* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
 
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
 {
-	if (freezing(current)) {
-		refrigerator();
-		return 1;
-	} else
-		return 0;
+	might_sleep();
+	if (likely(!freezing(current)))
+		return false;
+	return __refrigerator(false);
 }
 
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
 
 #ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
 {
-	return 0;
+	return false;
 }
 #endif /* !CONFIG_CGROUP_FREEZER */
 
@@ -117,23 +104,6 @@ static inline int freezer_should_skip(struct task_struct *p)
 	return !!(p->flags & PF_FREEZER_SKIP);
 }
 
-/*
- * Tell the freezer that the current task should be frozen by it
- */
-static inline void set_freezable(void)
-{
-	current->flags &= ~PF_NOFREEZE;
-}
-
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
-	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
-
 /*
  * Freezer-friendly wrappers around wait_event_interruptible(),
  * wait_event_killable() and wait_event_interruptible_timeout(), originally
@@ -152,47 +122,45 @@ static inline void set_freezable_with_signal(void)
 #define wait_event_freezable(wq, condition)				\
 ({									\
 	int __retval;							\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible(wq, 		\
 				(condition) || freezing(current));	\
-		if (__retval && !freezing(current))			\
+		if (__retval || (condition))				\
 			break;						\
-		else if (!(condition))					\
-			__retval = -ERESTARTSYS;			\
-	} while (try_to_freeze());					\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
 
-
 #define wait_event_freezable_timeout(wq, condition, timeout)		\
 ({									\
 	long __retval = timeout;					\
-	do {								\
+	for (;;) {							\
 		__retval = wait_event_interruptible_timeout(wq,		\
 				(condition) || freezing(current),	\
 				__retval); 				\
-	} while (try_to_freeze());					\
+		if (__retval <= 0 || (condition))			\
+			break;						\
+		try_to_freeze();					\
+	}								\
 	__retval;							\
 })
+
 #else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
 
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
 
 static inline void freezer_do_not_count(void) {}
 static inline void freezer_count(void) {}
 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
 static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
 
 #define wait_event_freezable(wq, condition)				\
 		wait_event_interruptible(wq, condition)

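The reworked wait_event_freezable() now loops until the condition is true, returns any -ERESTARTSYS from a real signal, and calls try_to_freeze() whenever it was woken only because a freeze began. A hedged sketch of a caller (the structure and names below are hypothetical, not taken from this commit):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

struct example_dev {			/* hypothetical per-device context */
	wait_queue_head_t wq;
	bool work_ready;
};

static int example_wait_thread(void *data)
{
	struct example_dev *dev = data;

	set_freezable();

	while (!kthread_should_stop()) {
		/*
		 * Sleeps until work_ready is set.  If a freeze starts, the
		 * macro freezes here and resumes waiting afterwards; a real
		 * signal makes it return -ERESTARTSYS instead.
		 */
		if (wait_event_freezable(dev->wq, dev->work_ready))
			continue;

		dev->work_ready = false;
		/* ... handle the queued work ... */
	}
	return 0;
}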
+ 1 - 0
include/linux/kthread.h

@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
 int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 
 int kthreadd(void *unused);

+ 1 - 3
include/linux/sched.h

@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FREEZING) == 0)
+				 (task->flags & PF_FROZEN) == 0)
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -1772,7 +1772,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
-#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -1788,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other

+ 28 - 35
kernel/cgroup_freezer.c

@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
 			    struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-	enum freezer_state state = task_freezer(task)->state;
-	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+	enum freezer_state state;
+	bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-	int result;
-	task_lock(task);
-	result = __cgroup_freezing_or_frozen(task);
-	task_unlock(task);
-	return result;
+	rcu_read_lock();
+	state = task_freezer(task)->state;
+	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+	rcu_read_unlock();
+
+	return ret;
 }
 
 /*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
  * freezer_can_attach():
  * cgroup_mutex (held by caller of can_attach)
  *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
  * freezer->lock
  *  sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
  *   write_lock css_set_lock (cgroup iterator start)
  *    task->alloc_lock
  *   read_lock css_set_lock (cgroup iterator start)
- *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  *     sighand->siglock
  */
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 static void freezer_destroy(struct cgroup_subsys *ss,
 			    struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
 
 /*
@@ -177,13 +176,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 
 static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-	rcu_read_lock();
-	if (__cgroup_freezing_or_frozen(tsk)) {
-		rcu_read_unlock();
-		return -EBUSY;
-	}
-	rcu_read_unlock();
-	return 0;
+	return cgroup_freezing(tsk) ? -EBUSY : 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -213,7 +206,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
-		freeze_task(task, true);
+		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 }
 
@@ -231,7 +224,7 @@ static void update_if_frozen(struct cgroup *cgroup,
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (frozen(task))
+		if (freezing(task) && frozen(task))
 			nfrozen++;
 	}
 
@@ -279,10 +272,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 	unsigned int num_cant_freeze_now = 0;
 
-	freezer->state = CGROUP_FREEZING;
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
 			continue;
 		if (frozen(task))
 			continue;
@@ -300,12 +292,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
-	while ((task = cgroup_iter_next(cgroup, &it))) {
-		thaw_process(task);
-	}
+	while ((task = cgroup_iter_next(cgroup, &it)))
+		__thaw_task(task);
 	cgroup_iter_end(cgroup, &it);
-
-	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -319,20 +308,24 @@ static int freezer_change_state(struct cgroup *cgroup,
 	spin_lock_irq(&freezer->lock);
 
 	update_if_frozen(cgroup, freezer);
-	if (goal_state == freezer->state)
-		goto out;
 
 	switch (goal_state) {
 	case CGROUP_THAWED:
+		if (freezer->state != CGROUP_THAWED)
+			atomic_dec(&system_freezing_cnt);
+		freezer->state = CGROUP_THAWED;
 		unfreeze_cgroup(cgroup, freezer);
 		break;
 	case CGROUP_FROZEN:
+		if (freezer->state == CGROUP_THAWED)
+			atomic_inc(&system_freezing_cnt);
+		freezer->state = CGROUP_FREEZING;
 		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
 		BUG();
 	}
-out:
+
 	spin_unlock_irq(&freezer->lock);
 
 	return retval;

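freezer_change_state() is driven from user space by writing to a cgroup's freezer.state control file: writing FROZEN maps to the CGROUP_FROZEN goal state above, and THAWED to CGROUP_THAWED. A rough user-space illustration (the mount point and group name are assumptions, not part of this commit):

#include <stdio.h>

/* assumed path: a freezer hierarchy mounted at /sys/fs/cgroup/freezer */
static int set_freezer_state(const char *state)
{
	FILE *f = fopen("/sys/fs/cgroup/freezer/mygroup/freezer.state", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", state);	/* "FROZEN" or "THAWED" */
	return fclose(f);
}

/* e.g. set_freezer_state("FROZEN"); ... set_freezer_state("THAWED"); */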
+ 1 - 2
kernel/exit.c

@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
 	tsk->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
-	/* We don't want this task to be frozen prematurely */
-	clear_freeze_flag(tsk);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
+	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return".  */

+ 0 - 1
kernel/fork.c

@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)

+ 104 - 99
kernel/freezer.c

@@ -9,101 +9,114 @@
 #include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state.  Can be
+ * called under any context.  The freezers are responsible for ensuring the
+ * target tasks see the updated state.
  */
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
 {
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		smp_wmb();
-	}
-	clear_freeze_flag(current);
+	if (p->flags & PF_NOFREEZE)
+		return false;
+
+	if (pm_nosig_freezing || cgroup_freezing(p))
+		return true;
+
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
+		return true;
+
+	return false;
 }
+EXPORT_SYMBOL(freezing_slow_path);
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
-	long save;
+	bool was_frozen = false;
+	long save = current->state;
 
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* prevent accounting of that task to load */
-	current->flags |= PF_FREEZING;
-
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
+
+		spin_lock_irq(&freezer_lock);
+		current->flags |= PF_FROZEN;
+		if (!freezing(current) ||
+		    (check_kthr_stop && kthread_should_stop()))
+			current->flags &= ~PF_FROZEN;
+		spin_unlock_irq(&freezer_lock);
+
+		if (!(current->flags & PF_FROZEN))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
-	/* Remove the accounting blocker */
-	current->flags &= ~PF_FREEZING;
-
 	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
+
+	/*
+	 * Restore saved task state before returning.  The mb'd version
+	 * needs to be used; otherwise, it might silently break
+	 * synchronization which depends on ordered task state change.
+	 */
+	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	if (lock_task_sighand(p, &flags)) {
+		signal_wake_up(p, 0);
+		unlock_task_sighand(p, &flags);
+	}
 }
 
 /**
- *	freeze_task - send a freeze request to given task
- *	@p: task to send the request to
- *	@sig_only: if set, the request will only be sent if the task has the
- *		PF_FREEZER_NOSIG flag unset
- *	Return value: 'false', if @sig_only is set and the task has
- *		PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
+ * freeze_task - send a freeze request to given task
+ * @p: task to send the request to
+ *
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
 *
- *	The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- *	either sending a fake signal to it or waking it up, depending on whether
- *	or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
- *	has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- *	TIF_FREEZE flag will not be set.
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
  */
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
 {
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		smp_rmb();
-		if (frozen(p))
-			return false;
-
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (!freezing(p) || frozen(p)) {
+		spin_unlock_irqrestore(&freezer_lock, flags);
+		return false;
 	}
 
-	if (should_send_signal(p)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 		 * TASK_RUNNING transition can't race with task state
 		 * testing in try_to_freeze_tasks().
 		 */
-	} else if (sig_only) {
-		return false;
 	} else {
 		wake_up_state(p, TASK_INTERRUPTIBLE);
 	}
 
+	spin_unlock_irqrestore(&freezer_lock, flags);
 	return true;
 }
 
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	if (freezing(p)) {
-		pr_debug("  clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		return 1;
-	}
-	clear_freeze_flag(p);
-	return 0;
+	/*
+	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
+	 * be visible to @p as waking up implies wmb.  Waking up inside
+	 * freezer_lock also prevents wakeups from leaking outside
+	 * refrigerator.
+	 */
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (frozen(p))
+		wake_up_process(p);
+	spin_unlock_irqrestore(&freezer_lock, flags);
 }
 
-/*
- * Wake up a frozen process
+/**
+ * set_freezable - make %current freezable
 *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails.  Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
+ * Mark %current freezable and enter refrigerator if necessary.
  */
-int thaw_process(struct task_struct *p)
+bool set_freezable(void)
 {
-	task_lock(p);
-	if (__thaw_process(p) == 1) {
-		task_unlock(p);
-		wake_up_process(p);
-		return 1;
-	}
-	task_unlock(p);
-	return 0;
+	might_sleep();
+
+	/*
+	 * Modify flags while holding freezer_lock.  This ensures the
+	 * freezer notices that we aren't frozen yet or the freezing
+	 * condition is visible to try_to_freeze() below.
+	 */
+	spin_lock_irq(&freezer_lock);
+	current->flags &= ~PF_NOFREEZE;
+	spin_unlock_irq(&freezer_lock);
+
+	return try_to_freeze();
 }
-EXPORT_SYMBOL(thaw_process);
+EXPORT_SYMBOL(set_freezable);

+ 26 - 1
kernel/kthread.c

@@ -58,6 +58,31 @@ int kthread_should_stop(void)
 }
 EXPORT_SYMBOL(kthread_should_stop);
 
+/**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary.  This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+	bool frozen = false;
+
+	might_sleep();
+
+	if (unlikely(freezing(current)))
+		frozen = __refrigerator(true);
+
+	if (was_frozen)
+		*was_frozen = frozen;
+
+	return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
 /**
  * kthread_data - return data value specified on kthread creation
  * @task: kthread task in question
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
-	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+	current->flags |= PF_NOFREEZE;
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);

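kthread_freezable_should_stop() replaces the open-coded kthread_should_stop()/try_to_freeze() pairing in freezable kthreads and avoids the kthread_stop() vs. freezer deadlock by re-checking the stop request from inside __refrigerator(). A sketch of the intended calling convention (hypothetical thread function, not part of this commit):

#include <linux/freezer.h>
#include <linux/kthread.h>

static int example_freezable_kthread(void *data)
{
	bool was_frozen;

	set_freezable();

	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("resumed after being frozen\n");

		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}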
+ 2 - 13
kernel/power/hibernate.c

@@ -611,17 +611,6 @@ static void power_down(void)
 	while(1);
 }
 
-static int prepare_processes(void)
-{
-	int error = 0;
-
-	if (freeze_processes()) {
-		error = -EBUSY;
-		thaw_processes();
-	}
-	return error;
-}
-
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
@@ -654,7 +643,7 @@ int hibernate(void)
 	sys_sync();
 	printk("done.\n");
 
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error)
 		goto Finish;
 
@@ -815,7 +804,7 @@ static int software_resume(void)
 		goto close_finish;
 
 	pr_debug("PM: Preparing processes for restore.\n");
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error) {
 		swsusp_close(FMODE_READ);
 		goto Done;

+ 31 - 46
kernel/power/process.c

@@ -22,16 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezable(struct task_struct * p)
-{
-	if ((p == current) ||
-	    (p->flags & PF_NOFREEZE) ||
-	    (p->exit_state != 0))
-		return 0;
-	return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
 	struct task_struct *g, *p;
 	unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
 
 	end_time = jiffies + TIMEOUT;
 
-	if (!sig_only)
+	if (!user_only)
 		freeze_workqueues_begin();
 
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezable(p))
-				continue;
-
-			if (!freeze_task(p, sig_only))
+			if (p == current || !freeze_task(p))
 				continue;
 
 			/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 
-		if (!sig_only) {
+		if (!user_only) {
 			wq_busy = freeze_workqueues_busy();
 			todo += wq_busy;
 		}
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
 	elapsed_csecs = elapsed_csecs64;
 
 	if (todo) {
-		/* This does not unfreeze processes that are already frozen
-		 * (we have slightly ugly calling convention in that respect,
-		 * and caller must call thaw_processes() if something fails),
-		 * but it cleans up leftover PF_FREEZE requests.
-		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
 		       elapsed_csecs / 100, elapsed_csecs % 100,
 		       todo - wq_busy, wq_busy);
 
-		thaw_workqueues();
-
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			task_lock(p);
-			if (!wakeup && freezing(p) && !freezer_should_skip(p))
+			if (!wakeup && !freezer_should_skip(p) &&
+			    p != current && freezing(p) && !frozen(p))
 				sched_show_task(p);
-			cancel_freezing(p);
-			task_unlock(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)
 
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_processes(void)
 {
 	int error;
 
+	if (!pm_freezing)
+		atomic_inc(&system_freezing_cnt);
+
 	printk("Freezing user space processes ... ");
+	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_kernel_threads(void)
 {
 	int error;
 
 	printk("Freezing remaining freezable tasks ... ");
+	pm_nosig_freezing = true;
 	error = try_to_freeze_tasks(false);
 	if (!error)
 		printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
-static void thaw_tasks(bool nosig_only)
+void thaw_processes(void)
 {
 	struct task_struct *g, *p;
 
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		if (!freezable(p))
-			continue;
+	if (pm_freezing)
+		atomic_dec(&system_freezing_cnt);
+	pm_freezing = false;
+	pm_nosig_freezing = false;
 
-		if (nosig_only && should_send_signal(p))
-			continue;
+	oom_killer_enable();
+
+	printk("Restarting tasks ... ");
 
-		if (cgroup_freezing_or_frozen(p))
-			continue;
+	thaw_workqueues();
 
-		thaw_process(p);
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		__thaw_task(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
-}
 
-void thaw_processes(void)
-{
-	oom_killer_enable();
-
-	printk("Restarting tasks ... ");
-	thaw_workqueues();
-	thaw_tasks(true);
-	thaw_tasks(false);
 	schedule();
 	printk("done.\n");
 }

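After this change, freeze_processes() and freeze_kernel_threads() undo a failed freeze themselves, so callers only propagate the error instead of pairing every failure with thaw_processes(). A hedged sketch of a caller under the new convention (hypothetical function, not from this commit):

#include <linux/freezer.h>

/* hypothetical PM-style caller; error handling only, no real suspend work */
static int example_enter_low_power(void)
{
	int error;

	error = freeze_processes();
	if (error)
		return error;	/* already fully thawed by freeze_processes() */

	error = freeze_kernel_threads();
	if (error)
		return error;	/* likewise thawed on failure */

	/* ... enter the low-power state ... */

	thaw_processes();
	return 0;
}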
+ 3 - 5
kernel/power/suspend.c

@@ -106,13 +106,11 @@ static int suspend_prepare(void)
 		goto Finish;
 
 	error = suspend_freeze_processes();
-	if (error) {
-		suspend_stats.failed_freeze++;
-		dpm_save_failed_step(SUSPEND_FREEZE);
-	} else
+	if (!error)
 		return 0;
 
-	suspend_thaw_processes();
+	suspend_stats.failed_freeze++;
+	dpm_save_failed_step(SUSPEND_FREEZE);
 	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);

+ 1 - 3
kernel/power/user.c

@@ -257,10 +257,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;
 
 		error = freeze_processes();
-		if (error) {
-			thaw_processes();
+		if (error)
 			usermodehelper_enable();
-		}
 		if (!error)
 			data->frozen = 1;
 		break;

+ 2 - 6
mm/backing-dev.c

@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 
 	/*
 	 * Finally, kill the kernel thread. We don't need to be RCU
-	 * safe anymore, since the bdi is gone from visibility. Force
-	 * unfreeze of the thread before calling kthread_stop(), otherwise
-	 * it would never exet if it is currently stuck in the refrigerator.
+	 * safe anymore, since the bdi is gone from visibility.
 	 */
-	if (bdi->wb.task) {
-		thaw_process(bdi->wb.task);
+	if (bdi->wb.task)
 		kthread_stop(bdi->wb.task);
-	}
 }
 
 /*

+ 1 - 1
mm/oom_kill.c

@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 		 */
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
-				thaw_process(p);
+				__thaw_task(p);
 			return ERR_PTR(-1UL);
 		}
 		if (!p->mm)