62 files changed, 338 insertions, 402 deletions
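At a high level, this series retires the per-task TIF_FREEZE flag and the PF_FREEZING/PF_FREEZER_NOSIG bits: freezing(p) becomes a cheap test of a global system_freezing_cnt counter with a slow path for PM and cgroup freezing, refrigerator() is renamed __refrigerator() and is reached only through try_to_freeze(), set_freezable_with_signal() is dropped in favor of set_freezable(), and freezable kthreads gain kthread_freezable_should_stop().  The sketch below shows the main-loop shape a freezable kernel thread takes against the reworked API; it is illustrative only, and the example_* identifiers are not part of the patch.

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_wait);
	static bool example_work_pending;

	static int example_thread(void *unused)
	{
		/* kthreads start with PF_NOFREEZE set; opt in to freezing */
		set_freezable();

		/* enters __refrigerator() on our behalf when freezing */
		while (!kthread_freezable_should_stop(NULL)) {
			wait_event_freezable(example_wait,
					     example_work_pending ||
					     kthread_should_stop());
			if (example_work_pending) {
				example_work_pending = false;
				/* ... process the pending work ... */
			}
		}
		return 0;
	}

The same shape shows up in the fs-writeback.c and thinkpad_acpi.c hunks below, where an explicit try_to_freeze() after the sleep is folded into kthread_freezable_should_stop() at the top of the loop.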
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt index 316c2ba187f..3ab9fbd2800 100644 --- a/Documentation/power/freezing-of-tasks.txt +++ b/Documentation/power/freezing-of-tasks.txt @@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called.  It executes  try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and  either wakes them up, if they are kernel threads, or sends fake signals to them,  if they are user space processes.  A task that has TIF_FREEZE set, should react -to it by calling the function called refrigerator() (defined in +to it by calling the function called __refrigerator() (defined in  kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state  to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.  Then, we say that the task is 'frozen' and therefore the set of functions @@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are  defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).  User space processes are generally frozen before kernel threads. -It is not recommended to call refrigerator() directly.  Instead, it is -recommended to use the try_to_freeze() function (defined in -include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the -task enter refrigerator() if the flag is set. +__refrigerator() must not be called directly.  Instead, use the +try_to_freeze() function (defined in include/linux/freezer.h), that checks +the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the +flag is set.  For user space processes try_to_freeze() is called automatically from the  signal-handling code, but the freezable kernel threads need to call it @@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.  After the system memory state has been restored from a hibernation image and  devices have been reinitialized, the function thaw_processes() is called in  order to clear the PF_FROZEN flag for each frozen task.  Then, the tasks that -have been frozen leave refrigerator() and continue running. +have been frozen leave __refrigerator() and continue running.  III. Which kernel threads are freezable?  Kernel threads are not freezable by default.  However, a kernel thread may clear  PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE -directly is strongly discouraged).  From this point it is regarded as freezable +directly is not allowed).  From this point it is regarded as freezable  and must call try_to_freeze() in a suitable place.  IV. Why do we do that? diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index ff73db02234..28335bd40e4 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");  #define TIF_UAC_SIGBUS		12	/* ! 
userspace part of 'osf_sysinfo' */  #define TIF_MEMDIE		13	/* is terminating due to OOM killer */  #define TIF_RESTORE_SIGMASK	14	/* restore signal mask in do_signal */ -#define TIF_FREEZE		16	/* is freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)  #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING) @@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)  #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)  #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  /* Work to do on interrupt/exception return.  */  #define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 7b5cc8dae06..0f30c3a78fc 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);  #define TIF_POLLING_NRFLAG	16  #define TIF_USING_IWMMXT	17  #define TIF_MEMDIE		18	/* is terminating due to OOM killer */ -#define TIF_FREEZE		19  #define TIF_RESTORE_SIGMASK	20  #define TIF_SECCOMP		21 @@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)  #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)  #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)  #define _TIF_SECCOMP		(1 << TIF_SECCOMP) diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h index 7a9c03dcb0b..e5deda4691d 100644 --- a/arch/avr32/include/asm/thread_info.h +++ b/arch/avr32/include/asm/thread_info.h @@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_RESTORE_SIGMASK	7	/* restore signal mask in do_signal */  #define TIF_CPU_GOING_TO_SLEEP	8	/* CPU is entering sleep 0 mode */  #define TIF_NOTIFY_RESUME	9	/* callback before returning to user */ -#define TIF_FREEZE		29  #define TIF_DEBUG		30	/* debugging enabled */  #define TIF_USERSPACE		31      /* true if FS sets userspace */ @@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)  #define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  /* Note: The masks below must never span more than 16 bits! 
*/ diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index 02560fd8a12..53ad10005ae 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h @@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)  					   TIF_NEED_RESCHED */  #define TIF_MEMDIE		4	/* is terminating due to OOM killer */  #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */ -#define TIF_FREEZE		6	/* is freezing for suspend */  #define TIF_IRQ_SYNC		7	/* sync pipeline stage */  #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */  #define TIF_SINGLESTEP		9 @@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)  #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_IRQ_SYNC		(1<<TIF_IRQ_SYNC)  #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)  #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP) diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h index 332f19c5455..29b92884d79 100644 --- a/arch/cris/include/asm/thread_info.h +++ b/arch/cris/include/asm/thread_info.h @@ -86,7 +86,6 @@ struct thread_info {  #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */  #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */  #define TIF_MEMDIE		17	/* is terminating due to OOM killer */ -#define TIF_FREEZE		18	/* is freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)  #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME) @@ -94,7 +93,6 @@ struct thread_info {  #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)  #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */  #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */ diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h index cefbe73dc11..92d83ea99ae 100644 --- a/arch/frv/include/asm/thread_info.h +++ b/arch/frv/include/asm/thread_info.h @@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");  #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */  #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */  #define TIF_MEMDIE		17	/* is terminating due to OOM killer */ -#define TIF_FREEZE		18	/* freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME) @@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");  #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)  #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)  #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */  #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */ diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h index d6f1784bfde..9c126e0c09a 100644 --- a/arch/h8300/include/asm/thread_info.h +++ b/arch/h8300/include/asm/thread_info.h @@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_MEMDIE		4	/* is terminating due 
to OOM killer */  #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */  #define TIF_NOTIFY_RESUME	6	/* callback before returning to user */ -#define TIF_FREEZE		16	/* is freezing for suspend */  /* as above, but as bit values */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE) @@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)  #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */ diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index ff0cc84e7bc..e054bcc4273 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -113,7 +113,6 @@ struct thread_info {  #define TIF_MEMDIE		17	/* is terminating due to OOM killer */  #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */  #define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */ -#define TIF_FREEZE		20	/* is freezing for suspend */  #define TIF_RESTORE_RSE		21	/* user RBS is newer than kernel RBS */  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE) @@ -126,7 +125,6 @@ struct thread_info {  #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)  #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)  #define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #define _TIF_RESTORE_RSE	(1 << TIF_RESTORE_RSE)  /* "work to do on user-return" bits */ diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h index 0227dba4406..bf8fa3c06f4 100644 --- a/arch/m32r/include/asm/thread_info.h +++ b/arch/m32r/include/asm/thread_info.h @@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)  #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */  #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */  #define TIF_MEMDIE		18	/* is terminating due to OOM killer */ -#define TIF_FREEZE		19	/* is freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)  #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING) @@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)  #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)  #define _TIF_USEDFPU		(1<<TIF_USEDFPU)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */  #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */ diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 790988967ba..294df1592de 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h @@ -103,7 +103,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_DELAYED_TRACE	14	/* single step a syscall */  #define TIF_SYSCALL_TRACE	15	/* syscall trace active */  #define TIF_MEMDIE		16	/* is terminating due to OOM killer */ -#define TIF_FREEZE		17	/* thread is freezing for suspend */  #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal */  #endif	/* _ASM_M68K_THREAD_INFO_H */ diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index b73da2ac21b..1a8ab6a5c03 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h 
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_MEMDIE		6	/* is terminating due to OOM killer */  #define TIF_SYSCALL_AUDIT	9       /* syscall auditing active */  #define TIF_SECCOMP		10      /* secure computing */ -#define TIF_FREEZE		14	/* Freezing for suspend */  /* true if poll_idle() is polling TIF_NEED_RESCHED */  #define TIF_POLLING_NRFLAG	16 @@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)  #define _TIF_IRET		(1 << TIF_IRET)  #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)  #define _TIF_SECCOMP		(1 << TIF_SECCOMP) diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 97f8bf6639e..0d85d8e440c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");  #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */  #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */  #define TIF_MEMDIE		18	/* is terminating due to OOM killer */ -#define TIF_FREEZE		19  #define TIF_FIXADE		20	/* Fix address errors in software */  #define TIF_LOGADE		21	/* Log address errors to syslog */  #define TIF_32BIT_REGS		22	/* also implies 16/32 fprs */ @@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");  #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)  #define _TIF_USEDFPU		(1<<TIF_USEDFPU)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_FIXADE		(1<<TIF_FIXADE)  #define _TIF_LOGADE		(1<<TIF_LOGADE)  #define _TIF_32BIT_REGS		(1<<TIF_32BIT_REGS) diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index 87c213002d4..28cf52100ba 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h @@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);  #define TIF_RESTORE_SIGMASK	5	/* restore signal mask in do_signal() */  #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */  #define TIF_MEMDIE		17	/* is terminating due to OOM killer */ -#define TIF_FREEZE		18	/* freezing for suspend */  #define _TIF_SYSCALL_TRACE	+(1 << TIF_SYSCALL_TRACE)  #define _TIF_NOTIFY_RESUME	+(1 << TIF_NOTIFY_RESUME) @@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);  #define _TIF_SINGLESTEP		+(1 << TIF_SINGLESTEP)  #define _TIF_RESTORE_SIGMASK	+(1 << TIF_RESTORE_SIGMASK)  #define _TIF_POLLING_NRFLAG	+(1 << TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		+(1 << TIF_FREEZE)  #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */  #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */ diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index aa8de727e90..6d9c7c7973d 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -58,7 +58,6 @@ struct thread_info {  #define TIF_32BIT               4       /* 32 bit binary */  #define TIF_MEMDIE		5	/* is terminating due to OOM killer */  #define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */ -#define TIF_FREEZE		7	/* is freezing for suspend */  #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */  #define 
TIF_SINGLESTEP		9	/* single stepping? */  #define TIF_BLOCKSTEP		10	/* branch stepping? */ @@ -69,7 +68,6 @@ struct thread_info {  #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)  #define _TIF_32BIT		(1 << TIF_32BIT)  #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)  #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)  #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 836f231ec1f..96471494096 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */  #define TIF_NOERROR		12	/* Force successful syscall return */  #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */ -#define TIF_FREEZE		14	/* Freezing for suspend */  #define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */  #define TIF_RUNLATCH		16	/* Is the runlatch enabled? */ @@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)  #define _TIF_NOERROR		(1<<TIF_NOERROR)  #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)  #define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)  #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index a23183423b1..a73038155e0 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_MEMDIE		18	/* is terminating due to OOM killer */  #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */  #define TIF_SINGLE_STEP		20	/* This task is single stepped */ -#define TIF_FREEZE		21	/* thread is freezing for suspend */  #define _TIF_SYSCALL		(1<<TIF_SYSCALL)  #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME) @@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)  #define _TIF_31BIT		(1<<TIF_31BIT)  #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #ifdef CONFIG_64BIT  #define is_32bit_task()		(test_thread_flag(TIF_31BIT)) diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index ea2d5089de1..20ee40af16e 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -122,7 +122,6 @@ extern void init_thread_xstate(void);  #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */  #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */  #define TIF_MEMDIE		18	/* is terminating due to OOM killer */ -#define TIF_FREEZE		19	/* Freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)  #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING) @@ -133,7 +132,6 @@ extern void init_thread_xstate(void);  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)  #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)  #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  /*   * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we diff --git 
a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index fa575323341..5cc5888ad5a 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)  #define TIF_POLLING_NRFLAG	9	/* true if poll_idle() is polling  					 * TIF_NEED_RESCHED */  #define TIF_MEMDIE		10	/* is terminating due to OOM killer */ -#define TIF_FREEZE		11	/* is freezing for suspend */  /* as above, but as bit values */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE) @@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)  #define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | \  					 _TIF_SIGPENDING | \  					 _TIF_RESTORE_SIGMASK) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #endif /* __KERNEL__ */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 60d86be1a53..01d057fe6a3 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");  /* flag bit 12 is available */  #define TIF_MEMDIE		13	/* is terminating due to OOM killer */  #define TIF_POLLING_NRFLAG	14 -#define TIF_FREEZE		15	/* is freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)  #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME) @@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");  #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)  #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \  				 _TIF_DO_NOTIFY_RESUME_MASK | \ diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 5bd1bad33fa..200c4ab1240 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_MEMDIE		5	/* is terminating due to OOM killer */  #define TIF_SYSCALL_AUDIT	6  #define TIF_RESTORE_SIGMASK	7 -#define TIF_FREEZE		16	/* is freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)  #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING) @@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_MEMDIE		(1 << TIF_MEMDIE)  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)  #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #endif diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h index c270e9e0486..89f7557583b 100644 --- a/arch/unicore32/include/asm/thread_info.h +++ b/arch/unicore32/include/asm/thread_info.h @@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */  #define TIF_SYSCALL_TRACE	8  #define TIF_MEMDIE		18 -#define TIF_FREEZE		19  #define TIF_RESTORE_SIGMASK	20  #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)  #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)  /* diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 
a1fe5c127b5..32125af20d3 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -90,7 +90,6 @@ struct thread_info {  #define TIF_MEMDIE		20	/* is terminating due to OOM killer */  #define TIF_DEBUG		21	/* uses debug registers */  #define TIF_IO_BITMAP		22	/* uses I/O bitmap */ -#define TIF_FREEZE		23	/* is freezing for suspend */  #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */  #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */  #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */ @@ -112,7 +111,6 @@ struct thread_info {  #define _TIF_FORK		(1 << TIF_FORK)  #define _TIF_DEBUG		(1 << TIF_DEBUG)  #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP) -#define _TIF_FREEZE		(1 << TIF_FREEZE)  #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)  #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)  #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES) diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 7be8accb0b0..6abbedd09d8 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)  #define TIF_MEMDIE		5	/* is terminating due to OOM killer */  #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */  #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_FREEZE		17	/* is freezing for suspend */  #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)  #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING) @@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)  #define _TIF_IRET		(1<<TIF_IRET)  #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)  #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK) -#define _TIF_FREEZE		(1<<TIF_FREEZE)  #define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */  #define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */ diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index a88a78c8616..6c3defa5084 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)  	init_waitqueue_entry(&wait, current); -	current->flags |= PF_NOFREEZE; -  	for (;;) {  		add_wait_queue(&thread->wait_q, &wait); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index eb1d8641cf5..2b8661b54ea 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,  	return error_count;  } -static void dmatest_callback(void *completion) +/* poor man's completion - we want to use wait_event_freezable() on it */ +struct dmatest_done { +	bool			done; +	wait_queue_head_t	*wait; +}; + +static void dmatest_callback(void *arg)  { -	complete(completion); +	struct dmatest_done *done = arg; + +	done->done = true; +	wake_up_all(done->wait);  }  /* @@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)   */  static int dmatest_func(void *data)  { +	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);  	struct dmatest_thread	*thread = data; +	struct dmatest_done	done = { .wait = &done_wait };  	struct dma_chan		*chan;  	const char		*thread_name;  	unsigned int		src_off, dst_off, len; @@ -252,7 +263,7 @@ static int dmatest_func(void *data)  	int			i;  	thread_name = current->comm; -	set_freezable_with_signal(); +	set_freezable();  	ret = -ENOMEM; @@ -306,9 +317,6 
@@ static int dmatest_func(void *data)  		struct dma_async_tx_descriptor *tx = NULL;  		dma_addr_t dma_srcs[src_cnt];  		dma_addr_t dma_dsts[dst_cnt]; -		struct completion cmp; -		unsigned long start, tmo, end = 0 /* compiler... */; -		bool reload = true;  		u8 align = 0;  		total_tests++; @@ -391,9 +399,9 @@ static int dmatest_func(void *data)  			continue;  		} -		init_completion(&cmp); +		done.done = false;  		tx->callback = dmatest_callback; -		tx->callback_param = &cmp; +		tx->callback_param = &done;  		cookie = tx->tx_submit(tx);  		if (dma_submit_error(cookie)) { @@ -407,20 +415,20 @@ static int dmatest_func(void *data)  		}  		dma_async_issue_pending(chan); -		do { -			start = jiffies; -			if (reload) -				end = start + msecs_to_jiffies(timeout); -			else if (end <= start) -				end = start + 1; -			tmo = wait_for_completion_interruptible_timeout(&cmp, -								end - start); -			reload = try_to_freeze(); -		} while (tmo == -ERESTARTSYS); +		wait_event_freezable_timeout(done_wait, done.done, +					     msecs_to_jiffies(timeout));  		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); -		if (tmo == 0) { +		if (!done.done) { +			/* +			 * We're leaving the timed out dma operation with +			 * dangling pointer to done_wait.  To make this +			 * correct, we'll need to allocate wait_done for +			 * each test iteration and perform "who's gonna +			 * free it this time?" dancing.  For now, just +			 * leave it dangling. +			 */  			pr_warning("%s: #%u: test timed out\n",  				   thread_name, total_tests - 1);  			failed_tests++; diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 3eee45ffb09..c6b456ad734 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c @@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)  	static const unsigned max_i2c_errors = 100;  	int ret; -	current->flags |= PF_NOFREEZE; -  	while (!kthread_should_stop()) {  		int i;  		union { diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 41c96b3d815..e880c79d7bd 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)  			write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD); -			refrigerator(); +			try_to_freeze();  			if (change_speed(stir, stir->speed))  				break; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 7b828680b21..4b11fc91fa7 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)  	u32 poll_mask, event_mask;  	unsigned int si, so;  	unsigned long t; -	unsigned int change_detector, must_reset; +	unsigned int change_detector;  	unsigned int poll_freq; +	bool was_frozen;  	mutex_lock(&hotkey_thread_mutex); @@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)  				t = 100;	/* should never happen... 
*/  		}  		t = msleep_interruptible(t); -		if (unlikely(kthread_should_stop())) +		if (unlikely(kthread_freezable_should_stop(&was_frozen)))  			break; -		must_reset = try_to_freeze(); -		if (t > 0 && !must_reset) + +		if (t > 0 && !was_frozen)  			continue;  		mutex_lock(&hotkey_thread_data_mutex); -		if (must_reset || hotkey_config_change != change_detector) { +		if (was_frozen || hotkey_config_change != change_detector) {  			/* forget old state on thaw or config change */  			si = so;  			t = 0; @@ -2528,10 +2529,6 @@ exit:  static void hotkey_poll_stop_sync(void)  {  	if (tpacpi_hotkey_task) { -		if (frozen(tpacpi_hotkey_task) || -		    freezing(tpacpi_hotkey_task)) -			thaw_process(tpacpi_hotkey_task); -  		kthread_stop(tpacpi_hotkey_task);  		tpacpi_hotkey_task = NULL;  		mutex_lock(&hotkey_thread_mutex); diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c index 480b0ed2e4d..8a7803cf88d 100644 --- a/drivers/staging/rts_pstor/rtsx.c +++ b/drivers/staging/rts_pstor/rtsx.c @@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)  	struct rtsx_chip *chip = dev->chip;  	struct Scsi_Host *host = rtsx_to_host(dev); -	current->flags |= PF_NOFREEZE; -  	for (;;) {  		if (wait_for_completion_interruptible(&dev->cmnd_ready))  			break; diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index c325e69415a..aa84b3d7727 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)  	dev_dbg(dev, "device found\n"); -	set_freezable_with_signal(); +	set_freezable(); +  	/*  	 * Wait for the timeout to expire or for a disconnect  	 * @@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)  	 * fail to freeze, but we can't be non-freezable either. Nor can  	 * khubd freeze while waiting for scanning to complete as it may  	 * hold the device lock, causing a hang when suspending devices. -	 * So we request a fake signal when freezing and use -	 * interruptible sleep to kick us out of our wait early when -	 * freezing happens. +	 * So instead of using wait_event_freezable(), explicitly test +	 * for (DONT_SCAN || freezing) in interruptible wait and proceed +	 * if any of DONT_SCAN, freezing or timeout has happened.  	 
*/  	if (delay_use > 0) {  		dev_dbg(dev, "waiting for device to settle "  				"before scanning\n");  		wait_event_interruptible_timeout(us->delay_wait, -				test_bit(US_FLIDX_DONT_SCAN, &us->dflags), -				delay_use * HZ); +				test_bit(US_FLIDX_DONT_SCAN, &us->dflags) || +				freezing(current), delay_use * HZ);  	}  	/* If the device is still connected, perform the scanning */ diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 7ec14097fef..98ab240072e 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -340,7 +340,7 @@ again:  		if (freezing(current)) {  			worker->working = 0;  			spin_unlock_irq(&worker->lock); -			refrigerator(); +			try_to_freeze();  		} else {  			spin_unlock_irq(&worker->lock);  			if (!kthread_should_stop()) { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 632f8f3cc9d..b0917590152 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)  			btrfs_run_defrag_inodes(root->fs_info);  		} -		if (freezing(current)) { -			refrigerator(); -		} else { +		if (!try_to_freeze()) {  			set_current_state(TASK_INTERRUPTIBLE);  			if (!kthread_should_stop())  				schedule(); @@ -1635,9 +1633,7 @@ sleep:  		wake_up_process(root->fs_info->cleaner_kthread);  		mutex_unlock(&root->fs_info->transaction_kthread_mutex); -		if (freezing(current)) { -			refrigerator(); -		} else { +		if (!try_to_freeze()) {  			set_current_state(TASK_INTERRUPTIBLE);  			if (!kthread_should_stop() &&  			    !btrfs_transaction_blocked(root->fs_info)) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 3858767ec67..1c7bbd00e7e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2884,8 +2884,7 @@ cont_thread:  		}  		mutex_unlock(&eli->li_list_mtx); -		if (freezing(current)) -			refrigerator(); +		try_to_freeze();  		cur = jiffies;  		if ((time_after_eq(cur, next_wakeup)) || diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 73c3992b2bb..271fde50f0e 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -947,7 +947,7 @@ int bdi_writeback_thread(void *data)  	trace_writeback_thread_start(bdi); -	while (!kthread_should_stop()) { +	while (!kthread_freezable_should_stop(NULL)) {  		/*  		 * Remove own delayed wake-up timer, since we are already awake  		 * and we'll take care of the preriodic write-back. 
@@ -977,8 +977,6 @@ int bdi_writeback_thread(void *data)  			 */  			schedule();  		} - -		try_to_freeze();  	}  	/* Flush any work that raced with us exiting */ diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 59864643436..8154d42e464 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -951,8 +951,8 @@ int gfs2_logd(void *data)  			wake_up(&sdp->sd_log_waitq);  		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; -		if (freezing(current)) -			refrigerator(); + +		try_to_freeze();  		do {  			prepare_to_wait(&sdp->sd_logd_waitq, &wait, diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 7e528dc14f8..d49669e9265 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data)  		/* Check for & recover partially truncated inodes */  		quotad_check_trunc_list(sdp); -		if (freezing(current)) -			refrigerator(); +		try_to_freeze(); +  		t = min(quotad_timeo, statfs_timeo);  		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index fea8dd661d2..a96cff0c5f1 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -166,7 +166,7 @@ loop:  		 */  		jbd_debug(1, "Now suspending kjournald\n");  		spin_unlock(&journal->j_state_lock); -		refrigerator(); +		try_to_freeze();  		spin_lock(&journal->j_state_lock);  	} else {  		/* diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 0fa0123151d..c0a5f9f1b12 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -173,7 +173,7 @@ loop:  		 */  		jbd_debug(1, "Now suspending kjournald2\n");  		write_unlock(&journal->j_state_lock); -		refrigerator(); +		try_to_freeze();  		write_lock(&journal->j_state_lock);  	} else {  		/* diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index cc5f811ed38..2eb952c41a6 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)  		if (freezing(current)) {  			spin_unlock_irq(&log_redrive_lock); -			refrigerator(); +			try_to_freeze();  		} else {  			set_current_state(TASK_INTERRUPTIBLE);  			spin_unlock_irq(&log_redrive_lock); diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index af9606057dd..bb8b661bcc5 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)  		if (freezing(current)) {  			LAZY_UNLOCK(flags); -			refrigerator(); +			try_to_freeze();  		} else {  			DECLARE_WAITQUEUE(wq, current); @@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)  		if (freezing(current)) {  			TXN_UNLOCK(); -			refrigerator(); +			try_to_freeze();  		} else {  			set_current_state(TASK_INTERRUPTIBLE);  			TXN_UNLOCK(); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 50a15fa8cf9..bf3a57bbbfc 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -38,6 +38,7 @@  #include <linux/nfs_xdr.h>  #include <linux/slab.h>  #include <linux/compat.h> +#include <linux/freezer.h>  #include <asm/system.h>  #include <asm/uaccess.h> @@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)  {  	if (fatal_signal_pending(current))  		return -ERESTARTSYS; -	schedule(); +	freezable_schedule();  	return 0;  } diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index d4bc9ed9174..91943953a37 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -17,6 +17,7 @@  #include <linux/nfs_page.h>  #include <linux/lockd/bind.h>  #include <linux/nfs_mount.h> +#include <linux/freezer.h>  #include "iostat.h"  #include "internal.h" @@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)  		res = rpc_call_sync(clnt, msg, flags);  		if 
(res != -EJUKEBOX && res != -EKEYEXPIRED)  			break; -		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME); +		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);  		res = -ERESTARTSYS;  	} while (!fatal_signal_pending(current));  	return res; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index be2bbac1381..b28bb19b04f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -53,6 +53,7 @@  #include <linux/sunrpc/bc_xprt.h>  #include <linux/xattr.h>  #include <linux/utsname.h> +#include <linux/freezer.h>  #include "nfs4_fs.h"  #include "delegation.h" @@ -241,7 +242,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)  		*timeout = NFS4_POLL_RETRY_MIN;  	if (*timeout > NFS4_POLL_RETRY_MAX)  		*timeout = NFS4_POLL_RETRY_MAX; -	schedule_timeout_killable(*timeout); +	freezable_schedule_timeout_killable(*timeout);  	if (fatal_signal_pending(current))  		res = -ERESTARTSYS;  	*timeout <<= 1; @@ -3950,7 +3951,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4  static unsigned long  nfs4_set_lock_task_retry(unsigned long timeout)  { -	schedule_timeout_killable(timeout); +	freezable_schedule_timeout_killable(timeout);  	timeout <<= 1;  	if (timeout > NFS4_LOCK_MAXTIMEOUT)  		return NFS4_LOCK_MAXTIMEOUT; diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index f48125da198..0c672588fe5 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -41,6 +41,7 @@  #include <linux/nfs_fs.h>  #include <linux/nfs_page.h>  #include <linux/lockd/bind.h> +#include <linux/freezer.h>  #include "internal.h"  #define NFSDBG_FACILITY		NFSDBG_PROC @@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)  		res = rpc_call_sync(clnt, msg, flags);  		if (res != -EKEYEXPIRED)  			break; -		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME); +		freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);  		res = -ERESTARTSYS;  	} while (!fatal_signal_pending(current));  	return res; diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index bb24ab6c282..0e72ad6f22a 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)  	if (freezing(current)) {  		spin_unlock(&sci->sc_state_lock); -		refrigerator(); +		try_to_freeze();  		spin_lock(&sci->sc_state_lock);  	} else {  		DEFINE_WAIT(wait); diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index cf0ac056815..018829936d6 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1703,7 +1703,7 @@ xfsbufd(  		if (unlikely(freezing(current))) {  			set_bit(XBT_FORCE_SLEEP, &target->bt_flags); -			refrigerator(); +			try_to_freeze();  		} else {  			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);  		} diff --git a/include/linux/freezer.h b/include/linux/freezer.h index a5386e3ee75..30f06c22046 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -5,71 +5,58 @@  #include <linux/sched.h>  #include <linux/wait.h> +#include <linux/atomic.h>  #ifdef CONFIG_FREEZER +extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */ +extern bool pm_freezing;		/* PM freezing in effect */ +extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */ +  /*   * Check if a process has been frozen   */ -static inline int frozen(struct task_struct *p) +static inline bool frozen(struct task_struct *p)  {  	return p->flags & PF_FROZEN;  } -/* - * Check if there is a request to freeze a process - */ -static inline int freezing(struct task_struct *p) -{ -	return test_tsk_thread_flag(p, TIF_FREEZE); -} - -/* - * Request that 
a process be frozen - */ -static inline void set_freeze_flag(struct task_struct *p) -{ -	set_tsk_thread_flag(p, TIF_FREEZE); -} +extern bool freezing_slow_path(struct task_struct *p);  /* - * Sometimes we may need to cancel the previous 'freeze' request + * Check if there is a request to freeze a process   */ -static inline void clear_freeze_flag(struct task_struct *p) +static inline bool freezing(struct task_struct *p)  { -	clear_tsk_thread_flag(p, TIF_FREEZE); -} - -static inline bool should_send_signal(struct task_struct *p) -{ -	return !(p->flags & PF_FREEZER_NOSIG); +	if (likely(!atomic_read(&system_freezing_cnt))) +		return false; +	return freezing_slow_path(p);  }  /* Takes and releases task alloc lock using task_lock() */ -extern int thaw_process(struct task_struct *p); +extern void __thaw_task(struct task_struct *t); -extern void refrigerator(void); +extern bool __refrigerator(bool check_kthr_stop);  extern int freeze_processes(void);  extern int freeze_kernel_threads(void);  extern void thaw_processes(void); -static inline int try_to_freeze(void) +static inline bool try_to_freeze(void)  { -	if (freezing(current)) { -		refrigerator(); -		return 1; -	} else -		return 0; +	might_sleep(); +	if (likely(!freezing(current))) +		return false; +	return __refrigerator(false);  } -extern bool freeze_task(struct task_struct *p, bool sig_only); -extern void cancel_freezing(struct task_struct *p); +extern bool freeze_task(struct task_struct *p); +extern bool set_freezable(void);  #ifdef CONFIG_CGROUP_FREEZER -extern int cgroup_freezing_or_frozen(struct task_struct *task); +extern bool cgroup_freezing(struct task_struct *task);  #else /* !CONFIG_CGROUP_FREEZER */ -static inline int cgroup_freezing_or_frozen(struct task_struct *task) +static inline bool cgroup_freezing(struct task_struct *task)  { -	return 0; +	return false;  }  #endif /* !CONFIG_CGROUP_FREEZER */ @@ -118,21 +105,27 @@ static inline int freezer_should_skip(struct task_struct *p)  }  /* - * Tell the freezer that the current task should be frozen by it + * These macros are intended to be used whenever you want allow a task that's + * sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note + * that neither return any clear indication of whether a freeze event happened + * while in this function.   */ -static inline void set_freezable(void) -{ -	current->flags &= ~PF_NOFREEZE; -} -/* - * Tell the freezer that the current task should be frozen by it and that it - * should send a fake signal to the task to freeze it. - */ -static inline void set_freezable_with_signal(void) -{ -	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG); -} +/* Like schedule(), but should not block the freezer. */ +#define freezable_schedule()						\ +({									\ +	freezer_do_not_count();						\ +	schedule();							\ +	freezer_count();						\ +}) + +/* Like schedule_timeout_killable(), but should not block the freezer. 
*/ +#define freezable_schedule_timeout_killable(timeout)			\ +({									\ +	freezer_do_not_count();						\ +	schedule_timeout_killable(timeout);				\ +	freezer_count();						\ +})  /*   * Freezer-friendly wrappers around wait_event_interruptible(), @@ -152,47 +145,51 @@ static inline void set_freezable_with_signal(void)  #define wait_event_freezable(wq, condition)				\  ({									\  	int __retval;							\ -	do {								\ +	for (;;) {							\  		__retval = wait_event_interruptible(wq, 		\  				(condition) || freezing(current));	\ -		if (__retval && !freezing(current))			\ +		if (__retval || (condition))				\  			break;						\ -		else if (!(condition))					\ -			__retval = -ERESTARTSYS;			\ -	} while (try_to_freeze());					\ +		try_to_freeze();					\ +	}								\  	__retval;							\  }) -  #define wait_event_freezable_timeout(wq, condition, timeout)		\  ({									\  	long __retval = timeout;					\ -	do {								\ +	for (;;) {							\  		__retval = wait_event_interruptible_timeout(wq,		\  				(condition) || freezing(current),	\  				__retval); 				\ -	} while (try_to_freeze());					\ +		if (__retval <= 0 || (condition))			\ +			break;						\ +		try_to_freeze();					\ +	}								\  	__retval;							\  }) +  #else /* !CONFIG_FREEZER */ -static inline int frozen(struct task_struct *p) { return 0; } -static inline int freezing(struct task_struct *p) { return 0; } -static inline void set_freeze_flag(struct task_struct *p) {} -static inline void clear_freeze_flag(struct task_struct *p) {} -static inline int thaw_process(struct task_struct *p) { return 1; } +static inline bool frozen(struct task_struct *p) { return false; } +static inline bool freezing(struct task_struct *p) { return false; } +static inline void __thaw_task(struct task_struct *t) {} -static inline void refrigerator(void) {} +static inline bool __refrigerator(bool check_kthr_stop) { return false; }  static inline int freeze_processes(void) { return -ENOSYS; }  static inline int freeze_kernel_threads(void) { return -ENOSYS; }  static inline void thaw_processes(void) {} -static inline int try_to_freeze(void) { return 0; } +static inline bool try_to_freeze(void) { return false; }  static inline void freezer_do_not_count(void) {}  static inline void freezer_count(void) {}  static inline int freezer_should_skip(struct task_struct *p) { return 0; }  static inline void set_freezable(void) {} -static inline void set_freezable_with_signal(void) {} + +#define freezable_schedule()  schedule() + +#define freezable_schedule_timeout_killable(timeout)			\ +	schedule_timeout_killable(timeout)  #define wait_event_freezable(wq, condition)				\  		wait_event_interruptible(wq, condition) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 5cac19b3a26..0714b24c0e4 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),  void kthread_bind(struct task_struct *k, unsigned int cpu);  int kthread_stop(struct task_struct *k);  int kthread_should_stop(void); +bool kthread_freezable_should_stop(bool *was_frozen);  void *kthread_data(struct task_struct *k);  int kthreadd(void *unused); diff --git a/include/linux/sched.h b/include/linux/sched.h index 1c4f3e9b9bc..d81cce93386 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(  			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)  #define task_contributes_to_load(task)	\  				((task->state & TASK_UNINTERRUPTIBLE) != 0 
&& \ -				 (task->flags & PF_FREEZING) == 0) +				 (task->flags & PF_FROZEN) == 0)  #define __set_task_state(tsk, state_value)		\  	do { (tsk)->state = (state_value); } while (0) @@ -1772,7 +1772,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *  #define PF_MEMALLOC	0x00000800	/* Allocating memory */  #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */  #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */ -#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */  #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */  #define PF_FROZEN	0x00010000	/* frozen for system suspend */  #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */ @@ -1788,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *  #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */  #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */  #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */ -#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */  /*   * Only the _current_ task can read/write to tsk->flags, but other diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 5e828a2ca8e..e411a60cc2c 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)  			    struct freezer, css);  } -static inline int __cgroup_freezing_or_frozen(struct task_struct *task) +bool cgroup_freezing(struct task_struct *task)  { -	enum freezer_state state = task_freezer(task)->state; -	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN); -} +	enum freezer_state state; +	bool ret; -int cgroup_freezing_or_frozen(struct task_struct *task) -{ -	int result; -	task_lock(task); -	result = __cgroup_freezing_or_frozen(task); -	task_unlock(task); -	return result; +	rcu_read_lock(); +	state = task_freezer(task)->state; +	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN; +	rcu_read_unlock(); + +	return ret;  }  /* @@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;   * freezer_can_attach():   * cgroup_mutex (held by caller of can_attach)   * - * cgroup_freezing_or_frozen(): - * task->alloc_lock (to get task's cgroup) - *   * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):   * freezer->lock   *  sighand->siglock (if the cgroup is freezing) @@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;   *   write_lock css_set_lock (cgroup iterator start)   *    task->alloc_lock   *   read_lock css_set_lock (cgroup iterator start) - *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator()) + *    task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())   *     sighand->siglock   */  static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss, @@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,  static void freezer_destroy(struct cgroup_subsys *ss,  			    struct cgroup *cgroup)  { -	kfree(cgroup_freezer(cgroup)); +	struct freezer *freezer = cgroup_freezer(cgroup); + +	if (freezer->state != CGROUP_THAWED) +		atomic_dec(&system_freezing_cnt); +	kfree(freezer);  }  /* @@ -177,13 +176,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,  static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)  { -	
rcu_read_lock(); -	if (__cgroup_freezing_or_frozen(tsk)) { -		rcu_read_unlock(); -		return -EBUSY; -	} -	rcu_read_unlock(); -	return 0; +	return cgroup_freezing(tsk) ? -EBUSY : 0;  }  static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task) @@ -213,7 +206,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)  	/* Locking avoids race with FREEZING -> THAWED transitions. */  	if (freezer->state == CGROUP_FREEZING) -		freeze_task(task, true); +		freeze_task(task);  	spin_unlock_irq(&freezer->lock);  } @@ -231,7 +224,7 @@ static void update_if_frozen(struct cgroup *cgroup,  	cgroup_iter_start(cgroup, &it);  	while ((task = cgroup_iter_next(cgroup, &it))) {  		ntotal++; -		if (frozen(task)) +		if (freezing(task) && frozen(task))  			nfrozen++;  	} @@ -279,10 +272,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)  	struct task_struct *task;  	unsigned int num_cant_freeze_now = 0; -	freezer->state = CGROUP_FREEZING;  	cgroup_iter_start(cgroup, &it);  	while ((task = cgroup_iter_next(cgroup, &it))) { -		if (!freeze_task(task, true)) +		if (!freeze_task(task))  			continue;  		if (frozen(task))  			continue; @@ -300,12 +292,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)  	struct task_struct *task;  	cgroup_iter_start(cgroup, &it); -	while ((task = cgroup_iter_next(cgroup, &it))) { -		thaw_process(task); -	} +	while ((task = cgroup_iter_next(cgroup, &it))) +		__thaw_task(task);  	cgroup_iter_end(cgroup, &it); - -	freezer->state = CGROUP_THAWED;  }  static int freezer_change_state(struct cgroup *cgroup, @@ -319,20 +308,24 @@ static int freezer_change_state(struct cgroup *cgroup,  	spin_lock_irq(&freezer->lock);  	update_if_frozen(cgroup, freezer); -	if (goal_state == freezer->state) -		goto out;  	switch (goal_state) {  	case CGROUP_THAWED: +		if (freezer->state != CGROUP_THAWED) +			atomic_dec(&system_freezing_cnt); +		freezer->state = CGROUP_THAWED;  		unfreeze_cgroup(cgroup, freezer);  		break;  	case CGROUP_FROZEN: +		if (freezer->state == CGROUP_THAWED) +			atomic_inc(&system_freezing_cnt); +		freezer->state = CGROUP_FREEZING;  		retval = try_to_freeze_cgroup(cgroup, freezer);  		break;  	default:  		BUG();  	} -out: +  	spin_unlock_irq(&freezer->lock);  	return retval; diff --git a/kernel/exit.c b/kernel/exit.c index d0b7d988f87..95a4141d07e 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)  	tsk->mm = NULL;  	up_read(&mm->mmap_sem);  	enter_lazy_tlb(mm, current); -	/* We don't want this task to be frozen prematurely */ -	clear_freeze_flag(tsk);  	task_unlock(tsk);  	mm_update_next_owner(mm);  	mmput(mm); @@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)  	exit_rcu();  	/* causes final put_task_struct in finish_task_switch(). */  	tsk->state = TASK_DEAD; +	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */  	schedule();  	BUG();  	/* Avoid "noreturn function does return".  
diff --git a/kernel/fork.c b/kernel/fork.c
index da4a6a10d08..82780861384 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 7be56c53439..9815b8d1eed 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -9,101 +9,114 @@
 #include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state.  Can be
+ * called under any context.  The freezers are responsible for ensuring the
+ * target tasks see the updated state.
  */
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
 {
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		smp_wmb();
-	}
-	clear_freeze_flag(current);
+	if (p->flags & PF_NOFREEZE)
+		return false;
+
+	if (pm_nosig_freezing || cgroup_freezing(p))
+		return true;
+
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
+		return true;
+
+	return false;
 }
+EXPORT_SYMBOL(freezing_slow_path);
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
-	long save;
+	bool was_frozen = false;
+	long save = current->state;
 
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* prevent accounting of that task to load */
-	current->flags |= PF_FREEZING;
-
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
+
+		spin_lock_irq(&freezer_lock);
+		current->flags |= PF_FROZEN;
+		if (!freezing(current) ||
+		    (check_kthr_stop && kthread_should_stop()))
+			current->flags &= ~PF_FROZEN;
+		spin_unlock_irq(&freezer_lock);
+
+		if (!(current->flags & PF_FROZEN))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
-	/* Remove the accounting blocker */
-	current->flags &= ~PF_FREEZING;
-
 	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
+
+	/*
+	 * Restore saved task state before returning.  The mb'd version
+	 * needs to be used; otherwise, it might silently break
+	 * synchronization which depends on ordered task state change.
+	 */
+	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	if (lock_task_sighand(p, &flags)) {
+		signal_wake_up(p, 0);
+		unlock_task_sighand(p, &flags);
+	}
 }
 
 /**
- *	freeze_task - send a freeze request to given task
- *	@p: task to send the request to
- *	@sig_only: if set, the request will only be sent if the task has the
- *		PF_FREEZER_NOSIG flag unset
- *	Return value: 'false', if @sig_only is set and the task has
- *		PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
+ * freeze_task - send a freeze request to given task
+ * @p: task to send the request to
+ *
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
  *
- *	The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- *	either sending a fake signal to it or waking it up, depending on whether
- *	or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
- *	has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- *	TIF_FREEZE flag will not be set.
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
  */
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
 {
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		smp_rmb();
-		if (frozen(p))
-			return false;
+	unsigned long flags;
 
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (!freezing(p) || frozen(p)) {
+		spin_unlock_irqrestore(&freezer_lock, flags);
+		return false;
 	}
 
-	if (should_send_signal(p)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 		 * TASK_RUNNING transition can't race with task state
 		 * testing in try_to_freeze_tasks().
 		 */
-	} else if (sig_only) {
-		return false;
 	} else {
 		wake_up_state(p, TASK_INTERRUPTIBLE);
 	}
 
+	spin_unlock_irqrestore(&freezer_lock, flags);
 	return true;
 }
 
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	if (freezing(p)) {
-		pr_debug("  clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		return 1;
-	}
-	clear_freeze_flag(p);
-	return 0;
+	/*
+	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
+	 * be visible to @p as waking up implies wmb.  Waking up inside
+	 * freezer_lock also prevents wakeups from leaking outside
+	 * refrigerator.
+	 */
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (frozen(p))
+		wake_up_process(p);
+	spin_unlock_irqrestore(&freezer_lock, flags);
 }
 
-/*
- * Wake up a frozen process
+/**
+ * set_freezable - make %current freezable
  *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails.  Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
+ * Mark %current freezable and enter refrigerator if necessary.
  */
-int thaw_process(struct task_struct *p)
+bool set_freezable(void)
 {
-	task_lock(p);
-	if (__thaw_process(p) == 1) {
-		task_unlock(p);
-		wake_up_process(p);
-		return 1;
-	}
-	task_unlock(p);
-	return 0;
+	might_sleep();
+
+	/*
+	 * Modify flags while holding freezer_lock.  This ensures the
+	 * freezer notices that we aren't frozen yet or the freezing
+	 * condition is visible to try_to_freeze() below.
+	 */
+	spin_lock_irq(&freezer_lock);
+	current->flags &= ~PF_NOFREEZE;
+	spin_unlock_irq(&freezer_lock);
+
+	return try_to_freeze();
 }
-EXPORT_SYMBOL(thaw_process);
+EXPORT_SYMBOL(set_freezable);
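Illustration, not part of the patch: freezing_slow_path() above is only reached once a freezing condition is in effect; the matching fast path is an inline in include/linux/freezer.h (not shown in this excerpt).  A rough sketch of that fast path, assuming only the system_freezing_cnt counter introduced above:

/* Sketch of the freezing() fast path; the authoritative inline lives in
 * include/linux/freezer.h.  The common case costs a single atomic read. */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;			/* nothing in the system is freezing */

	return freezing_slow_path(p);		/* PM or a freezer cgroup is active */
}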
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b6d216a9263..3d3de633702 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -59,6 +59,31 @@ int kthread_should_stop(void)
 EXPORT_SYMBOL(kthread_should_stop);
 
 /**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary.  This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+	bool frozen = false;
+
+	might_sleep();
+
+	if (unlikely(freezing(current)))
+		frozen = __refrigerator(true);
+
+	if (was_frozen)
+		*was_frozen = frozen;
+
+	return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
+/**
  * kthread_data - return data value specified on kthread creation
  * @task: kthread task in question
  *
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
-	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+	current->flags |= PF_NOFREEZE;
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
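Illustration, not part of the patch: a freezable kernel thread is expected to poll kthread_freezable_should_stop() in its main loop, which both honours kthread_stop() and parks the thread in the refrigerator while a freeze is in progress.  The thread function, work helper and timeout below are hypothetical:

#include <linux/kthread.h>
#include <linux/freezer.h>

/* Hypothetical freezable kthread main loop (sketch only). */
static int example_thread_fn(void *data)
{
	bool was_frozen;

	set_freezable();	/* clear PF_NOFREEZE for this kthread */

	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("example: back from the refrigerator\n");

		/* do_work() stands in for the thread's real work */
		do_work(data);
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}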
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 5314a94a92c..605149a6d21 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -590,17 +590,6 @@ static void power_down(void)
 	while(1);
 }
 
-static int prepare_processes(void)
-{
-	int error = 0;
-
-	if (freeze_processes()) {
-		error = -EBUSY;
-		thaw_processes();
-	}
-	return error;
-}
-
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
@@ -633,7 +622,7 @@ int hibernate(void)
 	sys_sync();
 	printk("done.\n");
 
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error)
 		goto Finish;
 
@@ -796,7 +785,7 @@ static int software_resume(void)
 	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error) {
 		swsusp_close(FMODE_READ);
 		goto Done;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index addbbe5531b..77274c9ba2f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,16 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezable(struct task_struct * p)
-{
-	if ((p == current) ||
-	    (p->flags & PF_NOFREEZE) ||
-	    (p->exit_state != 0))
-		return 0;
-	return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
 	struct task_struct *g, *p;
 	unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
 
 	end_time = jiffies + TIMEOUT;
 
-	if (!sig_only)
+	if (!user_only)
 		freeze_workqueues_begin();
 
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezable(p))
-				continue;
-
-			if (!freeze_task(p, sig_only))
+			if (p == current || !freeze_task(p))
 				continue;
 
 			/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 
-		if (!sig_only) {
+		if (!user_only) {
 			wq_busy = freeze_workqueues_busy();
 			todo += wq_busy;
 		}
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
 	elapsed_csecs = elapsed_csecs64;
 
 	if (todo) {
-		/* This does not unfreeze processes that are already frozen
-		 * (we have slightly ugly calling convention in that respect,
-		 * and caller must call thaw_processes() if something fails),
-		 * but it cleans up leftover PF_FREEZE requests.
-		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
 		       elapsed_csecs / 100, elapsed_csecs % 100,
 		       todo - wq_busy, wq_busy);
 
-		thaw_workqueues();
-
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			task_lock(p);
-			if (!wakeup && freezing(p) && !freezer_should_skip(p))
+			if (!wakeup && !freezer_should_skip(p) &&
+			    p != current && freezing(p) && !frozen(p))
 				sched_show_task(p);
-			cancel_freezing(p);
-			task_unlock(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)
 
 /**
  * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_processes(void)
 {
 	int error;
 
+	if (!pm_freezing)
+		atomic_inc(&system_freezing_cnt);
+
 	printk("Freezing user space processes ... ");
+	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
  */
 int freeze_kernel_threads(void)
 {
 	int error;
 
 	printk("Freezing remaining freezable tasks ... ");
+	pm_nosig_freezing = true;
 	error = try_to_freeze_tasks(false);
 	if (!error)
 		printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
-static void thaw_tasks(bool nosig_only)
+void thaw_processes(void)
 {
 	struct task_struct *g, *p;
 
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		if (!freezable(p))
-			continue;
+	if (pm_freezing)
+		atomic_dec(&system_freezing_cnt);
+	pm_freezing = false;
+	pm_nosig_freezing = false;
 
-		if (nosig_only && should_send_signal(p))
-			continue;
+	oom_killer_enable();
+
+	printk("Restarting tasks ... ");
 
-		if (cgroup_freezing_or_frozen(p))
-			continue;
+	thaw_workqueues();
 
-		thaw_process(p);
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		__thaw_task(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
-}
 
-void thaw_processes(void)
-{
-	oom_killer_enable();
-
-	printk("Restarting tasks ... ");
-	thaw_workqueues();
-	thaw_tasks(true);
-	thaw_tasks(false);
 	schedule();
 	printk("done.\n");
 }
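Illustration, not part of the patch: under the calling convention documented above, freeze_processes() and freeze_kernel_threads() thaw everything themselves on failure, so a caller only propagates the error (this is exactly the simplification applied to hibernate() and suspend_prepare() in this series).  A hypothetical caller:

/* Hypothetical sleep-entry path under the new convention (sketch only). */
static int example_enter_sleep(void)
{
	int error;

	error = freeze_processes();
	if (error)
		return error;		/* already fully thawed by the freezer */

	error = do_enter_sleep();	/* hypothetical platform hook */

	thaw_processes();
	return error;
}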
"); +	pm_nosig_freezing = true;  	error = try_to_freeze_tasks(false);  	if (!error)  		printk("done."); @@ -168,37 +158,32 @@ int freeze_kernel_threads(void)  	printk("\n");  	BUG_ON(in_atomic()); +	if (error) +		thaw_processes();  	return error;  } -static void thaw_tasks(bool nosig_only) +void thaw_processes(void)  {  	struct task_struct *g, *p; -	read_lock(&tasklist_lock); -	do_each_thread(g, p) { -		if (!freezable(p)) -			continue; +	if (pm_freezing) +		atomic_dec(&system_freezing_cnt); +	pm_freezing = false; +	pm_nosig_freezing = false; -		if (nosig_only && should_send_signal(p)) -			continue; +	oom_killer_enable(); + +	printk("Restarting tasks ... "); -		if (cgroup_freezing_or_frozen(p)) -			continue; +	thaw_workqueues(); -		thaw_process(p); +	read_lock(&tasklist_lock); +	do_each_thread(g, p) { +		__thaw_task(p);  	} while_each_thread(g, p);  	read_unlock(&tasklist_lock); -} -void thaw_processes(void) -{ -	oom_killer_enable(); - -	printk("Restarting tasks ... "); -	thaw_workqueues(); -	thaw_tasks(true); -	thaw_tasks(false);  	schedule();  	printk("done.\n");  } diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4953dc054c5..d336b27d110 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -106,13 +106,11 @@ static int suspend_prepare(void)  		goto Finish;  	error = suspend_freeze_processes(); -	if (error) { -		suspend_stats.failed_freeze++; -		dpm_save_failed_step(SUSPEND_FREEZE); -	} else +	if (!error)  		return 0; -	suspend_thaw_processes(); +	suspend_stats.failed_freeze++; +	dpm_save_failed_step(SUSPEND_FREEZE);  	usermodehelper_enable();   Finish:  	pm_notifier_call_chain(PM_POST_SUSPEND); diff --git a/kernel/power/user.c b/kernel/power/user.c index e2aff0fc269..c202e2e1a2d 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -257,10 +257,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,  			break;  		error = freeze_processes(); -		if (error) { -			thaw_processes(); +		if (error)  			usermodehelper_enable(); -		}  		if (!error)  			data->frozen = 1;  		break; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 71034f41a2b..7ba8feae11b 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)  	/*  	 * Finally, kill the kernel thread. We don't need to be RCU -	 * safe anymore, since the bdi is gone from visibility. Force -	 * unfreeze of the thread before calling kthread_stop(), otherwise -	 * it would never exet if it is currently stuck in the refrigerator. +	 * safe anymore, since the bdi is gone from visibility.  	 
 	 */
-	if (bdi->wb.task) {
-		thaw_process(bdi->wb.task);
+	if (bdi->wb.task)
 		kthread_stop(bdi->wb.task);
-	}
 }
 
 /*
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 76f2c5ae908..3134ee2fb2e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 		 */
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
-				thaw_process(p);
+				__thaw_task(p);
 			return ERR_PTR(-1UL);
 		}
 		if (!p->mm)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index d12ffa54581..5317b9341b5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <linux/sunrpc/clnt.h>
 
@@ -231,7 +232,7 @@ static int rpc_wait_bit_killable(void *word)
 {
 	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
-	schedule();
+	freezable_schedule();
 	return 0;
 }
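Illustration, not part of the patch: freezable_schedule(), used in the sunrpc hunk above, is a helper from include/linux/freezer.h.  A rough sketch of what it amounts to, assuming the PF_FREEZER_SKIP machinery referenced by freezer_should_skip() earlier in this diff:

/* Sketch only; the real helper lives in include/linux/freezer.h.
 * Marking the sleeper with PF_FREEZER_SKIP lets try_to_freeze_tasks()
 * treat it as frozen while it sleeps, and try_to_freeze() is attempted
 * as soon as it wakes up. */
#define freezable_schedule_sketch()					\
({									\
	freezer_do_not_count();		/* set PF_FREEZER_SKIP */	\
	schedule();							\
	freezer_count();		/* clear it, then try_to_freeze() */ \
})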