Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 87 insertions(+), 16 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 580a91e6347..3d09cf6cde7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -31,6 +31,7 @@
 #include <linux/nsproxy.h>
 #include <linux/user_namespace.h>
 #include <linux/uprobes.h>
+#include <linux/compat.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
@@ -679,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-	unsigned int mask;
-
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
-
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
-	mask = TASK_INTERRUPTIBLE;
-	if (resume)
-		mask |= TASK_WAKEKILL;
-	if (!wake_up_state(t, mask))
+	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 		kick_process(t);
 }
@@ -843,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)
 	assert_spin_locked(&t->sighand->siglock);
 
 	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
-	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 }
 
 /*
@@ -1799,6 +1794,10 @@ static inline int may_ptrace_stop(void)
 	 * If SIGKILL was already sent before the caller unlocked
 	 * ->siglock we must see ->core_state != NULL. Otherwise it
 	 * is safe to enter schedule().
+	 *
+	 * This is almost outdated, a task with the pending SIGKILL can't
+	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
+	 * after SIGKILL was already dequeued.
 	 */
 	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
@@ -1924,6 +1923,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 		if (gstop_done)
 			do_notify_parent_cldstop(current, false, why);
 
+		/* tasklist protects us from ptrace_freeze_traced() */
 		__set_current_state(TASK_RUNNING);
 		if (clear_code)
 			current->exit_code = 0;
@@ -2527,11 +2527,8 @@ static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
  */
 void set_current_blocked(sigset_t *newset)
 {
-	struct task_struct *tsk = current;
-
 	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
-	spin_lock_irq(&tsk->sighand->siglock);
-	__set_task_blocked(tsk, newset);
-	spin_unlock_irq(&tsk->sighand->siglock);
+	__set_current_blocked(newset);
 }
 
 void __set_current_blocked(const sigset_t *newset)
@@ -3094,6 +3091,80 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 out:
 	return error;
 }
+#ifdef CONFIG_GENERIC_SIGALTSTACK
+SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
+{
+	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
+}
+#endif
+
+int restore_altstack(const stack_t __user *uss)
+{
+	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
+	/* squash all but EFAULT for now */
+	return err == -EFAULT ? err : 0;
+}
+
+int __save_altstack(stack_t __user *uss, unsigned long sp)
+{
+	struct task_struct *t = current;
+	return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
+		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
+		__put_user(t->sas_ss_size, &uss->ss_size);
+}
+
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_GENERIC_SIGALTSTACK
+COMPAT_SYSCALL_DEFINE2(sigaltstack,
+			const compat_stack_t __user *, uss_ptr,
+			compat_stack_t __user *, uoss_ptr)
+{
+	stack_t uss, uoss;
+	int ret;
+	mm_segment_t seg;
+
+	if (uss_ptr) {
+		compat_stack_t uss32;
+
+		memset(&uss, 0, sizeof(stack_t));
+		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
+			return -EFAULT;
+		uss.ss_sp = compat_ptr(uss32.ss_sp);
+		uss.ss_flags = uss32.ss_flags;
+		uss.ss_size = uss32.ss_size;
+	}
+	seg = get_fs();
+	set_fs(KERNEL_DS);
+	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+			     (stack_t __force __user *) &uoss,
+			     compat_user_stack_pointer());
+	set_fs(seg);
+	if (ret >= 0 && uoss_ptr) {
+		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
+		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
+		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
+		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+			ret = -EFAULT;
+	}
+	return ret;
+}
+
+int compat_restore_altstack(const compat_stack_t __user *uss)
+{
+	int err = compat_sys_sigaltstack(uss, NULL);
+	/* squash all but -EFAULT for now */
+	return err == -EFAULT ? err : 0;
+}
+
+int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
+{
+	struct task_struct *t = current;
+	return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
+		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
+		__put_user(t->sas_ss_size, &uss->ss_size);
+}
+#endif
+#endif
 
 #ifdef __ARCH_WANT_SYS_SIGPENDING
@@ -3130,7 +3201,6 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
 	if (nset) {
 		if (copy_from_user(&new_set, nset, sizeof(*nset)))
 			return -EFAULT;
-		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
 
 		new_blocked = current->blocked;
@@ -3148,7 +3218,7 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
 			return -EINVAL;
 		}
 
-		__set_current_blocked(&new_blocked);
+		set_current_blocked(&new_blocked);
 	}
 
 	if (oset) {
@@ -3212,6 +3282,7 @@ SYSCALL_DEFINE1(ssetmask, int, newmask)
 	int old = current->blocked.sig[0];
 	sigset_t newset;
 
+	siginitset(&newset, newmask);
 	set_current_blocked(&newset);
 	return old;
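
The diffstat is limited to kernel/signal.c, so the header side of the signal_wake_up() rework is not visible here. Given that ptrace_trap_notify() now calls ptrace_signal_wake_up(), the old entry points presumably survive as thin wrappers around signal_wake_up_state(), along these lines (a sketch; the wrapper bodies and the exact TASK_* bits are inferred, not shown by this diff):

/* Sketch, not part of this diff: presumed wrappers over the new
 * signal_wake_up_state(). signal_wake_up() keeps the old semantics
 * (add TASK_WAKEKILL when resuming), while ptrace_signal_wake_up()
 * can kick a tracee out of TASK_TRACED without implying a killable
 * wakeup. */
static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}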
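
The set_current_blocked() hunk is a pure factoring: the removed lines are exactly the body of __set_current_blocked(), whose signature already appears as trailing context in the same hunk. Reconstructed from those removed lines, the helper presumably reads:

/* Presumed body of the pre-existing helper, reconstructed from the
 * lines the hunk above removes; not shown directly in this diff. */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

This also explains the two sigprocmask hunks: the open-coded new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)) goes away, and the call switches from __set_current_blocked() to set_current_blocked(), so the SIGKILL/SIGSTOP filtering now happens in exactly one place.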
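
restore_altstack() and __save_altstack() are clearly meant to be called from architecture signal code: save the altstack description into the user-visible ucontext when delivering a signal, and push it back through do_sigaltstack() on sigreturn. A hypothetical arch-side sketch (the rt_sigframe layout and function names here are invented for illustration):

/* Hypothetical arch code; the frame layout and names are assumptions. */
struct rt_sigframe {
	struct ucontext uc;
	/* siginfo, saved machine state, ... */
};

static int frame_save_altstack(struct rt_sigframe __user *frame,
			       unsigned long usp)
{
	/* Delivery path: record ss_sp/ss_flags/ss_size in uc_stack. */
	return __save_altstack(&frame->uc.uc_stack, usp);
}

static int frame_restore_altstack(struct rt_sigframe __user *frame)
{
	/*
	 * Sigreturn path: restore_altstack() squashes every error but
	 * -EFAULT, so a garbled ss_size cannot make sigreturn fail.
	 */
	return restore_altstack(&frame->uc.uc_stack);
}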
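
The compat sigaltstack path reuses the native do_sigaltstack() by converting the 32-bit stack_t into a kernel-space stack_t and widening the user-copy window with set_fs(KERNEL_DS), so the __user-annotated accesses inside do_sigaltstack() may legally dereference kernel pointers. In miniature, the round-trip looks like this (illustrative only; later kernels removed set_fs() entirely):

/* Illustration of the set_fs(KERNEL_DS) round-trip used above: pass a
 * kernel stack_t where a __user pointer is expected. The helper name
 * is hypothetical. */
static int do_sigaltstack_kernel(stack_t *kss, unsigned long sp)
{
	mm_segment_t old_fs = get_fs();
	int ret;

	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *)kss, NULL, sp);
	set_fs(old_fs);
	return ret;
}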
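
The final ssetmask hunk fixes what the shown context strongly suggests is a real bug: newset is a local variable that was handed to set_current_blocked() without ever being initialized. The added siginitset() fills the first word of the set from the legacy mask and zeroes the rest; quoted from memory (include/linux/signal.h), it is roughly:

/* Roughly the stock helper from include/linux/signal.h; quoted from
 * memory rather than from this diff. */
static inline void siginitset(sigset_t *set, unsigned long mask)
{
	set->sig[0] = mask;
	switch (_NSIG_WORDS) {
	default:
		memset(&set->sig[1], 0, sizeof(long) * (_NSIG_WORDS - 1));
		break;
	case 2: set->sig[1] = 0;
	case 1: ;
	}
}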