Diffstat (limited to 'kernel/sys.c')
-rw-r--r--	kernel/sys.c	111
1 file changed, 50 insertions, 61 deletions
diff --git a/kernel/sys.c b/kernel/sys.c
index 038a7bc0901..53879cdae48 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -853,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid)
 	return old_fsgid;
 }
 
+void do_sys_times(struct tms *tms)
+{
+	struct task_cputime cputime;
+	cputime_t cutime, cstime;
+
+	spin_lock_irq(&current->sighand->siglock);
+	thread_group_cputime(current, &cputime);
+	cutime = current->signal->cutime;
+	cstime = current->signal->cstime;
+	spin_unlock_irq(&current->sighand->siglock);
+	tms->tms_utime = cputime_to_clock_t(cputime.utime);
+	tms->tms_stime = cputime_to_clock_t(cputime.stime);
+	tms->tms_cutime = cputime_to_clock_t(cutime);
+	tms->tms_cstime = cputime_to_clock_t(cstime);
+}
+
 asmlinkage long sys_times(struct tms __user * tbuf)
 {
-	/*
-	 *	In the SMP world we might just be unlucky and have one of
-	 *	the times increment as we use it. Since the value is an
-	 *	atomically safe type this is just fine. Conceptually its
-	 *	as if the syscall took an instant longer to occur.
-	 */
 	if (tbuf) {
 		struct tms tmp;
-		struct task_struct *tsk = current;
-		struct task_struct *t;
-		cputime_t utime, stime, cutime, cstime;
 
-		spin_lock_irq(&tsk->sighand->siglock);
-		utime = tsk->signal->utime;
-		stime = tsk->signal->stime;
-		t = tsk;
-		do {
-			utime = cputime_add(utime, t->utime);
-			stime = cputime_add(stime, t->stime);
-			t = next_thread(t);
-		} while (t != tsk);
-
-		cutime = tsk->signal->cutime;
-		cstime = tsk->signal->cstime;
-		spin_unlock_irq(&tsk->sighand->siglock);
-
-		tmp.tms_utime = cputime_to_clock_t(utime);
-		tmp.tms_stime = cputime_to_clock_t(stime);
-		tmp.tms_cutime = cputime_to_clock_t(cutime);
-		tmp.tms_cstime = cputime_to_clock_t(cstime);
+		do_sys_times(&tmp);
 		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 			return -EFAULT;
 	}
@@ -1060,9 +1050,7 @@ asmlinkage long sys_setsid(void)
 	group_leader->signal->leader = 1;
 	__set_special_pids(sid);
 
-	spin_lock(&group_leader->sighand->siglock);
-	group_leader->signal->tty = NULL;
-	spin_unlock(&group_leader->sighand->siglock);
+	proc_clear_tty(group_leader);
 
 	err = session;
 out:
@@ -1351,8 +1339,10 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		memcpy(utsname()->nodename, tmp, len);
-		utsname()->nodename[len] = 0;
+		struct new_utsname *u = utsname();
+
+		memcpy(u->nodename, tmp, len);
+		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
 		errno = 0;
 	}
 	up_write(&uts_sem);
@@ -1364,15 +1354,17 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 asmlinkage long sys_gethostname(char __user *name, int len)
 {
 	int i, errno;
+	struct new_utsname *u;
 
 	if (len < 0)
 		return -EINVAL;
 	down_read(&uts_sem);
-	i = 1 + strlen(utsname()->nodename);
+	u = utsname();
+	i = 1 + strlen(u->nodename);
 	if (i > len)
 		i = len;
 	errno = 0;
-	if (copy_to_user(name, utsname()->nodename, i))
+	if (copy_to_user(name, u->nodename, i))
 		errno = -EFAULT;
 	up_read(&uts_sem);
 	return errno;
@@ -1397,8 +1389,10 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
 	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		memcpy(utsname()->domainname, tmp, len);
-		utsname()->domainname[len] = 0;
+		struct new_utsname *u = utsname();
+
+		memcpy(u->domainname, tmp, len);
+		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
 		errno = 0;
 	}
 	up_write(&uts_sem);
@@ -1445,21 +1439,28 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 {
 	struct rlimit new_rlim, *old_rlim;
-	unsigned long it_prof_secs;
 	int retval;
 
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
 		return -EFAULT;
-	if (new_rlim.rlim_cur > new_rlim.rlim_max)
-		return -EINVAL;
 	old_rlim = current->signal->rlim + resource;
 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
 	    !capable(CAP_SYS_RESOURCE))
 		return -EPERM;
-	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
-		return -EPERM;
+
+	if (resource == RLIMIT_NOFILE) {
+		if (new_rlim.rlim_max == RLIM_INFINITY)
+			new_rlim.rlim_max = sysctl_nr_open;
+		if (new_rlim.rlim_cur == RLIM_INFINITY)
+			new_rlim.rlim_cur = sysctl_nr_open;
+		if (new_rlim.rlim_max > sysctl_nr_open)
+			return -EPERM;
+	}
+
+	if (new_rlim.rlim_cur > new_rlim.rlim_max)
+		return -EINVAL;
 
 	retval = security_task_setrlimit(resource, &new_rlim);
 	if (retval)
@@ -1491,18 +1492,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 	if (new_rlim.rlim_cur == RLIM_INFINITY)
 		goto out;
 
-	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
-	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
-		unsigned long rlim_cur = new_rlim.rlim_cur;
-		cputime_t cputime;
-
-		cputime = secs_to_cputime(rlim_cur);
-		read_lock(&tasklist_lock);
-		spin_lock_irq(&current->sighand->siglock);
-		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-		spin_unlock_irq(&current->sighand->siglock);
-		read_unlock(&tasklist_lock);
-	}
+	update_rlimit_cpu(new_rlim.rlim_cur);
 out:
 	return 0;
 }
@@ -1540,11 +1530,8 @@ out:
  *
  */
 
-static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
-				     cputime_t *utimep, cputime_t *stimep)
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
 {
-	*utimep = cputime_add(*utimep, t->utime);
-	*stimep = cputime_add(*stimep, t->stime);
 	r->ru_nvcsw += t->nvcsw;
 	r->ru_nivcsw += t->nivcsw;
 	r->ru_minflt += t->min_flt;
@@ -1558,12 +1545,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	struct task_struct *t;
 	unsigned long flags;
 	cputime_t utime, stime;
+	struct task_cputime cputime;
 
 	memset((char *) r, 0, sizeof *r);
 	utime = stime = cputime_zero;
 
 	if (who == RUSAGE_THREAD) {
-		accumulate_thread_rusage(p, r, &utime, &stime);
+		accumulate_thread_rusage(p, r);
 		goto out;
 	}
 
@@ -1586,8 +1574,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 				break;
 
 		case RUSAGE_SELF:
-			utime = cputime_add(utime, p->signal->utime);
-			stime = cputime_add(stime, p->signal->stime);
+			thread_group_cputime(p, &cputime);
+			utime = cputime_add(utime, cputime.utime);
+			stime = cputime_add(stime, cputime.stime);
 			r->ru_nvcsw += p->signal->nvcsw;
 			r->ru_nivcsw += p->signal->nivcsw;
 			r->ru_minflt += p->signal->min_flt;
@@ -1596,7 +1585,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 			r->ru_oublock += p->signal->oublock;
 			t = p;
 			do {
-				accumulate_thread_rusage(t, r, &utime, &stime);
+				accumulate_thread_rusage(t, r);
 				t = next_thread(t);
 			} while (t != p);
 			break;
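Note on the times() change above: the per-thread summation loop in sys_times() is replaced by do_sys_times(), which fills the struct tms from the consolidated thread-group accounting (thread_group_cputime()) under the siglock. The user-visible semantics of times(2) are unchanged; the fields are still reported in clock ticks. The following stand-alone user-space sketch (illustrative only, plain POSIX calls, not part of the patch) shows how those values convert to seconds via sysconf(_SC_CLK_TCK):

/* illustrative user-space sketch, not part of the patch */
#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);	/* ticks per second */

	if (times(&t) == (clock_t)-1) {
		perror("times");
		return 1;
	}
	/* utime/stime now come from the thread-group accounting in the kernel */
	printf("user   %.2fs\n", (double)t.tms_utime / hz);
	printf("system %.2fs\n", (double)t.tms_stime / hz);
	return 0;
}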
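Note on the setrlimit() change above: for RLIMIT_NOFILE, a requested RLIM_INFINITY hard or soft limit is now clamped to sysctl_nr_open instead of being rejected, and the rlim_cur <= rlim_max sanity check runs after that clamping. A rough user-space sketch of the observable effect follows (illustrative only; the clamp value depends on /proc/sys/fs/nr_open, and raising the hard limit still requires CAP_SYS_RESOURCE):

/* illustrative user-space sketch, not part of the patch */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };

	if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {
		perror("setrlimit");	/* pre-patch kernels return EPERM for this request */
		return 1;
	}
	if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	/* with the patch, the stored limits are the nr_open clamp, not infinity */
	printf("soft=%llu hard=%llu\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);
	return 0;
}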