| author | Tejun Heo <tj@kernel.org> | 2013-04-01 17:08:13 -0700 | 
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-04-01 18:45:36 -0700 | 
| commit | 229641a6f1f09e27a1f12fba38980f33f4c92975 (patch) | |
| tree | 234a6f8aea0910de3242af0bbe6d7494fcf81847 /arch/x86/kvm/x86.c | |
| parent | d55262c4d164759a8debe772da6c9b16059dec47 (diff) | |
| parent | 07961ac7c0ee8b546658717034fe692fd12eefa9 (diff) | |
Merge tag 'v3.9-rc5' into wq/for-3.10
The writeback conversion to workqueues will be based on top of the
wq/for-3.10 branch to take advantage of custom attrs and NUMA support
for unbound workqueues.  Mainline currently contains two commits which
result in non-trivial merge conflicts with wq/for-3.10, and because
block/for-3.10/core is based on v3.9-rc3, which contains one of the
conflicting commits, we need a pre-merge-window merge anyway.  Let's
pull v3.9-rc5 into wq/for-3.10 so that the block tree doesn't suffer
from workqueue merge conflicts.
The two conflicts and their resolutions:
* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
  worker_pool_assign_id() to use idr_alloc() instead of the old idr
  interface.  worker_pool_assign_id() goes through multiple locking
  changes in wq/for-3.10, causing the following conflict:
  static int worker_pool_assign_id(struct worker_pool *pool)
  {
	  int ret;
  <<<<<<< HEAD
	  lockdep_assert_held(&wq_pool_mutex);
	  do {
		  if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
			  return -ENOMEM;
		  ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
	  } while (ret == -EAGAIN);
  =======
	  mutex_lock(&worker_pool_idr_mutex);
	  ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
	  if (ret >= 0)
		  pool->id = ret;
	  mutex_unlock(&worker_pool_idr_mutex);
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
	  return ret < 0 ? ret : 0;
  }
  We want the locking from the former and the idr_alloc() usage from
  the latter, which can be combined into the following (an annotated
  note on the idr_alloc() call follows this list):
  static int worker_pool_assign_id(struct worker_pool *pool)
  {
	  int ret;
	  lockdep_assert_held(&wq_pool_mutex);
	  ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
	  if (ret >= 0) {
		  pool->id = ret;
		  return 0;
	  }
	  return ret;
  }
* eb2834285c ("workqueue: fix possible pool stall bug in
  wq_unbind_fn()") updated wq_unbind_fn() such that it has a single
  larger for_each_std_worker_pool() loop instead of two separate loops
  with a schedule() call in between.  wq/for-3.10 renamed
  pool->assoc_mutex to pool->manager_mutex, causing the following
  conflict (earlier function body and comments omitted for brevity):
  static void wq_unbind_fn(struct work_struct *work)
  {
  ...
		  spin_unlock_irq(&pool->lock);
  <<<<<<< HEAD
		  mutex_unlock(&pool->manager_mutex);
	  }
  =======
		  mutex_unlock(&pool->assoc_mutex);
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
		  schedule();
  <<<<<<< HEAD
	  for_each_cpu_worker_pool(pool, cpu)
  =======
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
		  atomic_set(&pool->nr_running, 0);
		  spin_lock_irq(&pool->lock);
		  wake_up_worker(pool);
		  spin_unlock_irq(&pool->lock);
	  }
  }
  The resolution is mostly trivial.  We want the control flow of the
  latter with the rename of the former (see the loop-shape sketch
  after this list):
  static void wq_unbind_fn(struct work_struct *work)
  {
  ...
		  spin_unlock_irq(&pool->lock);
		  mutex_unlock(&pool->manager_mutex);
		  schedule();
		  atomic_set(&pool->nr_running, 0);
		  spin_lock_irq(&pool->lock);
		  wake_up_worker(pool);
		  spin_unlock_irq(&pool->lock);
	  }
  }
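For clarity, the loop shape this resolution produces looks roughly
like the following sketch.  The body elided earlier stays elided, the
iterator name follows the wq/for-3.10 rename, and the comment on
schedule() only paraphrases the intent (see the comments in
kernel/workqueue.c for the precise guarantee):

  for_each_cpu_worker_pool(pool, cpu) {
	  /* ... dissociate the pool's workers under pool->lock ... */
	  spin_unlock_irq(&pool->lock);
	  mutex_unlock(&pool->manager_mutex);

	  /* let concurrent sched callbacks observe the unbound
	   * state before nr_running is cleared */
	  schedule();

	  atomic_set(&pool->nr_running, 0);

	  spin_lock_irq(&pool->lock);
	  wake_up_worker(pool);
	  spin_unlock_irq(&pool->lock);
  }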
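Returning to the first conflict, a note on the idr_alloc() call that
the resolution settles on: its calling convention is what lets the old
idr_pre_get()/idr_get_new() retry loop collapse into a single call.
The sketch below is an annotated illustration only, not part of the
merge:

  	/*
  	 * idr_alloc(idr, ptr, start, end, gfp) allocates the lowest
  	 * free id in [start, end), with end == 0 meaning "no upper
  	 * bound".  It returns the new id (>= 0) on success or a
  	 * negative errno, which is why worker_pool_assign_id() maps
  	 * ret >= 0 to 0.
  	 */
  	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
  	if (ret < 0)
  		return ret;	/* typically -ENOMEM or -ENOSPC */
  	pool->id = ret;
  	return 0;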
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/kvm/x86.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/kvm/x86.c | 64 |

1 file changed, 31 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f8..f19ac0aca60 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	unsigned long flags, this_tsc_khz;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
-	void *shared_kaddr;
 	s64 kernel_ns, max_kernel_ns;
 	u64 tsc_timestamp, host_tsc;
-	struct pvclock_vcpu_time_info *guest_hv_clock;
+	struct pvclock_vcpu_time_info guest_hv_clock;
 	u8 pvclock_flags;
 	bool use_master_clock;
 
 	kernel_ns = 0;
 	host_tsc = 0;
 
-	/* Keep irq disabled to prevent changes to the clock */
-	local_irq_save(flags);
-	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-	if (unlikely(this_tsc_khz == 0)) {
-		local_irq_restore(flags);
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-		return 1;
-	}
-
 	/*
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		kernel_ns = ka->master_kernel_ns;
 	}
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+	/* Keep irq disabled to prevent changes to the clock */
+	local_irq_save(flags);
+	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+	if (unlikely(this_tsc_khz == 0)) {
+		local_irq_restore(flags);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+		return 1;
+	}
 	if (!use_master_clock) {
 		host_tsc = native_read_tsc();
 		kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	local_irq_restore(flags);
 
-	if (!vcpu->time_page)
+	if (!vcpu->pv_time_enabled)
 		return 0;
 
 	/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;
 
-	shared_kaddr = kmap_atomic(vcpu->time_page);
-
-	guest_hv_clock = shared_kaddr + vcpu->time_offset;
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+		&guest_hv_clock, sizeof(guest_hv_clock))))
+		return 0;
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
 
 	if (vcpu->pvclock_set_guest_stopped_request) {
 		pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	vcpu->hv_clock.flags = pvclock_flags;
 
-	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-	       sizeof(vcpu->hv_clock));
-
-	kunmap_atomic(shared_kaddr);
-
-	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock));
 
 	return 0;
 }
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.time_page) {
-		kvm_release_page_dirty(vcpu->arch.time_page);
-		vcpu->arch.time_page = NULL;
-	}
+	vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
+		u64 gpa_offset;
 		kvmclock_reset(vcpu);
 
 		vcpu->arch.time = data;
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		/* ...but clean it before doing the actual write */
-		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		vcpu->arch.time_page =
-				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+		/* Check that the address is 32-byte aligned. */
+		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+			break;
 
-		if (is_error_page(vcpu->arch.time_page))
-			vcpu->arch.time_page = NULL;
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		     &vcpu->arch.pv_time, data & ~1ULL))
+			vcpu->arch.pv_time_enabled = false;
+		else
+			vcpu->arch.pv_time_enabled = true;
 
 		break;
 	}
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  */
 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->arch.time_page)
+	if (!vcpu->arch.pv_time_enabled)
 		return -EINVAL;
 	vcpu->arch.pvclock_set_guest_stopped_request = true;
 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		goto fail_free_wbinvd_dirty_mask;
 
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+	vcpu->arch.pv_time_enabled = false;
 
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
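The diff above replaces the pinned time_page/kmap_atomic() scheme with
KVM's cached guest-memory helpers.  Condensed from the calls visible
in the diff, the resulting pattern looks roughly like the sketch below
(the variable names, the surrounding context for kvm and gpa, and the
example modification are illustrative only):

  struct gfn_to_hva_cache cache;
  struct pvclock_vcpu_time_info hv;

  /* One-time setup when the guest enables kvmclock via the MSR
   * write; fails if gpa doesn't translate to accessible guest
   * memory.  kvm and gpa are assumed to be in scope. */
  if (kvm_gfn_to_hva_cache_init(kvm, &cache, gpa))
	  return 1;

  /* Per update: read the guest copy, modify, write back.  The write
   * helper marks the page dirty itself, which is why the explicit
   * mark_page_dirty() call disappears from kvm_guest_time_update(). */
  if (unlikely(kvm_read_guest_cached(kvm, &cache, &hv, sizeof(hv))))
	  return 0;
  hv.flags |= PVCLOCK_GUEST_STOPPED;	/* example modification */
  kvm_write_guest_cached(kvm, &cache, &hv, sizeof(hv));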