Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	99
1 file changed, 81 insertions, 18 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae253e04c39..59e02691baf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -601,6 +601,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
 	return 0;
 }
 
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+					      u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->last_seqno < seqno;
+}
+
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -631,11 +638,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter,
-			dw1 | signaller->semaphore_register[waiter->id]);
-	intel_ring_emit(waiter, seqno);
-	intel_ring_emit(waiter, 0);
-	intel_ring_emit(waiter, MI_NOOP);
+	/* If seqno wrap happened, omit the wait with no-ops */
+	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+		intel_ring_emit(waiter,
+				dw1 |
+				signaller->semaphore_register[waiter->id]);
+		intel_ring_emit(waiter, seqno);
+		intel_ring_emit(waiter, 0);
+		intel_ring_emit(waiter, MI_NOOP);
+	} else {
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+	}
 	intel_ring_advance(waiter);
 
 	return 0;
@@ -716,6 +732,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
@@ -723,6 +745,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 	return pc->cpu_page[0];
 }
 
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct pipe_control *pc = ring->private;
+	pc->cpu_page[0] = seqno;
+}
+
 static bool
 gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
@@ -1152,7 +1181,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			return ret;
 	}
 
-	obj = i915_gem_alloc_object(dev, ring->size);
+	obj = NULL;
+	if (!HAS_LLC(dev))
+		obj = i915_gem_object_create_stolen(dev, ring->size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, ring->size);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		ret = -ENOMEM;
@@ -1190,6 +1223,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
+	intel_ring_init_seqno(ring, dev_priv->last_seqno);
+
 	return 0;
 
 err_unmap:
@@ -1398,11 +1433,31 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
 }
 
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+			      int bytes)
+{
+	int ret;
+
+	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < bytes)) {
+		ret = ring_wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	ring->space -= bytes;
+	return 0;
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int n = 4*num_dwords;
 	int ret;
 
 	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
@@ -1414,20 +1469,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	if (unlikely(ring->tail + n > ring->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
-	}
+	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
 
-	if (unlikely(ring->space < n)) {
-		ret = ring_wait_for_space(ring, n);
-		if (unlikely(ret))
-			return ret;
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	BUG_ON(ring->outstanding_lazy_request);
+
+	if (INTEL_INFO(ring->dev)->gen >= 6) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
 	}
 
-	ring->space -= n;
-	return 0;
+	ring->set_seqno(ring, seqno);
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1592,6 +1648,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
 		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1602,6 +1659,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
+		ring->set_seqno = pc_render_set_seqno;
 		ring->irq_get = gen5_ring_get_irq;
 		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1612,6 +1670,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		else
 			ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN2(dev)) {
 			ring->irq_get = i8xx_ring_get_irq;
 			ring->irq_put = i8xx_ring_put_irq;
@@ -1683,6 +1742,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	else
 		ring->flush = gen4_render_ring_flush;
 	ring->get_seqno = ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	if (IS_GEN2(dev)) {
 		ring->irq_get = i8xx_ring_get_irq;
 		ring->irq_put = i8xx_ring_put_irq;
@@ -1743,6 +1803,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_ring_flush;
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
@@ -1758,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = i9xx_add_request;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen5_ring_get_irq;
@@ -1787,6 +1849,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->flush = blt_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
 	ring->irq_get = gen6_ring_get_irq;
 	ring->irq_put = gen6_ring_put_irq;
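In short: request seqnos are handed out monotonically, so once the 32-bit counter wraps, a semaphore wait on a pre-wrap seqno (now numerically larger than anything the signaller will write) would never complete. The diff handles this by skipping the wait and emitting equally sized MI_NOOP padding, by adding per-ring set_seqno() hooks so the current seqno can be reseeded, and by clearing the gen6+ semaphore mailboxes in intel_ring_init_seqno(). Below is a minimal standalone sketch of the fixed-footprint emission pattern; the types and helpers (toy_ring, toy_emit, emit_semaphore_wait) are illustrative stand-ins, not the driver's API.

/* Sketch: why both branches emit exactly four dwords. The caller
 * (intel_ring_begin() in the driver) reserves a fixed number of
 * dwords up front, so the skip path must consume the same space.
 */
#include <stdbool.h>
#include <stdint.h>

#define MI_NOOP 0u			/* no-op command dword */

struct toy_ring {
	uint32_t *vaddr;		/* CPU mapping of the ring buffer */
	uint32_t tail;			/* write offset, in dwords */
};

static void toy_emit(struct toy_ring *ring, uint32_t dw)
{
	ring->vaddr[ring->tail++] = dw;
}

/* A wait target numerically above the last allocated seqno can only
 * predate a wrap; the hardware would wait on it forever. */
static bool seqno_has_wrapped(uint32_t last_seqno, uint32_t wait_seqno)
{
	return last_seqno < wait_seqno;
}

static void emit_semaphore_wait(struct toy_ring *waiter, uint32_t wait_cmd,
				uint32_t mbox_reg, uint32_t wait_seqno,
				uint32_t last_seqno)
{
	if (!seqno_has_wrapped(last_seqno, wait_seqno)) {
		toy_emit(waiter, wait_cmd | mbox_reg);	/* wait command */
		toy_emit(waiter, wait_seqno);		/* value waited on */
		toy_emit(waiter, 0);
		toy_emit(waiter, MI_NOOP);
	} else {
		int i;

		/* Stale wait: keep the four-dword footprint, do nothing. */
		for (i = 0; i < 4; i++)
			toy_emit(waiter, MI_NOOP);
	}
}

Keeping the two paths the same size means the wrap check can be made at emission time without disturbing the space accounting that __intel_ring_begin() has already done.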