Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	96
1 file changed, 58 insertions, 38 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b0016bb6563..c1f691958f8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1696,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	list_del(&obj->gtt_list);
+
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
-	list_del(&obj->gtt_list);
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
@@ -1857,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring,
-			       u32 seqno)
+			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
 	obj->ring = ring;
@@ -1922,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	WARN_ON(i915_verify_lists(dev));
 }
 
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno = dev_priv->next_seqno;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i, j;
 
-	/* reserve 0 for non-seqno */
-	if (++dev_priv->next_seqno == 0)
-		dev_priv->next_seqno = 1;
+	/* The hardware uses various monotonic 32-bit counters, if we
+	 * detect that they will wraparound we need to idle the GPU
+	 * and reset those counters.
+	 */
+	ret = 0;
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ret |= ring->sync_seqno[j] != 0;
+	}
+	if (ret == 0)
+		return ret;
+
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev);
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ring->sync_seqno[j] = 0;
+	}
 
-	return seqno;
+	return 0;
 }
 
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
-	if (ring->outstanding_lazy_request == 0)
-		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* reserve 0 for non-seqno */
+	if (dev_priv->next_seqno == 0) {
+		int ret = i915_gem_handle_seqno_wrap(dev);
+		if (ret)
+			return ret;
+
+		dev_priv->next_seqno = 1;
+	}
 
-	return ring->outstanding_lazy_request;
+	*seqno = dev_priv->next_seqno++;
+	return 0;
 }
 
 int
@@ -1952,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
 	u32 request_ring_position;
-	u32 seqno;
 	int was_empty;
 	int ret;
 
@@ -1971,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	if (request == NULL)
 		return -ENOMEM;
 
-	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
@@ -1980,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
 	 */
 	request_ring_position = intel_ring_get_tail(ring);
 
-	ret = ring->add_request(ring, &seqno);
+	ret = ring->add_request(ring);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 
-	trace_i915_gem_request_add(ring, seqno);
-
-	request->seqno = seqno;
+	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->tail = request_ring_position;
 	request->emitted_jiffies = jiffies;
@@ -2006,6 +2034,7 @@ i915_add_request(struct intel_ring_buffer *ring,
 		spin_unlock(&file_priv->mm.lock);
 	}
 
+	trace_i915_gem_request_add(ring, request->seqno);
 	ring->outstanding_lazy_request = 0;
 
 	if (!dev_priv->mm.suspended) {
@@ -2022,7 +2051,7 @@ i915_add_request(struct intel_ring_buffer *ring,
 	}
 
 	if (out_seqno)
-		*out_seqno = seqno;
+		*out_seqno = request->seqno;
 	return 0;
 }
 
@@ -2120,7 +2149,6 @@ void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
 	uint32_t seqno;
-	int i;
 
 	if (list_empty(&ring->request_list))
 		return;
@@ -2129,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
 	seqno = ring->get_seqno(ring, true);
 
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
-		if (seqno >= ring->sync_seqno[i])
-			ring->sync_seqno[i] = 0;
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2377,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
-		from->sync_seqno[idx] = seqno;
+		/* We use last_read_seqno because sync_to()
+		 * might have just caused seqno wrap under
+		 * the radar.
+		 */
+		from->sync_seqno[idx] = obj->last_read_seqno;
 
 	return ret;
 }
@@ -2460,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
-	if (list_empty(&ring->active_list))
-		return 0;
-
-	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
 int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2480,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		ret = i915_ring_idle(ring);
+		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
 	}
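The first hunk reorders i915_gem_object_put_pages() so the object leaves the gtt list before ->put_pages() runs, since that callback may allocate the bit17 swizzle array and thereby re-enter the shrinker. A minimal standalone sketch of the hazard being avoided; all names here (on_reap_list, put_pages_cb, shrinker_reap) are hypothetical stand-ins, not driver API:

#include <stdio.h>

struct obj {
	int on_reap_list;	/* stands in for membership of the gtt list */
};

/* Models the shrinker: it can run from any memory allocation under
 * pressure and reaps the pages of any object still on the reap list. */
static void shrinker_reap(struct obj *o)
{
	if (o->on_reap_list)
		printf("BUG: object reaped in the middle of put_pages\n");
}

/* Models ->put_pages(): it may allocate (the bit17 swizzle array),
 * and an allocation under pressure can re-enter the shrinker. */
static void put_pages_cb(struct obj *o)
{
	shrinker_reap(o);
}

static void put_pages(struct obj *o)
{
	o->on_reap_list = 0;	/* list_del() first, as in the patch */
	put_pages_cb(o);	/* a re-entrant shrinker now skips us */
}

int main(void)
{
	struct obj o = { .on_reap_list = 1 };

	put_pages(&o);
	return 0;
}

With the old ordering (clear on_reap_list after the callback) the sketch prints the BUG line, mirroring the double-free the patch prevents.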
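The core of the patch is the new seqno allocation path: 0 is reserved as "non-seqno", so dev_priv->next_seqno landing on 0 doubles as wraparound detection, at which point the GPU is idled and every ring->sync_seqno[] entry cleared. Below is a runnable sketch of that allocation scheme under the same convention; the harness (handle_seqno_wrap, get_seqno, main) is a simplified model, not the driver code:

#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno;

/* Stands in for i915_gem_handle_seqno_wrap(): the real function idles
 * the GPU, retires all requests and zeroes every ring->sync_seqno[]
 * entry so no stale 32-bit comparison survives the wrap. */
static int handle_seqno_wrap(void)
{
	printf("seqno space exhausted: idle GPU, clear sync_seqno[]\n");
	return 0;	/* 0 on success, negative errno on failure */
}

/* Mirrors the shape of the new i915_gem_get_seqno(): 0 is reserved
 * for "non-seqno", so next_seqno hitting 0 is the wraparound signal. */
static int get_seqno(uint32_t *seqno)
{
	if (next_seqno == 0) {
		int ret = handle_seqno_wrap();
		if (ret)
			return ret;
		next_seqno = 1;
	}

	*seqno = next_seqno++;
	return 0;
}

int main(void)
{
	uint32_t s;
	int i;

	next_seqno = UINT32_MAX;	/* start just before the wrap */
	for (i = 0; i < 3; i++)
		if (get_seqno(&s) == 0)
			printf("allocated seqno %u\n", s);
	return 0;
}

Note the post-increment: 0xffffffff is still handed out normally, and only the following allocation triggers the idle-and-reset, which is why allocation can now fail and the function returns int instead of the seqno itself.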