Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	89
1 file changed, 45 insertions, 44 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad98db5d22e..e6cc020ea32 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1520,9 +1520,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	if (obj->base.map_list.map)
 		return 0;
 
+	dev_priv->mm.shrinker_no_lock_stealing = true;
+
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	/* Badly fragmented mmap space? The only way we can recover
 	 * space is by destroying unwanted objects. We can't randomly release
@@ -1534,10 +1536,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	i915_gem_shrink_all(dev_priv);
-	return drm_gem_create_mmap_offset(&obj->base);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+	dev_priv->mm.shrinker_no_lock_stealing = false;
+
+	return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1699,10 +1705,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	list_del(&obj->gtt_list);
+
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
-	list_del(&obj->gtt_list);
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
@@ -1788,7 +1798,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 */
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	gfp = mapping_gfp_mask(mapping);
-	gfp |= __GFP_NORETRY | __GFP_NOWARN;
+	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 	gfp &= ~(__GFP_IO | __GFP_WAIT);
 	for_each_sg(st->sgl, sg, page_count, i) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
@@ -1801,7 +1811,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			 * our own buffer, now let the real VM do its job and
 			 * go down in flames if truly OOM.
 			 */
-			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
+			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
 			gfp |= __GFP_IO | __GFP_WAIT;
 
 			i915_gem_shrink_all(dev_priv);
@@ -1809,7 +1819,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 			if (IS_ERR(page))
 				goto err_pages;
 
-			gfp |= __GFP_NORETRY | __GFP_NOWARN;
+			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
 
@@ -2909,7 +2919,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mm_node *free_space;
+	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	int ret;
@@ -2955,66 +2965,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		i915_gem_object_unpin_pages(obj);
+		return -ENOMEM;
+	}
+
 search_free:
 	if (map_and_fenceable)
-		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
+		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->mm.gtt_mappable_end);
 	else
-		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-						      size, alignment, obj->cache_level,
-						      false);
-
-	if (free_space != NULL) {
-		if (map_and_fenceable)
-			free_space =
-				drm_mm_get_block_range_generic(free_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
-		else
-			free_space =
-				drm_mm_get_block_generic(free_space,
-							 size, alignment, obj->cache_level,
-							 false);
-	}
-	if (free_space == NULL) {
+		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+						 size, alignment, obj->cache_level);
+	if (ret) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable,
 					       nonblocking);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret == 0)
+			goto search_free;
 
-		goto search_free;
+		i915_gem_object_unpin_pages(obj);
+		kfree(node);
+		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-					      free_space,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return ret;
 	}
 
 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_space = free_space;
-	obj->gtt_offset = free_space->start;
+	obj->gtt_space = node;
+	obj->gtt_offset = node->start;
 
 	fenceable =
-		free_space->size == fence_size &&
-		(free_space->start & (fence_alignment - 1)) == 0;
+		node->size == fence_size &&
+		(node->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -4375,6 +4373,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 			return 0;
 
+		if (dev_priv->mm.shrinker_no_lock_stealing)
+			return 0;
+
 		unlock = false;
 	}
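
The shrinker_no_lock_stealing part of the patch is easiest to see in isolation: the mmap-offset path sets a flag before calling code that may recurse into the shrinker, and the shrinker refuses to "steal" the mutex from its own owner while that flag is set. Below is a minimal, hypothetical userspace sketch of that guard pattern, not driver code; only the field name shrinker_no_lock_stealing and the trylock-or-owner check mirror the patch, while the pthread mutex and helper names (shrink, create_mmap_offset) are illustrative stand-ins.

/* Sketch of the "no lock stealing" guard, modelled with pthreads. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t dev_mutex_owner;
static bool shrinker_no_lock_stealing;	/* mirrors dev_priv->mm.shrinker_no_lock_stealing */

/* Reclaim callback: may be invoked on the thread that already holds dev_mutex. */
static int shrink(void)
{
	bool unlock = true;

	if (pthread_mutex_trylock(&dev_mutex) != 0) {
		/* Lock is contended: only proceed if *we* are the owner ... */
		if (!pthread_equal(dev_mutex_owner, pthread_self()))
			return 0;
		/* ... and lock stealing has not been disabled by the allocator. */
		if (shrinker_no_lock_stealing)
			return 0;
		unlock = false;
	}

	printf("shrinking caches\n");

	if (unlock)
		pthread_mutex_unlock(&dev_mutex);
	return 1;
}

/* Allocation path, analogous to i915_gem_object_create_mmap_offset(). */
static int create_mmap_offset(void)
{
	int ret;

	pthread_mutex_lock(&dev_mutex);
	dev_mutex_owner = pthread_self();

	shrinker_no_lock_stealing = true;
	ret = shrink();		/* stand-in for an allocation that recurses into reclaim */
	shrinker_no_lock_stealing = false;

	pthread_mutex_unlock(&dev_mutex);
	return ret;
}

int main(void)
{
	/* With the guard set, the recursive shrink() call backs off and returns 0. */
	printf("shrink during allocation returned %d\n", create_mmap_offset());
	return 0;
}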