diff options
Diffstat (limited to 'drivers/gpu/ion/ion.c')
| -rw-r--r-- | drivers/gpu/ion/ion.c | 112 | 
1 files changed, 103 insertions, 9 deletions
| diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c index 722c4a2f72c..200b1ad104a 100644 --- a/drivers/gpu/ion/ion.c +++ b/drivers/gpu/ion/ion.c @@ -17,9 +17,11 @@  #include <linux/device.h>  #include <linux/file.h> +#include <linux/freezer.h>  #include <linux/fs.h>  #include <linux/anon_inodes.h>  #include <linux/ion.h> +#include <linux/kthread.h>  #include <linux/list.h>  #include <linux/memblock.h>  #include <linux/miscdevice.h> @@ -27,6 +29,7 @@  #include <linux/mm.h>  #include <linux/mm_types.h>  #include <linux/rbtree.h> +#include <linux/rtmutex.h>  #include <linux/sched.h>  #include <linux/slab.h>  #include <linux/seq_file.h> @@ -140,6 +143,7 @@ static void ion_buffer_add(struct ion_device *dev,  static int ion_buffer_alloc_dirty(struct ion_buffer *buffer); +static bool ion_heap_drain_freelist(struct ion_heap *heap);  /* this function should only be called while dev->lock is held */  static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,  				     struct ion_device *dev, @@ -161,9 +165,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,  	kref_init(&buffer->ref);  	ret = heap->ops->allocate(heap, buffer, len, align, flags); +  	if (ret) { -		kfree(buffer); -		return ERR_PTR(ret); +		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) +			goto err2; + +		ion_heap_drain_freelist(heap); +		ret = heap->ops->allocate(heap, buffer, len, align, +					  flags); +		if (ret) +			goto err2;  	}  	buffer->dev = dev; @@ -214,27 +225,42 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,  err:  	heap->ops->unmap_dma(heap, buffer);  	heap->ops->free(buffer); +err2:  	kfree(buffer);  	return ERR_PTR(ret);  } -static void ion_buffer_destroy(struct kref *kref) +static void _ion_buffer_destroy(struct ion_buffer *buffer)  { -	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); -	struct ion_device *dev = buffer->dev; -  	if (WARN_ON(buffer->kmap_cnt > 0))  		buffer->heap->ops->unmap_kernel(buffer->heap, 
buffer);  	buffer->heap->ops->unmap_dma(buffer->heap, buffer);  	buffer->heap->ops->free(buffer); -	mutex_lock(&dev->buffer_lock); -	rb_erase(&buffer->node, &dev->buffers); -	mutex_unlock(&dev->buffer_lock);  	if (buffer->flags & ION_FLAG_CACHED)  		kfree(buffer->dirty);  	kfree(buffer);  } +static void ion_buffer_destroy(struct kref *kref) +{ +	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); +	struct ion_heap *heap = buffer->heap; +	struct ion_device *dev = buffer->dev; + +	mutex_lock(&dev->buffer_lock); +	rb_erase(&buffer->node, &dev->buffers); +	mutex_unlock(&dev->buffer_lock); + +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) { +		rt_mutex_lock(&heap->lock); +		list_add(&buffer->list, &heap->free_list); +		rt_mutex_unlock(&heap->lock); +		wake_up(&heap->waitqueue); +		return; +	} +	_ion_buffer_destroy(buffer); +} +  static void ion_buffer_get(struct ion_buffer *buffer)  {  	kref_get(&buffer->ref); @@ -1272,13 +1298,81 @@ static const struct file_operations debug_heap_fops = {  	.release = single_release,  }; +static size_t ion_heap_free_list_is_empty(struct ion_heap *heap) +{ +	bool is_empty; + +	rt_mutex_lock(&heap->lock); +	is_empty = list_empty(&heap->free_list); +	rt_mutex_unlock(&heap->lock); + +	return is_empty; +} + +static int ion_heap_deferred_free(void *data) +{ +	struct ion_heap *heap = data; + +	while (true) { +		struct ion_buffer *buffer; + +		wait_event_freezable(heap->waitqueue, +				     !ion_heap_free_list_is_empty(heap)); + +		rt_mutex_lock(&heap->lock); +		if (list_empty(&heap->free_list)) { +			rt_mutex_unlock(&heap->lock); +			continue; +		} +		buffer = list_first_entry(&heap->free_list, struct ion_buffer, +					  list); +		list_del(&buffer->list); +		rt_mutex_unlock(&heap->lock); +		_ion_buffer_destroy(buffer); +	} + +	return 0; +} + +static bool ion_heap_drain_freelist(struct ion_heap *heap) +{ +	struct ion_buffer *buffer, *tmp; + +	if (ion_heap_free_list_is_empty(heap)) +		return false; +	rt_mutex_lock(&heap->lock); 
+	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) { +		_ion_buffer_destroy(buffer); +		list_del(&buffer->list); +	} +	BUG_ON(!list_empty(&heap->free_list)); +	rt_mutex_unlock(&heap->lock); + + +	return true; +} +  void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)  { +	struct sched_param param = { .sched_priority = 0 }; +  	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||  	    !heap->ops->unmap_dma)  		pr_err("%s: can not add heap with invalid ops struct.\n",  		       __func__); +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) { +		INIT_LIST_HEAD(&heap->free_list); +		rt_mutex_init(&heap->lock); +		init_waitqueue_head(&heap->waitqueue); +		heap->task = kthread_run(ion_heap_deferred_free, heap, +					 "%s", heap->name); +		sched_setscheduler(heap->task, SCHED_IDLE, &param); +		if (IS_ERR(heap->task)) +			pr_err("%s: creating thread for deferred free failed\n", +			       __func__); +	} +  	heap->dev = dev;  	down_write(&dev->lock);  	/* use negative heap->id to reverse the priority -- when traversing |