diff options
Diffstat (limited to 'drivers/gpu/ion/ion_system_heap.c')
| -rw-r--r-- | drivers/gpu/ion/ion_system_heap.c | 34 | 
1 file changed, 8 insertions(+), 26 deletions(-)
| diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c index f1563b8fc33..c1061a801a4 100644 --- a/drivers/gpu/ion/ion_system_heap.c +++ b/drivers/gpu/ion/ion_system_heap.c @@ -91,7 +91,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,  static void free_buffer_page(struct ion_system_heap *heap,  			     struct ion_buffer *buffer, struct page *page, -			     unsigned int order, struct vm_struct *vm_struct) +			     unsigned int order)  {  	bool cached = ion_buffer_cached(buffer);  	bool split_pages = ion_buffer_fault_user_mappings(buffer); @@ -99,20 +99,6 @@ static void free_buffer_page(struct ion_system_heap *heap,  	if (!cached) {  		struct ion_page_pool *pool = heap->pools[order_to_index(order)]; -		/* zero the pages before returning them to the pool for -		   security.  This uses vmap as we want to set the pgprot so -		   the writes to occur to noncached mappings, as the pool's -		   purpose is to keep the pages out of the cache */ -		for (i = 0; i < (1 << order); i++) { -			struct page *sub_page = page + i; -			struct page **pages = &sub_page; -			map_vm_area(vm_struct, -					 pgprot_writecombine(PAGE_KERNEL), -					 &pages); -			memset(vm_struct->addr, 0, PAGE_SIZE); -			unmap_kernel_range((unsigned long)vm_struct->addr, -					PAGE_SIZE); -		}  		ion_page_pool_free(pool, page);  	} else if (split_pages) {  		for (i = 0; i < (1 << order); i++) @@ -167,8 +153,6 @@ static int ion_system_heap_allocate(struct ion_heap *heap,  	long size_remaining = PAGE_ALIGN(size);  	unsigned int max_order = orders[0];  	bool split_pages = ion_buffer_fault_user_mappings(buffer); -	struct vm_struct *vm_struct; -	pte_t *ptes;  	INIT_LIST_HEAD(&pages);  	while (size_remaining > 0) { @@ -216,13 +200,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,  err1:  	kfree(table);  err: -	vm_struct = get_vm_area(PAGE_SIZE, &ptes);  	list_for_each_entry(info, &pages, list) { -		free_buffer_page(sys_heap, buffer, info->page, 
info->order, -				vm_struct); +		free_buffer_page(sys_heap, buffer, info->page, info->order);  		kfree(info);  	} -	free_vm_area(vm_struct);  	return -ENOMEM;  } @@ -233,18 +214,19 @@ void ion_system_heap_free(struct ion_buffer *buffer)  							struct ion_system_heap,  							heap);  	struct sg_table *table = buffer->sg_table; +	bool cached = ion_buffer_cached(buffer);  	struct scatterlist *sg;  	LIST_HEAD(pages); -	struct vm_struct *vm_struct; -	pte_t *ptes;  	int i; -	vm_struct = get_vm_area(PAGE_SIZE, &ptes); +	/* uncached pages come from the page pools, zero them before returning +	   for security purposes (other allocations are zeroed at alloc time) */ +	if (!cached) +		ion_heap_buffer_zero(buffer);  	for_each_sg(table->sgl, sg, table->nents, i)  		free_buffer_page(sys_heap, buffer, sg_page(sg), -				get_order(sg_dma_len(sg)), vm_struct); -	free_vm_area(vm_struct); +				get_order(sg_dma_len(sg)));  	sg_free_table(table);  	kfree(table);  } |