Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc_dma.c')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_page_alloc_dma.c   60
1 file changed, 26 insertions, 34 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 0c46d8cdc6e..4f9e548b2ee 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,6 +33,8 @@
  *   when freed).
  */
 
+#define pr_fmt(fmt) "[TTM] " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/seq_file.h> /* for seq_printf */
@@ -221,18 +223,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR TTM_PFX
-			       "Setting allocation size to %lu "
-			       "is not allowed. Recommended size is "
-			       "%lu\n",
+			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING TTM_PFX
-			       "Setting allocation size to "
-			       "larger than %lu is not recommended.\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
@@ -313,15 +310,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
 	if (pool->type & IS_UC) {
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			pr_err(TTM_PFX
-			       "%s: Failed to set %d pages to uc!\n",
+			pr_err("%s: Failed to set %d pages to uc!\n",
 			       pool->dev_name, cpages);
 	}
 	if (pool->type & IS_WC) {
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			pr_err(TTM_PFX
-			       "%s: Failed to set %d pages to wc!\n",
+			pr_err("%s: Failed to set %d pages to wc!\n",
 			       pool->dev_name, cpages);
 	}
 	return r;
@@ -387,8 +382,8 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
 	/* Don't set WB on WB page pool. */
 	if (npages && !(pool->type & IS_CACHED) &&
 	    set_pages_array_wb(pages, npages))
-		pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
-			pool->dev_name, npages);
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, npages);
 
 	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
 		list_del(&d_page->page_list);
@@ -400,8 +395,8 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
 {
 	/* Don't set WB on WB page pool. */
 	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
-		pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
-			pool->dev_name, 1);
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, 1);
 
 	list_del(&d_page->page_list);
 	__ttm_dma_free_page(pool, d_page);
@@ -430,17 +425,16 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
 #if 0
 	if (nr_free > 1) {
 		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
-			pool->dev_name, pool->name, current->pid,
-			npages_to_free, nr_free);
+			 pool->dev_name, pool->name, current->pid,
+			 npages_to_free, nr_free);
 	}
 #endif
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 
 	if (!pages_to_free) {
-		pr_err(TTM_PFX
-		       "%s: Failed to allocate memory for pool free operation.\n",
-			pool->dev_name);
+		pr_err("%s: Failed to allocate memory for pool free operation\n",
+		       pool->dev_name);
 		return 0;
 	}
 	INIT_LIST_HEAD(&d_pages);
@@ -723,23 +717,21 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		pr_err(TTM_PFX
-		       "%s: Unable to allocate table for new pages.",
-			pool->dev_name);
+		pr_err("%s: Unable to allocate table for new pages\n",
+		       pool->dev_name);
 		return -ENOMEM;
 	}
 
 	if (count > 1) {
 		pr_debug("%s: (%s:%d) Getting %d pages\n",
-			pool->dev_name, pool->name, current->pid,
-			count);
+			 pool->dev_name, pool->name, current->pid, count);
 	}
 
 	for (i = 0, cpages = 0; i < count; ++i) {
 		dma_p = __ttm_dma_alloc_page(pool);
 		if (!dma_p) {
-			pr_err(TTM_PFX "%s: Unable to get page %u.\n",
-				pool->dev_name, i);
+			pr_err("%s: Unable to get page %u\n",
+			       pool->dev_name, i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -821,8 +813,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 			struct dma_page *d_page;
 			unsigned cpages = 0;
 
-			pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
-				pool->dev_name, pool->name, r);
+			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
+			       pool->dev_name, pool->name, r);
 
 			list_for_each_entry(d_page, &d_pages, page_list) {
 				cpages++;
@@ -1038,8 +1030,8 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
 		nr_free = shrink_pages;
 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
-			p->pool->dev_name, p->pool->name, current->pid, nr_free,
-			shrink_pages);
+			 p->pool->dev_name, p->pool->name, current->pid,
+			 nr_free, shrink_pages);
 	}
 	mutex_unlock(&_manager->lock);
 	/* return estimated number of unused pages in pool */
@@ -1064,7 +1056,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	WARN_ON(_manager);
 
-	printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+	pr_info("Initializing DMA pool allocator\n");
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 	if (!_manager)
@@ -1097,7 +1089,7 @@ void ttm_dma_page_alloc_fini(void)
 {
 	struct device_pools *p, *t;
 
-	printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+	pr_info("Finalizing DMA pool allocator\n");
 	ttm_dma_pool_mm_shrink_fini(_manager);
 
 	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
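Note on why the explicit TTM_PFX argument can be dropped from every call site: the kernel's pr_err()/pr_warn()/pr_info() macros in <linux/printk.h> expand to printk(KERN_ERR pr_fmt(fmt), ...) and so on, so the single per-file "#define pr_fmt(fmt) "[TTM] " fmt" added by this patch prepends the prefix to every format string at compile time. The snippet below is a minimal userspace sketch of that mechanism, not kernel code; the fprintf-based pr_err()/pr_info() definitions are stand-ins assumed only so the example compiles and runs on its own.

/*
 * Userspace illustration only. pr_err()/pr_info() here are simplified
 * stand-ins for the kernel macros, which route through printk() with a
 * KERN_* level and pr_fmt().
 */
#include <stdio.h>

/* Same per-file override the patch adds to ttm_page_alloc_dma.c. */
#define pr_fmt(fmt) "[TTM] " fmt

/* pr_fmt(fmt) is expanded where the macro is used, so string-literal
 * concatenation glues "[TTM] " onto every format string at compile time. */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "[TTM] Initializing DMA pool allocator" with no TTM_PFX
	 * at the call site. */
	pr_info("Initializing DMA pool allocator\n");

	/* Prints "[TTM] 0000:01:00.0: Failed to set 4 pages to wb!" */
	pr_err("%s: Failed to set %d pages to wb!\n", "0000:01:00.0", 4);
	return 0;
}

Building and running this prints the same "[TTM] "-prefixed lines that the converted driver now emits through printk, which is what makes the per-call TTM_PFX removals in the diff safe.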