| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-22 14:50:12 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-22 14:50:12 -0700 |
| commit | ece236ce2fad9c27a6fd2530f899289025194bce (patch) | |
| tree | 474b793205872206a2a3f7d409ff9b1f81f3a9a8 /drivers/infiniband/hw/mthca/mthca_memfree.c | |
| parent | 441c196e84b11aad3123baa9320eee7abc6b5c98 (diff) | |
| parent | 4460207561290c3be7e6c7538f22690028170c1d (diff) | |
| download | olio-linux-3.10-ece236ce2fad9c27a6fd2530f899289025194bce.tar.xz olio-linux-3.10-ece236ce2fad9c27a6fd2530f899289025194bce.zip | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (26 commits)
  IB/qib: Defer HCA error events to tasklet
  mlx4_core: Bump the driver version to 1.0
  RDMA/cxgb4: Use printk_ratelimited() instead of printk_ratelimit()
  IB/mlx4: Support PMA counters for IBoE
  IB/mlx4: Use flow counters on IBoE ports
  IB/pma: Add include file for IBA performance counters definitions
  mlx4_core: Add network flow counters
  mlx4_core: Fix location of counter index in QP context struct
  mlx4_core: Read extended capabilities into the flags field
  mlx4_core: Extend capability flags to 64 bits
  IB/mlx4: Generate GID change events in IBoE code
  IB/core: Add GID change event
  RDMA/cma: Don't allow IPoIB port space for IBoE
  RDMA: Allow for NULL .modify_device() and .modify_port() methods
  IB/qib: Update active link width
  IB/qib: Fix potential deadlock with link down interrupt
  IB/qib: Add sysfs interface to read free contexts
  IB/mthca: Remove unnecessary read of PCI_CAP_ID_EXP
  IB/qib: Remove double define
  IB/qib: Remove unnecessary read of PCI_CAP_ID_EXP
  ...
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_memfree.c')
| | | |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/hw/mthca/mthca_memfree.c | 43 |

1 file changed, 14 insertions, 29 deletions
```diff
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 8c2a83732b5..7d2e42dd692 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -223,7 +223,6 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
 {
 	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
 	int ret = 0;
-	u8 status;
 
 	mutex_lock(&table->mutex);
 
@@ -240,8 +239,8 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
 		goto out;
 	}
 
-	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-			  &status) || status) {
+	if (mthca_MAP_ICM(dev, table->icm[i],
+			  table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
 		mthca_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
 		ret = -ENOMEM;
@@ -258,7 +257,6 @@ out:
 void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
 {
 	int i;
-	u8 status;
 
 	if (!mthca_is_memfree(dev))
 		return;
@@ -269,8 +267,7 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
 
 	if (--table->icm[i]->refcount == 0) {
 		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
-				&status);
+				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
 		mthca_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
 	}
@@ -366,7 +363,6 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
 	int num_icm;
 	unsigned chunk_size;
 	int i;
-	u8 status;
 
 	obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
 	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
@@ -396,8 +392,8 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
 						__GFP_NOWARN, use_coherent);
 		if (!table->icm[i])
 			goto err;
-		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
-				  &status) || status) {
+		if (mthca_MAP_ICM(dev, table->icm[i],
+				  virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
 			mthca_free_icm(dev, table->icm[i], table->coherent);
 			table->icm[i] = NULL;
 			goto err;
@@ -416,8 +412,7 @@ err:
 	for (i = 0; i < num_icm; ++i)
 		if (table->icm[i]) {
 			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
-					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
-					&status);
+					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
 			mthca_free_icm(dev, table->icm[i], table->coherent);
 		}
 
@@ -429,13 +424,12 @@ err:
 void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
 {
 	int i;
-	u8 status;
 
 	for (i = 0; i < table->num_icm; ++i)
 		if (table->icm[i]) {
-			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
-					&status);
+			mthca_UNMAP_ICM(dev,
+					table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
 			mthca_free_icm(dev, table->icm[i], table->coherent);
 		}
 
@@ -454,7 +448,6 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 {
 	struct page *pages[1];
 	int ret = 0;
-	u8 status;
 	int i;
 
 	if (!mthca_is_memfree(dev))
@@ -494,9 +487,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	}
 
 	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
-				 mthca_uarc_virt(dev, uar, i), &status);
-	if (!ret && status)
-		ret = -EINVAL;
+				 mthca_uarc_virt(dev, uar, i));
 	if (ret) {
 		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
 		put_page(sg_page(&db_tab->page[i].mem));
@@ -557,14 +548,13 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 			       struct mthca_user_db_table *db_tab)
 {
 	int i;
-	u8 status;
 
 	if (!mthca_is_memfree(dev))
 		return;
 
 	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
 		if (db_tab->page[i].uvirt) {
-			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
+			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
 			put_page(sg_page(&db_tab->page[i].mem));
 		}
@@ -581,7 +571,6 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	int i, j;
 	struct mthca_db_page *page;
 	int ret = 0;
-	u8 status;
 
 	mutex_lock(&dev->db_tab->mutex);
 
@@ -644,9 +633,7 @@ alloc:
 	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
 
 	ret = mthca_MAP_ICM_page(dev, page->mapping,
-				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
-	if (!ret && status)
-		ret = -EINVAL;
+				 mthca_uarc_virt(dev, &dev->driver_uar, i));
 	if (ret) {
 		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  page->db_rec, page->mapping);
@@ -678,7 +665,6 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 {
 	int i, j;
 	struct mthca_db_page *page;
-	u8 status;
 
 	i = db_index / MTHCA_DB_REC_PER_PAGE;
 	j = db_index % MTHCA_DB_REC_PER_PAGE;
@@ -694,7 +680,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 
 	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
 	    i >= dev->db_tab->max_group1 - 1) {
-		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
 
 		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  page->db_rec, page->mapping);
@@ -745,7 +731,6 @@ int mthca_init_db_tab(struct mthca_dev *dev)
 void mthca_cleanup_db_tab(struct mthca_dev *dev)
 {
 	int i;
-	u8 status;
 
 	if (!mthca_is_memfree(dev))
 		return;
@@ -763,7 +748,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
 		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
 			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
 
-		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
 
 		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
 				  dev->db_tab->page[i].db_rec,
```
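Every hunk above makes the same mechanical change: mthca_MAP_ICM(), mthca_MAP_ICM_page() and mthca_UNMAP_ICM() no longer hand back a separate u8 firmware status through an out-parameter, so each caller drops its local status variable together with the `|| status` and `if (!ret && status)` checks and tests only the int return value. A minimal before/after sketch of that caller pattern, reusing the names visible in mthca_table_get() above (a fragment for illustration only, not a standalone compilation unit):

```c
/*
 * Sketch of the calling-convention change made throughout this diff.
 * dev, table, i and ret are assumed to be the locals of mthca_table_get()
 * shown in the hunks above.
 */

/* Old pattern (removed): the wrapper returned an int AND filled in a u8
 * firmware status, and the caller had to treat either one as failure. */
	u8 status;

	if (mthca_MAP_ICM(dev, table->icm[i],
			  table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status)
		ret = -ENOMEM;	/* command failed or firmware reported an error */

/* New pattern (added): the firmware status is folded into the wrapper's
 * int return value, so a single check suffices. */
	if (mthca_MAP_ICM(dev, table->icm[i],
			  table->virt + i * MTHCA_TABLE_CHUNK_SIZE))
		ret = -ENOMEM;
```

The recovery path itself (freeing the just-allocated ICM chunk and clearing the table slot) is unchanged; only the failure test is simplified.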