| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-17 18:40:24 -0800 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-17 18:40:24 -0800 | 
| commit | 57f2685c16fa8e0cb86e4bc7c8ac33bfed943819 (patch) | |
| tree | 96a42fe632687c8486c250c4805bf1d4c9c34d19 /drivers/dma/intel_mid_dma.c | |
| parent | 488a9d018256dc9f29e041c0360445b6d25eea9a (diff) | |
| parent | e08b881a69d638175bfa99b5af4d72b731633ea7 (diff) | |
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (53 commits)
  ARM: mach-shmobile: specify CHCLR registers on SH7372
  dma: shdma: fix runtime PM: clear channel buffers on reset
  dma/imx-sdma: save irq flags when use spin_lock in sdma_tx_submit
  dmaengine/ste_dma40: clear LNK on channel startup
  dmaengine: intel_mid_dma: remove legacy pm interface
  ASoC: mxs: correct 'direction' of device_prep_dma_cyclic
  dmaengine: intel_mid_dma: error path fix
  dmaengine: intel_mid_dma: locking and freeing fixes
  mtd: gpmi-nand: move to dma_transfer_direction
  mtd: fix compile error for gpmi-nand
  mmc: mxs-mmc: fix the dma_transfer_direction migration
  dmaengine: add DMA_TRANS_NONE to dma_transfer_direction
  dma: mxs-dma: Don't use CLKGATE bits in CTRL0 to disable DMA channels
  dma: mxs-dma: make mxs_dma_prep_slave_sg() multi user safe
  dma: mxs-dma: Always leave mxs_dma_init() with the clock disabled.
  dma: mxs-dma: fix a typo in comment
  DMA: PL330: Remove pm_runtime_xxx calls from pl330 probe/remove
  video i.MX IPU: Fix display connections
  i.MX IPU DMA: Fix wrong burstsize settings
  dmaengine/ste_dma40: allow fixed physical channel
  ...
Fix up conflicts in drivers/dma/{Kconfig,mxs-dma.c,pl330.c}
The conflicts looked pretty trivial, but I'll ask people to verify them.
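Most of the churn in this series is the dmaengine migration from `enum dma_data_direction` (a DMA-mapping concept) to the new `enum dma_transfer_direction` for slave transfers, which is also what dominates the intel_mid_dma diff below. A minimal sketch of the mapping, assuming the 3.2-era definitions of both enums; `to_transfer_direction()` is a hypothetical helper for illustration, not part of the kernel API:

```c
#include <linux/dmaengine.h>	/* enum dma_transfer_direction; pulls in
				 * enum dma_data_direction as well */

/*
 * Hypothetical helper, for illustration only: map the old
 * dma_data_direction values a slave driver used to carry onto the
 * dma_transfer_direction values that prep_slave_sg() now expects.
 */
static enum dma_transfer_direction
to_transfer_direction(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	/* memory -> peripheral */
		return DMA_MEM_TO_DEV;
	case DMA_FROM_DEVICE:	/* peripheral -> memory */
		return DMA_DEV_TO_MEM;
	default:		/* no slave transfer programmed */
		return DMA_TRANS_NONE;
	}
}
```

`DMA_TRANS_NONE` itself is introduced by one of the commits in this pull ("dmaengine: add DMA_TRANS_NONE to dma_transfer_direction"), precisely so drivers have a value for "no transfer".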
Diffstat (limited to 'drivers/dma/intel_mid_dma.c')
| -rw-r--r-- | drivers/dma/intel_mid_dma.c | 39 |
|---|---|---|

1 file changed, 25 insertions(+), 14 deletions(-)
```diff
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64d45d..74f70aadf9e 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
  * callbacks but must be called with the lock held.
  */
 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
-	       struct intel_mid_dma_desc *desc)
+		struct intel_mid_dma_desc *desc)
+		__releases(&midc->lock) __acquires(&midc->lock)
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
 	dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 			pci_pool_free(desc->lli_pool, desc->lli,
 						desc->lli_phys);
 			pci_pool_destroy(desc->lli_pool);
+			desc->lli = NULL;
 		}
 		list_move(&desc->desc_node, &midc->free_list);
 		midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
 							midc->dma->block_size);
 		/*Populate SAR and DAR values*/
 		sg_phy_addr = sg_phys(sg);
-		if (desc->dirn ==  DMA_TO_DEVICE) {
+		if (desc->dirn ==  DMA_MEM_TO_DEV) {
 			lli_bloc_desc->sar  = sg_phy_addr;
 			lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
-		} else if (desc->dirn ==  DMA_FROM_DEVICE) {
+		} else if (desc->dirn ==  DMA_DEV_TO_MEM) {
 			lli_bloc_desc->sar  = mids->dma_slave.src_addr;
 			lli_bloc_desc->dar  = sg_phy_addr;
 		}
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
+		spin_lock_bh(&midc->lock);
 		midc_scan_descriptors(to_middma_device(chan->device), midc);
+		spin_unlock_bh(&midc->lock);
 
 		last_complete = midc->completed;
 		last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
 			pci_pool_free(desc->lli_pool, desc->lli,
 						desc->lli_phys);
 			pci_pool_destroy(desc->lli_pool);
+			desc->lli = NULL;
 		}
 		list_move(&desc->desc_node, &midc->free_list);
 	}
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 		if (midc->dma->pimr_mask) {
 			cfg_hi.cfgx.protctl = 0x0; /*default value*/
 			cfg_hi.cfgx.fifo_mode = 1;
-			if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+			if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
 				cfg_hi.cfgx.src_per = 0;
 				if (mids->device_instance == 0)
 					cfg_hi.cfgx.dst_per = 3;
 				if (mids->device_instance == 1)
 					cfg_hi.cfgx.dst_per = 1;
-			} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+			} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
 				if (mids->device_instance == 0)
 					cfg_hi.cfgx.src_per = 2;
 				if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
 		ctl_lo.ctlx.sinc = 0;
 		ctl_lo.ctlx.dinc = 0;
 	} else {
-		if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+		if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
 			ctl_lo.ctlx.sinc = 0;
 			ctl_lo.ctlx.dinc = 2;
 			ctl_lo.ctlx.tt_fc = 1;
-		} else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+		} else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
 			ctl_lo.ctlx.sinc = 2;
 			ctl_lo.ctlx.dinc = 0;
 			ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
  */
 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
 			struct dma_chan *chan, struct scatterlist *sgl,
-			unsigned int sg_len, enum dma_data_direction direction,
+			unsigned int sg_len, enum dma_transfer_direction direction,
 			unsigned long flags)
 {
 	struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 	pm_runtime_get_sync(&mid->pdev->dev);
 
 	if (mid->state == SUSPENDED) {
-		if (dma_resume(mid->pdev)) {
+		if (dma_resume(&mid->pdev->dev)) {
 			pr_err("ERR_MDMA: resume failed");
 			return -EFAULT;
 		}
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
 					LNW_PERIPHRAL_MASK_SIZE);
 		if (dma->mask_reg == NULL) {
 			pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
-			return -ENOMEM;
+			err = -ENOMEM;
+			goto err_ioremap;
 		}
 	} else
 		dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
 err_engine:
 	free_irq(pdev->irq, dma);
 err_irq:
+	if (dma->mask_reg)
+		iounmap(dma->mask_reg);
+err_ioremap:
 	pci_pool_destroy(dma->dma_pool);
 err_dma_pool:
 	pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
 {
+	struct pci_dev *pci = to_pci_dev(dev);
 	int i;
 	struct middma_device *device = pci_get_drvdata(pci);
 	pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
 {
+	struct pci_dev *pci = to_pci_dev(dev);
 	int ret;
 	struct middma_device *device = pci_get_drvdata(pci);
 
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
 	.runtime_suspend = dma_runtime_suspend,
 	.runtime_resume = dma_runtime_resume,
 	.runtime_idle = dma_runtime_idle,
+	.suspend = dma_suspend,
+	.resume = dma_resume,
 };
 
 static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
 	.probe		=	intel_mid_dma_probe,
 	.remove		=	__devexit_p(intel_mid_dma_remove),
 #ifdef CONFIG_PM
-	.suspend = dma_suspend,
-	.resume = dma_resume,
 	.driver = {
 		.pm = &intel_mid_dma_pm,
 	},
```
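The last three hunks convert the driver from the legacy PCI `.suspend`/`.resume` hooks to `struct dev_pm_ops`: the callbacks now take a `struct device *` and recover the `pci_dev` with `to_pci_dev()`. A minimal sketch of that pattern, with hypothetical `mydrv_*` names standing in for the driver's own:

```c
#include <linux/pci.h>
#include <linux/pm.h>

/* New-style PM callbacks receive a struct device *; derive the
 * pci_dev from it, as intel_mid_dma does above. */
static int mydrv_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);

	pci_save_state(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

static int mydrv_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	return 0;
}

static const struct dev_pm_ops mydrv_pm_ops = {
	.suspend = mydrv_suspend,
	.resume  = mydrv_resume,
};

static struct pci_driver mydrv_pci_driver = {
	.name	= "mydrv",
	/* the legacy .suspend/.resume members are dropped; the PM core
	 * dispatches through driver.pm instead */
	.driver	= {
		.pm = &mydrv_pm_ops,
	},
};
```

The `SET_SYSTEM_SLEEP_PM_OPS()` macro from `<linux/pm.h>` is the usual shorthand for filling in the system sleep callbacks of a `dev_pm_ops`; the diff above spells them out explicitly.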