diff options
Diffstat (limited to 'include/linux/dmaengine.h')
-rw-r--r--  include/linux/dmaengine.h | 64 ++++++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 40 insertions(+), 24 deletions(-)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d3201e438d1..91ac8da2502 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -608,7 +608,10 @@ static inline int dmaengine_device_control(struct dma_chan *chan,
 					   enum dma_ctrl_cmd cmd,
 					   unsigned long arg)
 {
-	return chan->device->device_control(chan, cmd, arg);
+	if (chan->device->device_control)
+		return chan->device->device_control(chan, cmd, arg);
+
+	return -ENOSYS;
 }
 
 static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -618,6 +621,11 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 			(unsigned long)config);
 }
 
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
 	struct dma_chan *chan, dma_addr_t buf, size_t len,
 	enum dma_transfer_direction dir, unsigned long flags)
@@ -660,6 +668,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
 						period_len, dir, flags, NULL);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+		struct dma_chan *chan, struct dma_interleaved_template *xt,
+		unsigned long flags)
+{
+	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
 	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -849,20 +864,6 @@ static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
 	return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
 }
 
-#define first_dma_cap(mask) __first_dma_cap(&(mask))
-static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
-{
-	return min_t(int, DMA_TX_TYPE_END,
-		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
-}
-
-#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
-static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
-{
-	return min_t(int, DMA_TX_TYPE_END,
-		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
-}
-
 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
 static inline void
 __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
@@ -891,9 +892,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
 }
 
 #define for_each_dma_cap_mask(cap, mask) \
-	for ((cap) = first_dma_cap(mask);	\
-		(cap) < DMA_TX_TYPE_END;	\
-		(cap) = next_dma_cap((cap), (mask)))
+	for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
 
 /**
  * dma_async_issue_pending - flush pending transactions to HW
@@ -907,8 +906,6 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 	chan->device->device_issue_pending(chan);
 }
 
-#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
-
 /**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
@@ -934,16 +931,13 @@ static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	return status;
 }
 
-#define dma_async_memcpy_complete(chan, cookie, last, used)\
-	dma_async_is_tx_complete(chan, cookie, last, used)
-
 /**
  * dma_async_is_complete - test a cookie against chan state
  * @cookie: transaction identifier to test status of
  * @last_complete: last know completed transaction
  * @last_used: last cookie value handed out
  *
- * dma_async_is_complete() is used in dma_async_memcpy_complete()
+ * dma_async_is_complete() is used in dma_async_is_tx_complete()
  * the test logic is separated for lightweight testing of multiple cookies
  */
 static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
@@ -974,6 +968,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel(struct device *dev, char *name);
 void dma_release_channel(struct dma_chan *chan);
 #else
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -988,6 +983,11 @@ static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
 {
 	return NULL;
 }
+static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
+							 char *name)
+{
+	return NULL;
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
@@ -1001,6 +1001,22 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
+	__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+
+static inline struct dma_chan
+*__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn,
+				  void *fn_param, struct device *dev,
+				  char *name)
+{
+	struct dma_chan *chan;
+
+	chan = dma_request_slave_channel(dev, name);
+	if (chan)
+		return chan;
+
+	return __dma_request_channel(mask, fn, fn_param);
+}
 
 /* --- Helper iov-locking functions --- */