-rw-r--r--   drivers/gpu/drm/vmwgfx/Makefile                    |  2
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c             |  8
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c                |  2
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h                | 42
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c            | 10
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c               | 26
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_irq.c                | 56
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_marker.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_fence.c) | 70
-rw-r--r--   include/drm/vmwgfx_drm.h                           |  6
9 files changed, 110 insertions, 112 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c9281a1b1d3..f41e8b49997 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
+	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 87e43e0733b..72d95617bc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -295,18 +295,18 @@ static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
 static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
 {
 	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t sequence = (unsigned long) sync_obj;
+	uint32_t seqno = (unsigned long) sync_obj;
 
-	return vmw_fence_signaled(dev_priv, sequence);
+	return vmw_seqno_passed(dev_priv, seqno);
 }
 
 static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 			     bool lazy, bool interruptible)
 {
 	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t sequence = (unsigned long) sync_obj;
+	uint32_t seqno = (unsigned long) sync_obj;
 
-	return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+	return vmw_wait_seqno(dev_priv, false, seqno, false, 3*HZ);
 }
 
 struct ttm_bo_driver vmw_bo_driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8010254e9cf..c8b5a53f140 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -280,7 +280,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
-	dev_priv->last_read_sequence = (uint32_t) -100;
+	dev_priv->last_read_seqno = (uint32_t) -100;
 	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 2374a5c495f..9c3016b53ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -105,7 +105,7 @@ struct vmw_surface {
 	struct vmw_cursor_snooper snooper;
 };
 
-struct vmw_fence_queue {
+struct vmw_marker_queue {
 	struct list_head head;
 	struct timespec lag;
 	struct timespec lag_time;
@@ -121,7 +121,7 @@ struct vmw_fifo_state {
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
-	struct vmw_fence_queue fence_queue;
+	struct vmw_marker_queue marker_queue;
 };
 
 struct vmw_relocation {
@@ -238,12 +238,12 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
-	atomic_t fence_seq;
+	atomic_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	atomic_t fence_queue_waiters;
 	atomic_t fifo_queue_waiters;
-	uint32_t last_read_sequence;
+	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
 
 	/*
@@ -411,7 +411,7 @@ extern void vmw_fifo_release(struct vmw_private *dev_priv,
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
-			       uint32_t *sequence);
+			       uint32_t *seqno);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
@@ -448,39 +448,39 @@ extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
  */
 
 extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
-extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
-			  uint32_t sequence, bool interruptible,
-			  unsigned long timeout);
+extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
+			     uint32_t seqno, bool interruptible,
+			     unsigned long timeout);
 extern void vmw_irq_preinstall(struct drm_device *dev);
 extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
-extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
-			       uint32_t sequence);
+extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
+				uint32_t seqno);
 extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     bool lazy,
 			     bool fifo_idle,
-			     uint32_t sequence,
+			     uint32_t seqno,
 			     bool interruptible,
 			     unsigned long timeout);
-extern void vmw_update_sequence(struct vmw_private *dev_priv,
+extern void vmw_update_seqno(struct vmw_private *dev_priv,
 				struct vmw_fifo_state *fifo_state);
 
 /**
- * Rudimentary fence objects currently used only for throttling -
- * vmwgfx_fence.c
+ * Rudimentary fence-like objects currently used only for throttling -
+ * vmwgfx_marker.c
  */
 
-extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
-extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
-extern int vmw_fence_push(struct vmw_fence_queue *queue,
-			  uint32_t sequence);
-extern int vmw_fence_pull(struct vmw_fence_queue *queue,
-			  uint32_t signaled_sequence);
+extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
+extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
+extern int vmw_marker_push(struct vmw_marker_queue *queue,
+			  uint32_t seqno);
+extern int vmw_marker_pull(struct vmw_marker_queue *queue,
+			  uint32_t signaled_seqno);
 extern int vmw_wait_lag(struct vmw_private *dev_priv,
-			struct vmw_fence_queue *queue, uint32_t us);
+			struct vmw_marker_queue *queue, uint32_t us);
 
 /**
  * Kernel framebuffer - vmwgfx_fb.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c6ff0e40f20..be41484735b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -686,7 +686,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	int ret;
 	void *user_cmd;
 	void *cmd;
-	uint32_t sequence;
+	uint32_t seqno;
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
@@ -738,7 +738,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	vmw_apply_relocations(sw_context);
 
 	if (arg->throttle_us) {
-		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 				   arg->throttle_us);
 
 		if (unlikely(ret != 0))
@@ -755,10 +755,10 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
-	ret = vmw_fifo_send_fence(dev_priv, &sequence);
+	ret = vmw_fifo_send_fence(dev_priv, &seqno);
 
 	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
-				    (void *)(unsigned long) sequence);
+				    (void *)(unsigned long) seqno);
 	vmw_clear_validations(sw_context);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
@@ -771,7 +771,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	fence_rep.error = ret;
-	fence_rep.fence_seq = (uint64_t) sequence;
+	fence_rep.fence_seq = (uint64_t) seqno;
 	fence_rep.pad64 = 0;
 
 	user_fence_rep = (struct drm_vmw_fence_rep __user *)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index aae01b9ae4d..3ba9cac579e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -127,9 +127,9 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
-	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-	vmw_fence_queue_init(&fifo->fence_queue);
+	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+	vmw_marker_queue_init(&fifo->marker_queue);
 
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 }
@@ -156,7 +156,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 
-	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
 		  dev_priv->config_done_state);
@@ -166,7 +166,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		  dev_priv->traces_state);
 
 	mutex_unlock(&dev_priv->hw_mutex);
-	vmw_fence_queue_takedown(&fifo->fence_queue);
+	vmw_marker_queue_takedown(&fifo->marker_queue);
 
 	if (likely(fifo->static_buffer != NULL)) {
 		vfree(fifo->static_buffer);
@@ -447,7 +447,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
@@ -457,16 +457,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		*sequence = atomic_read(&dev_priv->fence_seq);
+		*seqno = atomic_read(&dev_priv->marker_seq);
 		ret = -ENOMEM;
-		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
 					false, 3*HZ);
 		goto out_err;
 	}
 
 	do {
-		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
-	} while (*sequence == 0);
+		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
+	} while (*seqno == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
@@ -483,10 +483,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	cmd_fence = (struct svga_fifo_cmd_fence *)
 	    ((unsigned long)fm + sizeof(__le32));
 
-	iowrite32(*sequence, &cmd_fence->fence);
+	iowrite32(*seqno, &cmd_fence->fence);
 	vmw_fifo_commit(dev_priv, bytes);
-	(void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
-	vmw_update_sequence(dev_priv, fifo_state);
+	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+	vmw_update_seqno(dev_priv, fifo_state);
 
 out_err:
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index e92298a6a38..48701d2c8c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -53,7 +53,7 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
 	return IRQ_NONE;
 }
 
-static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
 	uint32_t busy;
 
@@ -64,43 +64,43 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
 	return (busy == 0);
 }
 
-void vmw_update_sequence(struct vmw_private *dev_priv,
+void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
-	if (dev_priv->last_read_sequence != sequence) {
-		dev_priv->last_read_sequence = sequence;
-		vmw_fence_pull(&fifo_state->fence_queue, sequence);
+	if (dev_priv->last_read_seqno != seqno) {
+		dev_priv->last_read_seqno = seqno;
+		vmw_marker_pull(&fifo_state->marker_queue, seqno);
 	}
 }
 
-bool vmw_fence_signaled(struct vmw_private *dev_priv,
-			uint32_t sequence)
+bool vmw_seqno_passed(struct vmw_private *dev_priv,
+			 uint32_t seqno)
 {
 	struct vmw_fifo_state *fifo_state;
 	bool ret;
 
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
 	fifo_state = &dev_priv->fifo;
-	vmw_update_sequence(dev_priv, fifo_state);
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	vmw_update_seqno(dev_priv, fifo_state);
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
-	    vmw_fifo_idle(dev_priv, sequence))
+	    vmw_fifo_idle(dev_priv, seqno))
 		return true;
 
 	/**
-	 * Then check if the sequence is higher than what we've actually
+	 * Then check if the seqno is higher than what we've actually
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
 	       > VMW_FENCE_WRAP);
 
 	return ret;
@@ -109,7 +109,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
 int vmw_fallback_wait(struct vmw_private *dev_priv,
 		      bool lazy,
 		      bool fifo_idle,
-		      uint32_t sequence,
+		      uint32_t seqno,
 		      bool interruptible,
 		      unsigned long timeout)
 {
@@ -123,7 +123,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	DEFINE_WAIT(__wait);
 
 	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
-		&vmw_fence_signaled;
+		&vmw_seqno_passed;
 
 	/**
 	 * Block command submission while waiting for idle.
@@ -131,14 +131,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = atomic_read(&dev_priv->fence_seq);
+	signal_seq = atomic_read(&dev_priv->marker_seq);
 	ret = 0;
 
 	for (;;) {
 		prepare_to_wait(&dev_priv->fence_queue, &__wait,
 				(interruptible) ?
 				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-		if (wait_condition(dev_priv, sequence))
+		if (wait_condition(dev_priv, seqno))
 			break;
 		if (time_after_eq(jiffies, end_jiffies)) {
 			DRM_ERROR("SVGA device lockup.\n");
@@ -175,28 +175,28 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	return ret;
 }
 
-int vmw_wait_fence(struct vmw_private *dev_priv,
-		   bool lazy, uint32_t sequence,
-		   bool interruptible, unsigned long timeout)
+int vmw_wait_seqno(struct vmw_private *dev_priv,
+		      bool lazy, uint32_t seqno,
+		      bool interruptible, unsigned long timeout)
 {
 	long ret;
 	unsigned long irq_flags;
 	struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return 0;
 
-	if (likely(vmw_fence_signaled(dev_priv, sequence)))
+	if (likely(vmw_seqno_passed(dev_priv, seqno)))
 		return 0;
 
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
-		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
 					 interruptible, timeout);
 
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
-		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
 					 interruptible, timeout);
 
 	mutex_lock(&dev_priv->hw_mutex);
@@ -214,12 +214,12 @@ int vmw_wait_fence(struct vmw_private *dev_priv,
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
 		    (dev_priv->fence_queue,
-		     vmw_fence_signaled(dev_priv, sequence),
+		     vmw_seqno_passed(dev_priv, seqno),
 		     timeout);
 	else
 		ret = wait_event_timeout
 		    (dev_priv->fence_queue,
-		     vmw_fence_signaled(dev_priv, sequence),
+		     vmw_seqno_passed(dev_priv, seqno),
 		     timeout);
 
 	if (unlikely(ret == 0))
@@ -293,5 +293,5 @@ int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
 		return -EBUSY;
 
 	timeout = (unsigned long)arg->kernel_cookie - timeout;
-	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
+	return vmw_wait_seqno(vmw_priv(dev), true, arg->seqno, true, timeout);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index 61eacc1b5ca..8a8725c2716 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -28,13 +28,13 @@
 
 #include "vmwgfx_drv.h"
 
-struct vmw_fence {
+struct vmw_marker {
 	struct list_head head;
-	uint32_t sequence;
+	uint32_t seqno;
 	struct timespec submitted;
 };
 
-void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+void vmw_marker_queue_init(struct vmw_marker_queue *queue)
 {
 	INIT_LIST_HEAD(&queue->head);
 	queue->lag = ns_to_timespec(0);
@@ -42,38 +42,38 @@ void vmw_fence_queue_init(struct vmw_fence_queue *queue)
 	spin_lock_init(&queue->lock);
 }
 
-void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
 {
-	struct vmw_fence *fence, *next;
+	struct vmw_marker *marker, *next;
 
 	spin_lock(&queue->lock);
-	list_for_each_entry_safe(fence, next, &queue->head, head) {
-		kfree(fence);
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
+		kfree(marker);
 	}
 	spin_unlock(&queue->lock);
 }
 
-int vmw_fence_push(struct vmw_fence_queue *queue,
-		   uint32_t sequence)
+int vmw_marker_push(struct vmw_marker_queue *queue,
+		   uint32_t seqno)
 {
-	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
 
-	if (unlikely(!fence))
+	if (unlikely(!marker))
 		return -ENOMEM;
 
-	fence->sequence = sequence;
-	getrawmonotonic(&fence->submitted);
+	marker->seqno = seqno;
+	getrawmonotonic(&marker->submitted);
 	spin_lock(&queue->lock);
-	list_add_tail(&fence->head, &queue->head);
+	list_add_tail(&marker->head, &queue->head);
 	spin_unlock(&queue->lock);
 
 	return 0;
 }
 
-int vmw_fence_pull(struct vmw_fence_queue *queue,
-		   uint32_t signaled_sequence)
+int vmw_marker_pull(struct vmw_marker_queue *queue,
+		   uint32_t signaled_seqno)
 {
-	struct vmw_fence *fence, *next;
+	struct vmw_marker *marker, *next;
 	struct timespec now;
 	bool updated = false;
 
@@ -87,15 +87,15 @@ int vmw_fence_pull(struct vmw_fence_queue *queue,
 		goto out_unlock;
 	}
 
-	list_for_each_entry_safe(fence, next, &queue->head, head) {
-		if (signaled_sequence - fence->sequence > (1 << 30))
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
+		if (signaled_seqno - marker->seqno > (1 << 30))
 			continue;
 
-		queue->lag = timespec_sub(now, fence->submitted);
+		queue->lag = timespec_sub(now, marker->submitted);
 		queue->lag_time = now;
 		updated = true;
-		list_del(&fence->head);
-		kfree(fence);
+		list_del(&marker->head);
+		kfree(marker);
 	}
 
 out_unlock:
@@ -117,7 +117,7 @@ static struct timespec vmw_timespec_add(struct timespec t1,
 	return t1;
 }
 
-static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
 {
 	struct timespec now;
 
@@ -131,7 +131,7 @@ static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
 }
 
 
-static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+static bool vmw_lag_lt(struct vmw_marker_queue *queue,
 		       uint32_t us)
 {
 	struct timespec lag, cond;
@@ -142,32 +142,30 @@ static bool vmw_lag_lt(struct vmw_fence_queue *queue,
 }
 
 int vmw_wait_lag(struct vmw_private *dev_priv,
-		 struct vmw_fence_queue *queue, uint32_t us)
+		 struct vmw_marker_queue *queue, uint32_t us)
 {
-	struct vmw_fence *fence;
-	uint32_t sequence;
+	struct vmw_marker *marker;
+	uint32_t seqno;
 	int ret;
 
 	while (!vmw_lag_lt(queue, us)) {
 		spin_lock(&queue->lock);
 		if (list_empty(&queue->head))
-			sequence = atomic_read(&dev_priv->fence_seq);
+			seqno = atomic_read(&dev_priv->marker_seq);
 		else {
-			fence = list_first_entry(&queue->head,
-						 struct vmw_fence, head);
-			sequence = fence->sequence;
+			marker = list_first_entry(&queue->head,
+						 struct vmw_marker, head);
+			seqno = marker->seqno;
 		}
 		spin_unlock(&queue->lock);
 
-		ret = vmw_wait_fence(dev_priv, false, sequence, true,
-				     3*HZ);
+		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
					3*HZ);
 
 		if (unlikely(ret != 0))
 			return ret;
 
-		(void) vmw_fence_pull(queue, sequence);
+		(void) vmw_marker_pull(queue, seqno);
 	}
 
 	return 0;
 }
-
-
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 467b80c7485..c2b3909ac50 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -289,7 +289,7 @@ union drm_vmw_surface_reference_arg {
  * DRM_VMW_EXECBUF
  *
  * Submit a command buffer for execution on the host, and return a
- * fence sequence that when signaled, indicates that the command buffer has
+ * fence seqno that when signaled, indicates that the command buffer has
 * executed.
  */
 
@@ -325,7 +325,7 @@ struct drm_vmw_execbuf_arg {
 /**
 * struct drm_vmw_fence_rep
 *
- * @fence_seq: Fence sequence associated with a command submission.
+ * @fence_seq: Fence seqno associated with a command submission.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be take on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -432,7 +432,7 @@ struct drm_vmw_unref_dmabuf_arg {
 
 struct drm_vmw_fence_wait_arg {
-	uint64_t sequence;
+	uint64_t seqno;
 	uint64_t kernel_cookie;
 	int32_t cookie_valid;
 	int32_t pad64;
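A note on the comparison this diff keeps using throughout: vmw_seqno_passed() tests "last_read_seqno - seqno < VMW_FENCE_WRAP" in unsigned 32-bit arithmetic, which stays correct even after the hardware counter wraps past 2^32. Below is a minimal standalone C sketch of that test; the WRAP value is a placeholder for illustration, not necessarily the driver's VMW_FENCE_WRAP.

/*
 * Standalone sketch (not driver code) of the wrap-safe seqno test used by
 * vmw_seqno_passed(): in unsigned 32-bit arithmetic, "a - b < WRAP" holds
 * exactly when b trails a by fewer than WRAP steps, even across the wrap.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define WRAP (1u << 24)	/* placeholder for VMW_FENCE_WRAP */

static bool seqno_passed(uint32_t last_read, uint32_t seqno)
{
	return last_read - seqno < WRAP;	/* unsigned, hence wrap-safe */
}

int main(void)
{
	assert(seqno_passed(100, 90));		/* 90 was read before 100 */
	assert(!seqno_passed(100, 101));	/* 101 not read yet */
	assert(seqno_passed(5, 0xfffffff0u));	/* passed across the wrap */
	return 0;
}

Read in the other direction, the same trick is the staleness check in vmw_seqno_passed(): a seqno more than VMW_FENCE_WRAP ahead of the last emitted marker_seq can never have been sent, so it is treated as stale and already signaled.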
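The renamed vmwgfx_marker.c keeps the throttling bookkeeping itself unchanged: push a (seqno, submit-time) marker per submission, pull every marker covered by the last signaled seqno, and let the measured lag decide how long execbuf stalls. Below is a self-contained userspace sketch of that scheme, assuming a plain singly linked FIFO and CLOCK_MONOTONIC timestamps in place of the kernel's list_head and getrawmonotonic(); the names mirror the driver, but this is an illustration, not its implementation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct marker {
	uint32_t seqno;
	struct timespec submitted;
	struct marker *next;
};

struct marker_queue {
	struct marker *head, *tail;	/* FIFO, oldest marker first */
	int64_t lag_ns;			/* latest measured lag */
};

static int64_t ns_between(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
}

/* Called when a command batch carrying 'seqno' is submitted. */
static int marker_push(struct marker_queue *q, uint32_t seqno)
{
	struct marker *m = malloc(sizeof(*m));

	if (!m)
		return -1;
	m->seqno = seqno;
	clock_gettime(CLOCK_MONOTONIC, &m->submitted);
	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
	return 0;
}

/* Called when the device reports 'signaled'; drops covered markers. */
static void marker_pull(struct marker_queue *q, uint32_t signaled)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	/* Wrap-safe: head is covered if it trails 'signaled' by < 2^30. */
	while (q->head && signaled - q->head->seqno < (1u << 30)) {
		struct marker *m = q->head;

		q->lag_ns = ns_between(m->submitted, now);
		q->head = m->next;
		if (!q->head)
			q->tail = NULL;
		free(m);
	}
}

int main(void)
{
	struct marker_queue q = { 0 };

	marker_push(&q, 1);
	marker_push(&q, 2);
	marker_pull(&q, 1);	/* seqno 1 signaled; marker 2 stays queued */
	printf("lag after seqno 1: %lld ns\n", (long long)q.lag_ns);
	return 0;
}

Because the queue is FIFO-ordered, the sketch can stop at the first marker the signaled seqno does not cover, where the driver's list walk reaches the same set with a continue. vmw_wait_lag() then simply re-waits on the oldest queued seqno until the measured lag drops under the caller's throttle_us threshold.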