| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-04-26 23:28:16 +0200 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-05-03 11:18:31 +0200 |
| commit | 4225d0f219d22440e33a5686bf806356cb25bcf5 | |
| tree | aa0447ef7f0a739000da6bdb241de42a70ae9205 /drivers/gpu/drm/i915/intel_ringbuffer.h | |
| parent | 316d388450be37fedcf4b37cf211b2bdc7826bb8 | |
drm/i915: fixup __iomem mixups in ringbuffer.c
Two things:
- ring->virtual_start is an __iomem pointer, so treat it accordingly
  (sketched below).
- dev_priv->status_page.page_addr is now always a CPU address; no pointer
  casting is needed for it.
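A hedged sketch of the distinction the patch enforces; the names (example_ring, example_emit, example_status) are hypothetical and not from the patch. __iomem mappings must go through the io accessors, while a plain CPU address can simply be dereferenced:

```c
#include <linux/io.h>
#include <linux/compiler.h>

/* Hypothetical struct holding the two kinds of pointers the patch untangles. */
struct example_ring {
	u32 __iomem	*virtual_start;	/* GTT iomapping: io accessors only */
	u32		*status_page_addr;	/* ordinary kernel address */
};

static void example_emit(struct example_ring *ring, int dword, u32 cmd)
{
	/* __iomem memory: a direct dereference would be wrong and trips
	 * sparse's address-space checking, so go through iowrite32(). */
	iowrite32(cmd, ring->virtual_start + dword);
}

static u32 example_status(struct example_ring *ring, int reg)
{
	/* CPU address: a plain load is correct, no ioread32() needed. */
	return ring->status_page_addr[reg];
}
```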
Take the opportunity to remove the unnecessary drm indirection when
setting up the ringbuffer iomapping.
v2: Add a compiler barrier before reading the hw status page.
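Why v2 needs the barrier: page_addr is now a plain (non-volatile) pointer to memory the GPU writes behind the compiler's back, so without a compiler barrier a polling loop could legally hoist the load and spin on a stale value. A minimal illustrative loop, not the driver's actual wait code:

```c
#include <linux/compiler.h>	/* barrier() */
#include <asm/processor.h>	/* cpu_relax() */

/* Hypothetical poll loop over a hardware-updated status page. */
static void example_wait(u32 *hw_status, int reg, u32 want)
{
	while (1) {
		barrier();	/* forbid reusing a previously loaded value */
		if (hw_status[reg] == want)
			break;
		cpu_relax();
	}
}
```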
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.h')
| -rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.h | 7 |
|---|---|---|

1 file changed, 4 insertions(+), 3 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7b879926969..baba7571457 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 struct  intel_hw_status_page {
-	u32	__iomem	*page_addr;
+	u32		*page_addr;
 	unsigned int	gfx_addr;
 	struct		drm_i915_gem_object *obj;
 };
@@ -115,7 +115,6 @@ struct  intel_ring_buffer {
 	u32 outstanding_lazy_request;
 
 	wait_queue_head_t irq_queue;
-	drm_local_map_t map;
 
 	void *private;
 };
@@ -149,7 +148,9 @@ static inline u32
 intel_read_status_page(struct intel_ring_buffer *ring,
 		       int reg)
 {
-	return ioread32(ring->status_page.page_addr + reg);
+	/* Ensure that the compiler doesn't optimize away the load. */
+	barrier();
+	return ring->status_page.page_addr[reg];
 }
 
 /**
```
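For context, a hedged reconstruction of how a caller of the rewritten helper looks in the i915 code of this era; the barrier() inside intel_read_status_page() guarantees each call performs a real load of the hardware status page:

```c
/* Reconstruction of the driver's seqno read (shape as in
 * intel_ringbuffer.c of this period; shown here for illustration). */
static u32 ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
```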