diff options
Diffstat (limited to 'fs/xfs/xfs_buf.h')
-rw-r--r--  fs/xfs/xfs_buf.h  151
1 files changed, 119 insertions, 32 deletions
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 79344c48008..7c0b6a0a155 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -38,26 +38,28 @@ typedef enum {  	XBRW_ZERO = 3,			/* Zero target memory */  } xfs_buf_rw_t; -#define XBF_READ	(1 << 0) /* buffer intended for reading from device */ -#define XBF_WRITE	(1 << 1) /* buffer intended for writing to device */ -#define XBF_READ_AHEAD	(1 << 2) /* asynchronous read-ahead */ -#define XBF_ASYNC	(1 << 4) /* initiator will not wait for completion */ -#define XBF_DONE	(1 << 5) /* all pages in the buffer uptodate */ -#define XBF_STALE	(1 << 6) /* buffer has been staled, do not find it */ +#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */ +#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */ +#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */ +#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */ +#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */ +#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */  /* I/O hints for the BIO layer */ -#define XBF_SYNCIO	(1 << 10)/* treat this buffer as synchronous I/O */ -#define XBF_FUA		(1 << 11)/* force cache write through mode */ -#define XBF_FLUSH	(1 << 12)/* flush the disk cache before a write */ +#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */ +#define XBF_FUA		 (1 << 11)/* force cache write through mode */ +#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */  /* flags used only as arguments to access routines */ -#define XBF_TRYLOCK	(1 << 16)/* lock requested, but do not wait */ -#define XBF_UNMAPPED	(1 << 17)/* do not map the buffer */ +#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */ +#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */  /* flags used only internally */ -#define _XBF_PAGES	(1 << 20)/* backed by refcounted pages */ -#define _XBF_KMEM	(1 << 21)/* backed by heap memory */ -#define 
_XBF_DELWRI_Q	(1 << 22)/* buffer on a delwri queue */ +#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */ +#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */ +#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */ +#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */ +#define _XBF_LRU_DISPOSE (1 << 24)/* buffer being discarded */  typedef unsigned int xfs_buf_flags_t; @@ -71,11 +73,13 @@ typedef unsigned int xfs_buf_flags_t;  	{ XBF_SYNCIO,		"SYNCIO" }, \  	{ XBF_FUA,		"FUA" }, \  	{ XBF_FLUSH,		"FLUSH" }, \ -	{ XBF_TRYLOCK,		"TRYLOCK" }, 	/* should never be set */\ +	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\  	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\  	{ _XBF_PAGES,		"PAGES" }, \  	{ _XBF_KMEM,		"KMEM" }, \ -	{ _XBF_DELWRI_Q,	"DELWRI_Q" } +	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \ +	{ _XBF_COMPOUND,	"COMPOUND" }, \ +	{ _XBF_LRU_DISPOSE,	"LRU_DISPOSE" }  typedef struct xfs_buftarg {  	dev_t			bt_dev; @@ -98,6 +102,14 @@ typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);  #define XB_PAGES	2 +struct xfs_buf_map { +	xfs_daddr_t		bm_bn;	/* block number for I/O */ +	int			bm_len;	/* size of I/O */ +}; + +#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \ +	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) }; +  typedef struct xfs_buf {  	/*  	 * first cacheline holds all the fields needed for an uncontended cache @@ -107,14 +119,19 @@ typedef struct xfs_buf {  	 * fast-path on locking.  	 
*/  	struct rb_node		b_rbnode;	/* rbtree node */ -	xfs_daddr_t		b_bn;		/* block number for I/O */ +	xfs_daddr_t		b_bn;		/* block number of buffer */  	int			b_length;	/* size of buffer in BBs */  	atomic_t		b_hold;		/* reference count */  	atomic_t		b_lru_ref;	/* lru reclaim ref count */  	xfs_buf_flags_t		b_flags;	/* status flags */  	struct semaphore	b_sema;		/* semaphore for lockables */ +	/* +	 * concurrent access to b_lru and b_lru_flags are protected by +	 * bt_lru_lock and not by b_sema +	 */  	struct list_head	b_lru;		/* lru list */ +	xfs_buf_flags_t		b_lru_flags;	/* internal lru status flags */  	wait_queue_head_t	b_waiters;	/* unpin waiters */  	struct list_head	b_list;  	struct xfs_perag	*b_pag;		/* contains rbtree root */ @@ -127,12 +144,16 @@ typedef struct xfs_buf {  	struct xfs_trans	*b_transp;  	struct page		**b_pages;	/* array of page pointers */  	struct page		*b_page_array[XB_PAGES]; /* inline pages */ +	struct xfs_buf_map	*b_maps;	/* compound buffer map */ +	struct xfs_buf_map	b_map;		/* inline compound buffer map */ +	int			b_map_count;  	int			b_io_length;	/* IO size in BBs */  	atomic_t		b_pin_count;	/* pin count */  	atomic_t		b_io_remaining;	/* #outstanding I/O requests */  	unsigned int		b_page_count;	/* size of page array */  	unsigned int		b_offset;	/* page offset in first page */  	unsigned short		b_error;	/* error code on I/O */ +  #ifdef XFS_BUF_LOCK_TRACKING  	int			b_last_holder;  #endif @@ -140,22 +161,78 @@ typedef struct xfs_buf {  /* Finding and Reading Buffers */ -struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno, -				size_t numblks, xfs_buf_flags_t flags, -				struct xfs_buf *new_bp); -#define xfs_incore(buftarg,blkno,len,lockit) \ -	_xfs_buf_find(buftarg, blkno ,len, lockit, NULL) +struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, +			      struct xfs_buf_map *map, int nmaps, +			      xfs_buf_flags_t flags, struct xfs_buf *new_bp); + +static inline struct xfs_buf * +xfs_incore( +	struct 
xfs_buftarg	*target, +	xfs_daddr_t		blkno, +	size_t			numblks, +	xfs_buf_flags_t		flags) +{ +	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); +	return _xfs_buf_find(target, &map, 1, flags, NULL); +} + +struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target, +			       struct xfs_buf_map *map, int nmaps, +			       xfs_buf_flags_t flags); + +static inline struct xfs_buf * +xfs_buf_alloc( +	struct xfs_buftarg	*target, +	xfs_daddr_t		blkno, +	size_t			numblks, +	xfs_buf_flags_t		flags) +{ +	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); +	return _xfs_buf_alloc(target, &map, 1, flags); +} -struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno, -				size_t numblks, xfs_buf_flags_t flags); -struct xfs_buf *xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno, -				size_t numblks, xfs_buf_flags_t flags); -void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno, -				size_t numblks); +struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target, +			       struct xfs_buf_map *map, int nmaps, +			       xfs_buf_flags_t flags); +struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target, +			       struct xfs_buf_map *map, int nmaps, +			       xfs_buf_flags_t flags); +void xfs_buf_readahead_map(struct xfs_buftarg *target, +			       struct xfs_buf_map *map, int nmaps); + +static inline struct xfs_buf * +xfs_buf_get( +	struct xfs_buftarg	*target, +	xfs_daddr_t		blkno, +	size_t			numblks, +	xfs_buf_flags_t		flags) +{ +	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); +	return xfs_buf_get_map(target, &map, 1, flags); +} + +static inline struct xfs_buf * +xfs_buf_read( +	struct xfs_buftarg	*target, +	xfs_daddr_t		blkno, +	size_t			numblks, +	xfs_buf_flags_t		flags) +{ +	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); +	return xfs_buf_read_map(target, &map, 1, flags); +} + +static inline void +xfs_buf_readahead( +	struct xfs_buftarg	*target, +	xfs_daddr_t		blkno, +	size_t			numblks) +{ +	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); +	return 
xfs_buf_readahead_map(target, &map, 1); +}  struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks); -struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno, -				size_t numblks, xfs_buf_flags_t flags);  void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);  int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length); @@ -232,8 +309,18 @@ void xfs_buf_stale(struct xfs_buf *bp);  #define XFS_BUF_UNWRITE(bp)	((bp)->b_flags &= ~XBF_WRITE)  #define XFS_BUF_ISWRITE(bp)	((bp)->b_flags & XBF_WRITE) -#define XFS_BUF_ADDR(bp)		((bp)->b_bn) -#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_bn = (xfs_daddr_t)(bno)) +/* + * These macros use the IO block map rather than b_bn. b_bn is now really + * just for the buffer cache index for cached buffers. As IO does not use b_bn + * anymore, uncached buffers do not use b_bn at all and hence must modify the IO + * map directly. Uncached buffers are not allowed to be discontiguous, so this + * is safe to do. + * + * In future, uncached buffers will pass the block number directly to the io + * request function and hence these macros will go away at that point. + */ +#define XFS_BUF_ADDR(bp)		((bp)->b_map.bm_bn) +#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_map.bm_bn = (xfs_daddr_t)(bno))  static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)  {  |