diff options
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
| -rw-r--r-- | fs/xfs/linux-2.6/xfs_buf.c | 37 | 
1 file changed, 17 insertions, 20 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 63fd2c07cb5..4c5deb6e9e3 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -488,29 +488,16 @@ found:  	spin_unlock(&pag->pag_buf_lock);  	xfs_perag_put(pag); -	/* Attempt to get the semaphore without sleeping, -	 * if this does not work then we need to drop the -	 * spinlock and do a hard attempt on the semaphore. -	 */ -	if (down_trylock(&bp->b_sema)) { +	if (xfs_buf_cond_lock(bp)) { +		/* failed, so wait for the lock if requested. */  		if (!(flags & XBF_TRYLOCK)) { -			/* wait for buffer ownership */  			xfs_buf_lock(bp);  			XFS_STATS_INC(xb_get_locked_waited);  		} else { -			/* We asked for a trylock and failed, no need -			 * to look at file offset and length here, we -			 * know that this buffer at least overlaps our -			 * buffer and is locked, therefore our buffer -			 * either does not exist, or is this buffer. -			 */  			xfs_buf_rele(bp);  			XFS_STATS_INC(xb_busy_locked);  			return NULL;  		} -	} else { -		/* trylock worked */ -		XB_SET_OWNER(bp);  	}  	if (bp->b_flags & XBF_STALE) { @@ -876,10 +863,18 @@ xfs_buf_rele(   */  /* - *	Locks a buffer object, if it is not already locked. - *	Note that this in no way locks the underlying pages, so it is only - *	useful for synchronizing concurrent use of buffer objects, not for - *	synchronizing independent access to the underlying pages. + *	Locks a buffer object, if it is not already locked.  Note that this in + *	no way locks the underlying pages, so it is only useful for + *	synchronizing concurrent use of buffer objects, not for synchronizing + *	independent access to the underlying pages. + * + *	If we come across a stale, pinned, locked buffer, we know that we are + *	being asked to lock a buffer that has been reallocated. Because it is + *	pinned, we know that the log has not been pushed to disk and hence it + *	will still be locked.  
Rather than continuing to have trylock attempts + *	fail until someone else pushes the log, push it ourselves before + *	returning.  This means that the xfsaild will not get stuck trying + *	to push on stale inode buffers.   */  int  xfs_buf_cond_lock( @@ -890,6 +885,8 @@ xfs_buf_cond_lock(  	locked = down_trylock(&bp->b_sema) == 0;  	if (locked)  		XB_SET_OWNER(bp); +	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) +		xfs_log_force(bp->b_target->bt_mount, 0);  	trace_xfs_buf_cond_lock(bp, _RET_IP_);  	return locked ? 0 : -EBUSY; @@ -1781,7 +1778,6 @@ xfs_buf_delwri_split(  	INIT_LIST_HEAD(list);  	spin_lock(dwlk);  	list_for_each_entry_safe(bp, n, dwq, b_list) { -		trace_xfs_buf_delwri_split(bp, _RET_IP_);  		ASSERT(bp->b_flags & XBF_DELWRI);  		if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { @@ -1795,6 +1791,7 @@ xfs_buf_delwri_split(  					 _XBF_RUN_QUEUES);  			bp->b_flags |= XBF_WRITE;  			list_move_tail(&bp->b_list, list); +			trace_xfs_buf_delwri_split(bp, _RET_IP_);  		} else  			skipped++;  	}  |