| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-10 10:49:20 +0900 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-10 10:49:20 +0900 |
| commit | 72055425e53540d9d0e59a57ac8c9b8ce77b62d5 (patch) | |
| tree | 8033d7d7bfdf8725eed785d02f7121d201052d2e | /fs/btrfs/qgroup.c |
| parent | fc81c038c2d61d4fcd8150f383fec1ce23087597 (diff) | |
| parent | f46dbe3dee853f8a860f889cb2b7ff4c624f2a7a (diff) | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs update from Chris Mason:
 "This is a large pull, with the bulk of the updates coming from:
   - Hole punching
   - send/receive fixes
   - fsync performance
   - Disk format extension allowing more hardlinks inside a single
     directory (btrfs-progs patch required to enable the compat bit for
     this one)
  I'm cooking more unrelated RAID code, but I wanted to make sure this
  original batch makes it in.  The largest updates here are relatively
  old and have been in testing for some time."
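
As a rough userspace illustration of the hole-punching support mentioned in the summary above (this sketch is not taken from the btrfs patches themselves): the feature is exposed through the FALLOC_FL_PUNCH_HOLE mode of fallocate(2), which must be combined with FALLOC_FL_KEEP_SIZE. The file name and byte range below are arbitrary examples.

```c
/*
 * Hedged sketch: punch a hole in an existing file via fallocate(2).
 * FALLOC_FL_PUNCH_HOLE deallocates the byte range; FALLOC_FL_KEEP_SIZE
 * is required so the file size is left unchanged.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);	/* arbitrary example file */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Punch a 1 MiB hole starting at offset 4096; i_size is preserved. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024) < 0)
		perror("fallocate(PUNCH_HOLE)");

	close(fd);
	return 0;
}
```

On kernels or filesystems without punch-hole support the call fails with EOPNOTSUPP, so checking the return value as above is the usual pattern.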
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (121 commits)
  btrfs: init ref_index to zero in add_inode_ref
  Btrfs: remove repeated eb->pages check in, disk-io.c/csum_dirty_buffer
  Btrfs: fix page leakage
  Btrfs: do not warn_on when we cannot alloc a page for an extent buffer
  Btrfs: don't bug on enomem in readpage
  Btrfs: cleanup pages properly when ENOMEM in compression
  Btrfs: make filesystem read-only when submitting barrier fails
  Btrfs: detect corrupted filesystem after write I/O errors
  Btrfs: make compress and nodatacow mount options mutually exclusive
  btrfs: fix message printing
  Btrfs: don't bother committing delayed inode updates when fsyncing
  btrfs: move inline function code to header file
  Btrfs: remove unnecessary IS_ERR in bio_readpage_error()
  btrfs: remove unused function btrfs_insert_some_items()
  Btrfs: don't commit instead of overcommitting
  Btrfs: confirmation of value is added before trace_btrfs_get_extent() is called
  Btrfs: be smarter about dropping things from the tree log
  Btrfs: don't lookup csums for prealloc extents
  Btrfs: cache extent state when writing out dirty metadata pages
  Btrfs: do not hold the file extent leaf locked when adding extent item
  ...
Diffstat (limited to 'fs/btrfs/qgroup.c')
| | | |
|---|---|---|
| -rw-r--r-- | fs/btrfs/qgroup.c | 40 |

1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index b6501558174..5039686df6a 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1145,12 +1145,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 
                 ulist_reinit(tmp);
                                                 /* XXX id not needed */
-                ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+                ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC);
                 ULIST_ITER_INIT(&tmp_uiter);
                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
                         struct btrfs_qgroup_list *glist;
 
-                        qg = (struct btrfs_qgroup *)tmp_unode->aux;
+                        qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
                         if (qg->refcnt < seq)
                                 qg->refcnt = seq + 1;
                         else
@@ -1158,7 +1158,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 
                         list_for_each_entry(glist, &qg->groups, next_group) {
                                 ulist_add(tmp, glist->group->qgroupid,
-                                          (unsigned long)glist->group,
+                                          (u64)(uintptr_t)glist->group,
                                           GFP_ATOMIC);
                         }
                 }
@@ -1168,13 +1168,13 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
          * step 2: walk from the new root
          */
         ulist_reinit(tmp);
-        ulist_add(tmp, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+        ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
         ULIST_ITER_INIT(&uiter);
         while ((unode = ulist_next(tmp, &uiter))) {
                 struct btrfs_qgroup *qg;
                 struct btrfs_qgroup_list *glist;
 
-                qg = (struct btrfs_qgroup *)unode->aux;
+                qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
                 if (qg->refcnt < seq) {
                         /* not visited by step 1 */
                         qg->rfer += sgn * node->num_bytes;
@@ -1190,7 +1190,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 
                 list_for_each_entry(glist, &qg->groups, next_group) {
                         ulist_add(tmp, glist->group->qgroupid,
-                                  (unsigned long)glist->group, GFP_ATOMIC);
+                                  (uintptr_t)glist->group, GFP_ATOMIC);
                 }
         }
 
@@ -1208,12 +1208,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
                         continue;
 
                 ulist_reinit(tmp);
-                ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+                ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
                 ULIST_ITER_INIT(&tmp_uiter);
                 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
                         struct btrfs_qgroup_list *glist;
 
-                        qg = (struct btrfs_qgroup *)tmp_unode->aux;
+                        qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
                         if (qg->tag == seq)
                                 continue;
 
@@ -1225,7 +1225,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 
                         list_for_each_entry(glist, &qg->groups, next_group) {
                                 ulist_add(tmp, glist->group->qgroupid,
-                                          (unsigned long)glist->group,
+                                          (uintptr_t)glist->group,
                                           GFP_ATOMIC);
                         }
                 }
@@ -1469,13 +1469,17 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
          * be exceeded
          */
         ulist = ulist_alloc(GFP_ATOMIC);
-        ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+        if (!ulist) {
+                ret = -ENOMEM;
+                goto out;
+        }
+        ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
         ULIST_ITER_INIT(&uiter);
         while ((unode = ulist_next(ulist, &uiter))) {
                 struct btrfs_qgroup *qg;
                 struct btrfs_qgroup_list *glist;
 
-                qg = (struct btrfs_qgroup *)unode->aux;
+                qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 
                 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
                     qg->reserved + qg->rfer + num_bytes >
@@ -1489,7 +1493,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 
                 list_for_each_entry(glist, &qg->groups, next_group) {
                         ulist_add(ulist, glist->group->qgroupid,
-                                  (unsigned long)glist->group, GFP_ATOMIC);
+                                  (uintptr_t)glist->group, GFP_ATOMIC);
                 }
         }
         if (ret)
@@ -1502,7 +1506,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
         while ((unode = ulist_next(ulist, &uiter))) {
                 struct btrfs_qgroup *qg;
 
-                qg = (struct btrfs_qgroup *)unode->aux;
+                qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 
                 qg->reserved += num_bytes;
         }
@@ -1541,19 +1545,23 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
                 goto out;
 
         ulist = ulist_alloc(GFP_ATOMIC);
-        ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+        if (!ulist) {
+                btrfs_std_error(fs_info, -ENOMEM);
+                goto out;
+        }
+        ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
         ULIST_ITER_INIT(&uiter);
         while ((unode = ulist_next(ulist, &uiter))) {
                 struct btrfs_qgroup *qg;
                 struct btrfs_qgroup_list *glist;
 
-                qg = (struct btrfs_qgroup *)unode->aux;
+                qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 
                 qg->reserved -= num_bytes;
 
                 list_for_each_entry(glist, &qg->groups, next_group) {
                         ulist_add(ulist, glist->group->qgroupid,
-                                  (unsigned long)glist->group, GFP_ATOMIC);
+                                  (uintptr_t)glist->group, GFP_ATOMIC);
                 }
         }
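
For readers skimming the diff above: the recurring change is that the ulist aux value is a 64-bit integer, so pointers are now round-tripped through uintptr_t instead of being cast via unsigned long, and the two ulist_alloc(GFP_ATOMIC) call sites gain ENOMEM handling. Below is a minimal standalone userspace sketch of the cast pattern only; the struct and field names are invented for illustration and are not the kernel's.

```c
/*
 * Standalone sketch (not kernel code) of the cast pattern above: a pointer
 * is stashed in a 64-bit "aux" slot and later recovered. Going through
 * uintptr_t avoids converting directly between a pointer and an integer of
 * a different width, which matters on 32-bit builds.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_ulist_node {
	uint64_t aux;			/* stand-in for a 64-bit aux slot */
};

struct fake_qgroup {
	uint64_t qgroupid;
};

int main(void)
{
	struct fake_qgroup qg = { .qgroupid = 5 };
	struct fake_ulist_node node;

	/* store: pointer -> uintptr_t -> 64-bit integer */
	node.aux = (uint64_t)(uintptr_t)&qg;

	/* load: 64-bit integer -> uintptr_t -> pointer */
	struct fake_qgroup *back = (struct fake_qgroup *)(uintptr_t)node.aux;

	printf("qgroupid = %llu\n", (unsigned long long)back->qgroupid);
	return 0;
}
```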