Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	123
1 file changed, 67 insertions, 56 deletions
@@ -175,14 +175,6 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
 	struct bio_vec *bvl;
 
 	/*
-	 * If 'bs' is given, lookup the pool and do the mempool alloc.
-	 * If not, this is a bio_kmalloc() allocation and just do a
-	 * kzalloc() for the exact number of vecs right away.
-	 */
-	if (!bs)
-		bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
-
-	/*
 	 * see comment near bvec_array define!
 	 */
 	switch (nr) {
@@ -260,21 +252,6 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 	mempool_free(p, bs->bio_pool);
 }
 
-/*
- * default destructor for a bio allocated with bio_alloc_bioset()
- */
-static void bio_fs_destructor(struct bio *bio)
-{
-	bio_free(bio, fs_bio_set);
-}
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
-	if (bio_has_allocated_vec(bio))
-		kfree(bio->bi_io_vec);
-	kfree(bio);
-}
-
 void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
@@ -301,21 +278,15 @@ void bio_init(struct bio *bio)
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	unsigned long idx = BIO_POOL_NONE;
 	struct bio_vec *bvl = NULL;
-	struct bio *bio = NULL;
-	unsigned long idx = 0;
-	void *p = NULL;
+	struct bio *bio;
+	void *p;
 
-	if (bs) {
-		p = mempool_alloc(bs->bio_pool, gfp_mask);
-		if (!p)
-			goto err;
-		bio = p + bs->front_pad;
-	} else {
-		bio = kmalloc(sizeof(*bio), gfp_mask);
-		if (!bio)
-			goto err;
-	}
+	p = mempool_alloc(bs->bio_pool, gfp_mask);
+	if (unlikely(!p))
+		return NULL;
+	bio = p + bs->front_pad;
 
 	bio_init(bio);
@@ -332,22 +303,33 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		nr_iovecs = bvec_nr_vecs(idx);
 	}
 
+out_set:
 	bio->bi_flags |= idx << BIO_POOL_OFFSET;
 	bio->bi_max_vecs = nr_iovecs;
-out_set:
 	bio->bi_io_vec = bvl;
-
 	return bio;
 
 err_free:
-	if (bs)
-		mempool_free(p, bs->bio_pool);
-	else
-		kfree(bio);
-err:
+	mempool_free(p, bs->bio_pool);
 	return NULL;
 }
 
+static void bio_fs_destructor(struct bio *bio)
+{
+	bio_free(bio, fs_bio_set);
+}
+
+/**
+ *	bio_alloc - allocate a new bio, memory pool backed
+ *	@gfp_mask: allocation mask to use
+ *	@nr_iovecs: number of iovecs
+ *
+ *	Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask
+ *	contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ *
+ *	RETURNS:
+ *	Pointer to new bio on success, NULL on failure.
+ */
 struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 {
 	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
@@ -358,19 +340,45 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 	return bio;
 }
 
-/*
- * Like bio_alloc(), but doesn't use a mempool backing. This means that
- * it CAN fail, but while bio_alloc() can only be used for allocations
- * that have a short (finite) life span, bio_kmalloc() should be used
- * for more permanent bio allocations (like allocating some bio's for
- * initalization or setup purposes).
- */
+static void bio_kmalloc_destructor(struct bio *bio)
+{
+	if (bio_integrity(bio))
+		bio_integrity_free(bio);
+	kfree(bio);
+}
+
+/**
+ * bio_alloc - allocate a bio for I/O
+ * @gfp_mask:   the GFP_ mask given to the slab allocator
+ * @nr_iovecs:	number of iovecs to pre-allocate
+ *
+ * Description:
+ *   bio_alloc will allocate a bio and associated bio_vec array that can hold
+ *   at least @nr_iovecs entries. Allocations will be done from the
+ *   fs_bio_set. Also see @bio_alloc_bioset.
+ *
+ *   If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ *   a bio. This is due to the mempool guarantees. To make this work, callers
+ *   must never allocate more than 1 bio at the time from this pool. Callers
+ *   that need to allocate more than 1 bio must always submit the previously
+ *   allocate bio for IO before attempting to allocate a new one. Failure to
+ *   do so can cause livelocks under memory pressure.
+ *
+ **/
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 {
-	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+	struct bio *bio;
 
-	if (bio)
-		bio->bi_destructor = bio_kmalloc_destructor;
+	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
+		      gfp_mask);
+	if (unlikely(!bio))
+		return NULL;
+
+	bio_init(bio);
+	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
+	bio->bi_max_vecs = nr_iovecs;
+	bio->bi_io_vec = bio->bi_inline_vecs;
+	bio->bi_destructor = bio_kmalloc_destructor;
 	return bio;
 }
@@ -809,12 +817,15 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		len += iov[i].iov_len;
 	}
 
+	if (offset)
+		nr_pages++;
+
 	bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
 	if (!bmd)
 		return ERR_PTR(-ENOMEM);
 
 	ret = -ENOMEM;
-	bio = bio_alloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		goto out_bmd;
@@ -938,7 +949,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
 	if (!nr_pages)
 		return ERR_PTR(-EINVAL);
 
-	bio = bio_alloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
@@ -1122,7 +1133,7 @@ static struct bio *__bio_map_kern(struct request_queue *q, void *data,
 	int offset, i;
 	struct bio *bio;
 
-	bio = bio_alloc(gfp_mask, nr_pages);
+	bio = bio_kmalloc(gfp_mask, nr_pages);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
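
Note: with this patch, bio_kmalloc() no longer detours through bio_alloc_bioset() with a NULL bio_set. It does one kmalloc() sized for the bio plus nr_iovecs bio_vecs and points bi_io_vec at bi_inline_vecs, so the user copy/map paths above can fail cleanly instead of drawing on the fs_bio_set mempool. The following is a minimal userspace sketch of that "header plus inline vector array in one allocation" idiom; the names (myio, myio_vec, myio_kmalloc) are illustrative stand-ins, not kernel identifiers.

/*
 * Sketch of the single-allocation idiom used by the new bio_kmalloc().
 * Userspace C, hypothetical names; not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct myio_vec {
	void		*mv_page;
	unsigned int	 mv_len;
	unsigned int	 mv_offset;
};

struct myio {
	unsigned int	 mi_max_vecs;
	unsigned int	 mi_vcnt;
	struct myio_vec	*mi_io_vec;		/* points at the inline vecs below */
	struct myio_vec	 mi_inline_vecs[];	/* flexible array member */
};

/* One malloc() covers the header and nr_iovecs vectors. */
static struct myio *myio_kmalloc(unsigned int nr_iovecs)
{
	struct myio *io;

	io = malloc(sizeof(*io) + nr_iovecs * sizeof(struct myio_vec));
	if (!io)
		return NULL;		/* can fail; no mempool backing */

	memset(io, 0, sizeof(*io));
	io->mi_max_vecs = nr_iovecs;
	io->mi_io_vec = io->mi_inline_vecs;
	return io;
}

int main(void)
{
	struct myio *io = myio_kmalloc(4);

	if (!io)
		return 1;
	io->mi_io_vec[0].mv_len = 512;	/* vectors are usable immediately */
	printf("allocated io with %u inline vecs\n", io->mi_max_vecs);
	free(io);			/* one free releases header and vectors */
	return 0;
}

A single free of the header releases the vectors as well, which is why the new bio_kmalloc_destructor() above only needs kfree(bio) (plus the bio_integrity_free() call) and no longer frees a separately allocated bi_io_vec.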