block: switch polling to be bio based

ANBZ: #11744

commit 3e08773c38 upstream.

Replace the blk_poll interface, which requires the caller to keep a queue
and cookie from the submission path, with polling based on the bio.

Polling for the bio itself leads to a few advantages (the caller-side
change is sketched after this list):

 - the cookie construction can be made entirely private in blk-mq.c
 - the caller does not need to remember the request_queue and cookie
   separately and thus sidesteps their lifetime issues
 - keeping the device and the cookie inside the bio makes it trivial to
   support polling of bios remapped by stacking drivers
 - a lot of code to propagate the cookie back up the submission path can
   be removed entirely.
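
The caller-side change, as a sketch drawn from the __blkdev_direct_IO_simple()
hunk below ("done" stands in for whatever completion flag the waiter checks):

    /* before: the submitter had to carry the queue and the returned cookie */
    qc = submit_bio(bio);
    while (!READ_ONCE(done))
            if (!blk_poll(bdev_get_queue(bdev), qc, 0))
                    blk_io_schedule();

    /* after: the cookie lives in bio->bi_cookie, so the bio alone is enough */
    submit_bio(bio);
    while (!READ_ONCE(done))
            if (!bio_poll(bio, 0))
                    blk_io_schedule();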

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-15-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
[joe: fix all conflicts for *submit_bio() and blk_poll()]
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Ferry Meng <mengferry@linux.alibaba.com>
Reviewed-by: Guixin Liu <kanie@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/4084
commit b8f1c9a55b (parent 30815d71f0)
43 files changed, 236 insertions(+), 273 deletions(-)


@ -59,7 +59,7 @@ struct nfhd_device {
struct gendisk *disk;
};
static blk_qc_t nfhd_submit_bio(struct bio *bio)
static void nfhd_submit_bio(struct bio *bio)
{
struct nfhd_device *dev = bio->bi_disk->private_data;
struct bio_vec bvec;
@ -77,7 +77,6 @@ static blk_qc_t nfhd_submit_bio(struct bio *bio)
sec += len;
}
bio_endio(bio);
return BLK_QC_T_NONE;
}
static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)


@ -101,7 +101,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
spin_unlock(&dev->lock);
}
static blk_qc_t simdisk_submit_bio(struct bio *bio)
static void simdisk_submit_bio(struct bio *bio)
{
struct simdisk *dev = bio->bi_disk->private_data;
struct bio_vec bvec;
@ -119,7 +119,6 @@ static blk_qc_t simdisk_submit_bio(struct bio *bio)
}
bio_endio(bio);
return BLK_QC_T_NONE;
}
static int simdisk_open(struct block_device *bdev, fmode_t mode)


@ -281,6 +281,7 @@ void bio_init(struct bio *bio, struct bio_vec *table,
memset(bio, 0, sizeof(*bio));
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
bio->bi_cookie = BLK_QC_T_NONE;
bio->bi_io_vec = table;
bio->bi_max_vecs = max_vecs;


@ -921,25 +921,22 @@ end_io:
return false;
}
static blk_qc_t __submit_bio(struct bio *bio)
static void __submit_bio(struct bio *bio)
{
struct gendisk *disk = bio->bi_disk;
blk_qc_t ret = BLK_QC_T_NONE;
if (unlikely(bio_queue_enter(bio) != 0))
return BLK_QC_T_NONE;
return;
if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
goto queue_exit;
if (disk->fops->submit_bio) {
ret = disk->fops->submit_bio(bio);
goto queue_exit;
if (!disk->fops->submit_bio) {
blk_mq_submit_bio(bio);
return;
}
return blk_mq_submit_bio(bio);
disk->fops->submit_bio(bio);
queue_exit:
blk_queue_exit(disk->queue);
return ret;
}
/*
@ -961,10 +958,9 @@ queue_exit:
* bio_list_on_stack[1] contains bios that were submitted before the current
* ->submit_bio, but that haven't been processed yet.
*/
static blk_qc_t __submit_bio_noacct(struct bio *bio)
static void __submit_bio_noacct(struct bio *bio)
{
struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;
BUG_ON(bio->bi_next);
@ -981,7 +977,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
ret = __submit_bio(bio);
__submit_bio(bio);
/*
* Sort new bios into those for a lower level and those for the
@ -1004,25 +1000,22 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
current->bio_list = NULL;
return ret;
}
static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
static void __submit_bio_noacct_mq(struct bio *bio)
{
struct bio_list bio_list[2] = { };
blk_qc_t ret;
current->bio_list = bio_list;
do {
ret = __submit_bio(bio);
__submit_bio(bio);
} while ((bio = bio_list_pop(&bio_list[0])));
current->bio_list = NULL;
return ret;
}
blk_qc_t submit_bio_noacct_nocheck(struct bio *bio)
void submit_bio_noacct_nocheck(struct bio *bio)
{
blk_cgroup_bio_start(bio);
blkcg_bio_issue_init(bio);
@ -1042,14 +1035,12 @@ blk_qc_t submit_bio_noacct_nocheck(struct bio *bio)
* to collect a list of requests submitted by a ->submit_bio method while
* it is active, and then process them after it returned.
*/
if (current->bio_list) {
if (current->bio_list)
bio_list_add(&current->bio_list[0], bio);
return BLK_QC_T_NONE;
}
if (!bio->bi_disk->fops->submit_bio)
return __submit_bio_noacct_mq(bio);
return __submit_bio_noacct(bio);
else if (!bio->bi_disk->fops->submit_bio)
__submit_bio_noacct_mq(bio);
else
__submit_bio_noacct(bio);
}
/**
@ -1061,9 +1052,9 @@ blk_qc_t submit_bio_noacct_nocheck(struct bio *bio)
* systems and other upper level users of the block layer should use
* submit_bio() instead.
*/
blk_qc_t submit_bio_noacct(struct bio *bio)
void submit_bio_noacct(struct bio *bio)
{
return submit_bio_noacct_nocheck(bio);
submit_bio_noacct_nocheck(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);
@ -1080,10 +1071,10 @@ EXPORT_SYMBOL(submit_bio_noacct);
* in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
* been called.
*/
blk_qc_t submit_bio(struct bio *bio)
void submit_bio(struct bio *bio)
{
if (blkcg_punt_bio_submit(bio))
return BLK_QC_T_NONE;
return;
/*
* If it's a regular read/write or a barrier with data attached,
@ -1123,19 +1114,91 @@ blk_qc_t submit_bio(struct bio *bio)
if (unlikely(bio_op(bio) == REQ_OP_READ &&
bio_flagged(bio, BIO_WORKINGSET))) {
unsigned long pflags;
blk_qc_t ret;
psi_memstall_enter(&pflags);
ret = submit_bio_noacct(bio);
submit_bio_noacct(bio);
psi_memstall_leave(&pflags);
return ret;
return;
}
return submit_bio_noacct(bio);
submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
/**
* bio_poll - poll for BIO completions
* @bio: bio to poll for
* @flags: BLK_POLL_* flags that control the behavior
*
* Poll for completions on the queue associated with the bio. Returns number of
* completed entries found.
*
* Note: the caller must either be the context that submitted @bio, or
* be in an RCU critical section to prevent freeing of @bio.
*/
int bio_poll(struct bio *bio, unsigned int flags)
{
struct request_queue *q = bio->bi_disk->queue;
blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
int ret;
if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;
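/* issue any plugged submissions first, otherwise there is nothing to poll for */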
if (current->plug)
blk_flush_plug_list(current->plug, false);
if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
return 0;
if (WARN_ON_ONCE(!queue_is_mq(q)))
ret = 0; /* not yet implemented, should not happen */
else
ret = blk_mq_poll(q, cookie, flags);
blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
/*
* Helper to implement file_operations.iopoll. Requires the bio to be stored
* in iocb->private, and cleared before freeing the bio.
*/
int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags)
{
struct bio *bio;
int ret = 0;
/*
* Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
* point to a freshly allocated bio at this point. If that happens
* we have a few cases to consider:
*
* 1) the bio is being initialized and bi_disk is NULL. We can just
* do nothing in this case
* 2) the bio points to a not poll enabled device. bio_poll will catch
* this and return 0
* 3) the bio points to a poll capable device, including but not
* limited to the one that the original bio pointed to. In this
* case we will call into the actual poll method and poll for I/O,
* even if we don't need to, but it won't cause harm either.
*
* For cases 2) and 3) above the RCU grace period ensures that bi_disk
* is still allocated. Because partitions hold a reference to the whole
* device bdev and thus disk, the disk is also still valid. Grabbing
* a reference to the queue in bio_poll() ensures the hctxs and requests
* are still valid as well.
*/
rcu_read_lock();
bio = READ_ONCE(kiocb->private);
if (bio && bio->bi_disk)
ret = bio_poll(bio, flags);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
/**
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* for the new queue limits


@ -36,6 +36,8 @@ bool blk_rq_is_poll(struct request *rq)
return false;
if (rq->mq_hctx->type != HCTX_TYPE_POLL)
return false;
if (WARN_ON_ONCE(!rq->bio))
return false;
return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@ -43,7 +45,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
do {
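/* blk_rq_is_poll() has verified rq->bio, so polling via the bio is safe */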
blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0);
bio_poll(rq->bio, 0);
cond_resched();
} while (!completion_done(wait));
}


@ -63,6 +63,9 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
return bucket;
}
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
blk_qc_t qc)
{
@ -79,6 +82,13 @@ static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
return blk_mq_tag_to_rq(hctx->tags, tag);
}
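/*
 * A poll cookie packs the hardware queue index above BLK_QC_T_SHIFT and
 * the tag below it; BLK_QC_T_INTERNAL marks a scheduler-internal tag.
 * For example, hw queue 2 with driver tag 5 encodes as (2 << 16) | 5.
 */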
static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
(rq->tag != -1 ?
rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}
/*
* Check if any of the ctx, dispatch list or elevator
* have pending work in this hardware queue.
@ -797,6 +807,8 @@ void blk_mq_start_request(struct request *rq)
if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
q->integrity.profile->prepare_fn(rq);
#endif
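/*
 * For polled I/O the cookie is published in the bio itself, so bio_poll()
 * can find the hardware queue without any state kept by the submitter.
 */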
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);
@ -2035,19 +2047,15 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
}
static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie, bool last)
struct request *rq, bool last)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
.rq = rq,
.last = last,
};
blk_qc_t new_cookie;
blk_status_t ret;
new_cookie = request_to_qc_t(hctx, rq);
/*
* For OK queue, we are done. For error, caller may kill it.
* Any other error (busy), just add it to our list as we
@ -2057,7 +2065,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
switch (ret) {
case BLK_STS_OK:
blk_mq_update_dispatch_busy(hctx, false);
*cookie = new_cookie;
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
@ -2066,7 +2073,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
break;
default:
blk_mq_update_dispatch_busy(hctx, false);
*cookie = BLK_QC_T_NONE;
break;
}
@ -2075,7 +2081,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
bool bypass_insert, bool last)
{
struct request_queue *q = rq->q;
@ -2105,7 +2110,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
goto insert;
}
return __blk_mq_issue_directly(hctx, rq, cookie, last);
return __blk_mq_issue_directly(hctx, rq, last);
insert:
if (bypass_insert)
return BLK_STS_RESOURCE;
@ -2119,7 +2124,6 @@ insert:
* blk_mq_try_issue_directly - Try to send a request directly to device driver.
* @hctx: Pointer of the associated hardware queue.
* @rq: Pointer to request to be sent.
* @cookie: Request queue cookie.
*
* If the device has enough resources to accept a new request now, send the
* request directly to device driver. Else, insert at hctx->dispatch queue, so
@ -2127,7 +2131,7 @@ insert:
* queue have higher priority.
*/
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
struct request *rq)
{
blk_status_t ret;
int srcu_idx;
@ -2136,7 +2140,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
hctx_lock(hctx, &srcu_idx);
ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_request_bypass_insert(rq, false, true);
else if (ret != BLK_STS_OK)
@ -2149,11 +2153,10 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
blk_status_t ret;
int srcu_idx;
blk_qc_t unused_cookie;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
hctx_lock(hctx, &srcu_idx);
ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
hctx_unlock(hctx, srcu_idx);
return ret;
@ -2233,10 +2236,8 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
*
* It will not queue the request if there is an error with the bio, or at the
* request creation.
*
* Returns: Request queue cookie.
*/
blk_qc_t blk_mq_submit_bio(struct bio *bio)
void blk_mq_submit_bio(struct bio *bio)
{
struct request_queue *q = bio->bi_disk->queue;
const int is_sync = op_is_sync(bio->bi_opf);
@ -2248,9 +2249,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
unsigned int nr_segs;
blk_qc_t cookie;
blk_status_t ret;
bool hipri;
blk_queue_bounce(q, &bio);
__blk_queue_split(&bio, &nr_segs);
@ -2267,8 +2266,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
rq_qos_throttle(q, bio);
hipri = bio->bi_opf & REQ_POLLED;
data.cmd_flags = bio->bi_opf;
rq = __blk_mq_alloc_request(&data);
if (unlikely(!rq)) {
@ -2282,8 +2279,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
rq_qos_track(q, rq, bio);
cookie = request_to_qc_t(data.hctx, rq);
blk_mq_bio_to_request(rq, bio, nr_segs);
ret = blk_crypto_init_request(rq);
@ -2291,7 +2286,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
bio->bi_status = ret;
bio_endio(bio);
blk_mq_free_request(rq);
return BLK_QC_T_NONE;
return;
}
plug = blk_mq_plug(q, bio);
@ -2347,8 +2342,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
if (same_queue_rq) {
data.hctx = same_queue_rq->mq_hctx;
trace_block_unplug(q, 1, true);
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
blk_mq_try_issue_directly(data.hctx, same_queue_rq);
}
} else if ((q->nr_hw_queues > 1 && is_sync) ||
!data.hctx->dispatch_busy) {
@ -2356,18 +2350,15 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
* There is no scheduler and we can try to send directly
* to the hardware.
*/
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
blk_mq_try_issue_directly(data.hctx, rq);
} else {
/* Default case. */
blk_mq_sched_insert_request(rq, false, true, true);
}
if (!hipri)
return BLK_QC_T_NONE;
return cookie;
return;
queue_exit:
blk_queue_exit(q);
return BLK_QC_T_NONE;
}
static size_t order_to_size(unsigned int order)
@ -4021,25 +4012,8 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
return 0;
}
/**
* blk_poll - poll for IO completions
* @q: the queue
* @cookie: cookie passed back at IO submission time
* @flags: BLK_POLL_* flags that control the behavior
*
* Description:
* Poll for completions on the passed in queue. Returns number of
* completed entries found.
*/
int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
{
if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;
if (current->plug)
blk_flush_plug_list(current->plug, false);
if (!(flags & BLK_POLL_NOSLEEP) &&
q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
if (blk_mq_poll_hybrid(q, cookie))
@ -4047,7 +4021,6 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
}
return blk_mq_poll_classic(q, cookie, flags);
}
EXPORT_SYMBOL_GPL(blk_poll);
unsigned int blk_mq_rq_cpu(struct request *rq)
{


@ -37,6 +37,8 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);


@ -51,7 +51,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
void blk_free_flush_queue(struct blk_flush_queue *q);
void blk_freeze_queue(struct request_queue *q);
blk_qc_t submit_bio_noacct_nocheck(struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
static inline bool biovec_phys_mergeable(struct request_queue *q,
struct bio_vec *vec1, struct bio_vec *vec2)


@ -280,7 +280,7 @@ out:
return err;
}
static blk_qc_t brd_submit_bio(struct bio *bio)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_disk->private_data;
struct bio_vec bvec;
@ -288,8 +288,10 @@ static blk_qc_t brd_submit_bio(struct bio *bio)
struct bvec_iter iter;
sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error;
if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
bio_io_error(bio);
return;
}
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@ -301,16 +303,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio)
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bio_op(bio), sector);
if (err)
goto io_error;
if (err) {
bio_io_error(bio);
return;
}
sector += len >> SECTOR_SHIFT;
}
bio_endio(bio);
return BLK_QC_T_NONE;
io_error:
bio_io_error(bio);
return BLK_QC_T_NONE;
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,


@ -1450,7 +1450,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern blk_qc_t drbd_submit_bio(struct bio *bio);
void drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


@ -1594,7 +1594,7 @@ void do_submit(struct work_struct *ws)
}
}
blk_qc_t drbd_submit_bio(struct bio *bio)
void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_disk->private_data;
unsigned long start_jif;
@ -1610,7 +1610,6 @@ blk_qc_t drbd_submit_bio(struct bio *bio)
inc_ap_bio(device);
__drbd_make_request(device, bio, start_jif);
return BLK_QC_T_NONE;
}
static bool net_timeout_reached(struct drbd_request *net_req,


@ -1416,7 +1416,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
return &nullb->queues[index];
}
static blk_qc_t null_submit_bio(struct bio *bio)
static void null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
@ -1428,7 +1428,6 @@ static blk_qc_t null_submit_bio(struct bio *bio)
cmd->bio = bio;
null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
return BLK_QC_T_NONE;
}
static bool should_timeout_request(struct request *rq)


@ -2367,7 +2367,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
}
static blk_qc_t pkt_submit_bio(struct bio *bio)
static void pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@ -2390,7 +2390,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
*/
if (bio_data_dir(bio) == READ) {
pkt_make_request_read(pd, bio);
return BLK_QC_T_NONE;
return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@ -2422,10 +2422,9 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
pkt_make_request_write(bio->bi_disk->queue, split);
} while (split != bio);
return BLK_QC_T_NONE;
return;
end_io:
bio_io_error(bio);
return BLK_QC_T_NONE;
}
static void pkt_init_queue(struct pktcdvd_device *pd)


@ -579,7 +579,7 @@ out:
return next;
}
static blk_qc_t ps3vram_submit_bio(struct bio *bio)
static void ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@ -595,13 +595,11 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio)
spin_unlock_irq(&priv->lock);
if (busy)
return BLK_QC_T_NONE;
return;
do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);
return BLK_QC_T_NONE;
}
static const struct block_device_operations ps3vram_fops = {


@ -50,7 +50,7 @@ struct rsxx_bio_meta {
static struct kmem_cache *bio_meta_pool;
static blk_qc_t rsxx_submit_bio(struct bio *bio);
static void rsxx_submit_bio(struct bio *bio);
/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
@ -120,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
}
}
static blk_qc_t rsxx_submit_bio(struct bio *bio)
static void rsxx_submit_bio(struct bio *bio)
{
struct rsxx_cardinfo *card = bio->bi_disk->private_data;
struct rsxx_bio_meta *bio_meta;
@ -169,7 +169,7 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio)
if (st)
goto queue_err;
return BLK_QC_T_NONE;
return;
queue_err:
kmem_cache_free(bio_meta_pool, bio_meta);
@ -177,7 +177,6 @@ req_err:
if (st)
bio->bi_status = st;
bio_endio(bio);
return BLK_QC_T_NONE;
}
/*----------------- Device Setup -------------------*/


@ -519,7 +519,7 @@ static int mm_check_plugged(struct cardinfo *card)
return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
}
static blk_qc_t mm_submit_bio(struct bio *bio)
static void mm_submit_bio(struct bio *bio)
{
struct cardinfo *card = bio->bi_disk->private_data;
@ -536,8 +536,6 @@ static blk_qc_t mm_submit_bio(struct bio *bio)
if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
return BLK_QC_T_NONE;
}
static irqreturn_t mm_interrupt(int irq, void *__card)


@ -1593,22 +1593,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
static blk_qc_t zram_submit_bio(struct bio *bio)
static void zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
goto error;
bio_io_error(bio);
return;
}
__zram_make_request(zram, bio);
return BLK_QC_T_NONE;
error:
bio_io_error(bio);
return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,


@ -47,7 +47,7 @@ static struct pblk_global_caches pblk_caches = {
struct bio_set pblk_bio_set;
static blk_qc_t pblk_submit_bio(struct bio *bio)
static void pblk_submit_bio(struct bio *bio)
{
struct pblk *pblk = bio->bi_disk->queue->queuedata;
@ -55,7 +55,7 @@ static blk_qc_t pblk_submit_bio(struct bio *bio)
pblk_discard(pblk, bio);
if (!(bio->bi_opf & REQ_PREFLUSH)) {
bio_endio(bio);
return BLK_QC_T_NONE;
return;
}
}
@ -75,8 +75,6 @@ static blk_qc_t pblk_submit_bio(struct bio *bio)
pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
return BLK_QC_T_NONE;
}
static const struct block_device_operations pblk_bops = {


@ -1170,7 +1170,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
blk_qc_t cached_dev_submit_bio(struct bio *bio)
void cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct bcache_device *d = bio->bi_disk->private_data;
@ -1181,7 +1181,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
dc->io_disable)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return BLK_QC_T_NONE;
return;
}
if (likely(d->c)) {
@ -1225,8 +1225,6 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
} else
/* I/O request sent to backing device */
detached_dev_do_request(d, bio);
return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@ -1275,7 +1273,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
blk_qc_t flash_dev_submit_bio(struct bio *bio)
void flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
@ -1284,7 +1282,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return BLK_QC_T_NONE;
return;
}
s = search_alloc(bio, d);
@ -1300,7 +1298,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
return BLK_QC_T_NONE;
return;
} else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
@ -1316,7 +1314,6 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
}
continue_at(cl, search_free, NULL);
return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,


@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
blk_qc_t cached_dev_submit_bio(struct bio *bio);
void cached_dev_submit_bio(struct bio *bio);
void bch_flash_dev_request_init(struct bcache_device *d);
blk_qc_t flash_dev_submit_bio(struct bio *bio);
void flash_dev_submit_bio(struct bio *bio);
extern struct kmem_cache *bch_search_cache;


@ -1293,14 +1293,13 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}
static blk_qc_t __map_bio(struct dm_target_io *tio)
static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
blk_qc_t ret = BLK_QC_T_NONE;
clone->bi_end_io = clone_endio;
@ -1328,7 +1327,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
bio_dev(io->orig_bio), sector);
ret = submit_bio_noacct(clone);
submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
if (unlikely(swap_bios_limit(ti, clone))) {
@ -1350,8 +1349,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
return ret;
}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
@ -1438,7 +1435,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
}
static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target_io *tio, unsigned *len)
{
struct bio *clone = &tio->clone;
@ -1448,8 +1445,7 @@ static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
__bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, *len);
return __map_bio(tio);
__map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
@ -1463,7 +1459,7 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
while ((bio = bio_list_pop(&blist))) {
tio = container_of(bio, struct dm_target_io, clone);
(void) __clone_and_map_simple_bio(ci, tio, len);
__clone_and_map_simple_bio(ci, tio, len);
}
}
@ -1514,7 +1510,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
free_tio(tio);
return r;
}
(void) __map_bio(tio);
__map_bio(tio);
return 0;
}
@ -1629,11 +1625,10 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
static void __split_and_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
init_clone_info(&ci, md, map, bio);
@ -1677,7 +1672,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
bio_chain(b, bio);
trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
ret = submit_bio_noacct(bio);
submit_bio_noacct(bio);
break;
}
}
@ -1685,13 +1680,11 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
/* drop the extra reference count */
dec_pending(ci.io, errno_to_blk_status(error));
return ret;
}
static blk_qc_t dm_submit_bio(struct bio *bio)
static void dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_disk->private_data;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
@ -1716,10 +1709,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
if (is_abnormal_io(bio))
blk_queue_split(&bio);
ret = __split_and_process_bio(md, map, bio);
__split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
return ret;
}
/*-----------------------------------------------------------------


@ -459,7 +459,7 @@ check_suspended:
}
EXPORT_SYMBOL(md_handle_request);
static blk_qc_t md_submit_bio(struct bio *bio)
static void md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
const int sgrp = op_stat_group(bio_op(bio));
@ -468,12 +468,12 @@ static blk_qc_t md_submit_bio(struct bio *bio)
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
return BLK_QC_T_NONE;
return;
}
if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
return;
}
blk_queue_split(&bio);
@ -482,7 +482,7 @@ static blk_qc_t md_submit_bio(struct bio *bio)
if (bio_sectors(bio) != 0)
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return BLK_QC_T_NONE;
return;
}
/*
@ -499,8 +499,6 @@ static blk_qc_t md_submit_bio(struct bio *bio)
part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
part_stat_unlock();
return BLK_QC_T_NONE;
}
/* mddev_suspend makes sure no new requests are submitted


@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
return err;
}
static blk_qc_t nd_blk_submit_bio(struct bio *bio)
static void nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct nd_namespace_blk *nsblk = bio->bi_disk->private_data;
@ -173,7 +173,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bool do_acct;
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
return;
bip = bio_integrity(bio);
rw = bio_data_dir(bio);
@ -199,7 +199,6 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bio_end_io_acct(bio, start);
bio_endio(bio);
return BLK_QC_T_NONE;
}
static int nsblk_rw_bytes(struct nd_namespace_common *ndns,

View File

@ -1439,7 +1439,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
return ret;
}
static blk_qc_t btt_submit_bio(struct bio *bio)
static void btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = bio->bi_disk->private_data;
@ -1450,7 +1450,7 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
bool do_acct;
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
return;
do_acct = blk_queue_io_stat(bio->bi_disk->queue);
if (do_acct)
@ -1482,7 +1482,6 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
bio_end_io_acct(bio, start);
bio_endio(bio);
return BLK_QC_T_NONE;
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,


@ -189,7 +189,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
return rc;
}
static blk_qc_t pmem_submit_bio(struct bio *bio)
static void pmem_submit_bio(struct bio *bio)
{
int ret = 0;
blk_status_t rc = 0;
@ -228,7 +228,6 @@ static blk_qc_t pmem_submit_bio(struct bio *bio)
bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
return BLK_QC_T_NONE;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,


@ -641,7 +641,7 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd)
struct nvme_ns, cdev);
q = ns->queue;
if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
ret = blk_poll(q, request_to_qc_t(req->mq_hctx, req), 0);
ret = bio_poll(req->bio, 0);
return ret;
}
#ifdef CONFIG_NVME_MULTIPATH
@ -741,7 +741,7 @@ int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd)
req = READ_ONCE(ioucmd->cookie);
q = ns->queue;
if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
ret = blk_poll(q, request_to_qc_t(req->mq_hctx, req), 0);
ret = bio_poll(req->bio, 0);
}
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;


@ -293,12 +293,11 @@ static bool nvme_available_path(struct nvme_ns_head *head)
return false;
}
static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
static void nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
/*
@ -316,7 +315,7 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
trace_block_bio_remap(bio->bi_disk->queue, bio,
disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
ret = submit_bio_noacct(bio);
submit_bio_noacct(bio);
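/* the remapped bio carries its own disk and cookie, so it stays pollable */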
} else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
@ -331,7 +330,6 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
}
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)


@ -31,7 +31,7 @@
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_submit_bio(struct bio *bio);
static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
@ -865,7 +865,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
up_write(&dcssblk_devices_sem);
}
static blk_qc_t
static void
dcssblk_submit_bio(struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
@ -923,10 +923,9 @@ dcssblk_submit_bio(struct bio *bio)
bytes_done += bvec.bv_len;
}
bio_endio(bio);
return BLK_QC_T_NONE;
return;
fail:
bio_io_error(bio);
return BLK_QC_T_NONE;
}
static long


@ -182,7 +182,7 @@ static unsigned long xpram_highest_page_index(void)
/*
* Block device make request function.
*/
static blk_qc_t xpram_submit_bio(struct bio *bio)
static void xpram_submit_bio(struct bio *bio)
{
xpram_device_t *xdev = bio->bi_disk->private_data;
struct bio_vec bvec;
@ -224,10 +224,9 @@ static blk_qc_t xpram_submit_bio(struct bio *bio)
}
}
bio_endio(bio);
return BLK_QC_T_NONE;
return;
fail:
bio_io_error(bio);
return BLK_QC_T_NONE;
}
static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)


@ -244,7 +244,6 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
bool should_dirty = false;
struct bio bio;
ssize_t ret;
blk_qc_t qc;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
@ -285,13 +284,12 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
if (iocb->ki_flags & IOCB_HIPRI)
bio_set_polled(&bio, iocb);
qc = submit_bio(&bio);
submit_bio(&bio);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(bio.bi_private))
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!blk_poll(bdev_get_queue(bdev), qc, 0))
if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
@ -324,14 +322,6 @@ struct blkdev_dio {
static struct bio_set blkdev_dio_pool;
static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
{
struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
struct request_queue *q = bdev_get_queue(bdev);
return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}
static void blkdev_bio_end_io(struct bio *bio)
{
struct blkdev_dio *dio = bio->bi_private;
@ -345,6 +335,8 @@ static void blkdev_bio_end_io(struct bio *bio)
struct kiocb *iocb = dio->iocb;
ssize_t ret;
WRITE_ONCE(iocb->private, NULL);
if (likely(!dio->bio.bi_status)) {
ret = dio->size;
iocb->ki_pos += ret;
@ -383,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bool do_poll = (iocb->ki_flags & IOCB_HIPRI);
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
int ret = 0;
if ((pos | iov_iter_alignment(iter)) &
@ -446,10 +437,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
if (do_poll)
bio_set_polled(bio, iocb);
qc = submit_bio(bio);
submit_bio(bio);
if (do_poll)
WRITE_ONCE(iocb->ki_cookie, qc);
WRITE_ONCE(iocb->private, bio);
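/* read back by iocb_bio_iopoll(); blkdev_bio_end_io() clears it again */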
break;
}
@ -483,7 +474,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
if (!READ_ONCE(dio->waiter))
break;
if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
if (!do_poll || !bio_poll(bio, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
@ -2082,7 +2073,7 @@ const struct file_operations def_blk_fops = {
.llseek = block_llseek,
.read_iter = blkdev_read_iter,
.write_iter = blkdev_write_iter,
.iopoll = blkdev_iopoll,
.iopoll = iocb_bio_iopoll,
.mmap = generic_file_mmap,
.fsync = blkdev_fsync,
.unlocked_ioctl = block_ioctl,


@ -7863,7 +7863,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
return dip;
}
static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter,
static void btrfs_submit_direct(const struct iomap_iter *iter,
struct bio *dio_bio, loff_t file_offset)
{
struct inode *inode = iter->inode;
@ -7892,7 +7892,7 @@ static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter,
}
dio_bio->bi_status = BLK_STS_RESOURCE;
bio_endio(dio_bio);
return BLK_QC_T_NONE;
return;
}
if (!write && csum) {
@ -7968,12 +7968,11 @@ static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter,
start_sector += clone_len >> 9;
file_offset += clone_len;
} while (submit_len > 0);
return BLK_QC_T_NONE;
return;
out_err:
dip->dio_bio->bi_status = status;
btrfs_dio_private_put(dip);
return BLK_QC_T_NONE;
}
static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,


@ -901,7 +901,7 @@ const struct file_operations ext4_file_operations = {
.llseek = ext4_llseek,
.read_iter = ext4_file_read_iter,
.write_iter = ext4_file_write_iter,
.iopoll = iomap_dio_iopoll,
.iopoll = iocb_bio_iopoll,
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,


@ -1365,7 +1365,7 @@ const struct file_operations gfs2_file_fops = {
.llseek = gfs2_llseek,
.read_iter = gfs2_file_read_iter,
.write_iter = gfs2_file_write_iter,
.iopoll = iomap_dio_iopoll,
.iopoll = iocb_bio_iopoll,
.unlocked_ioctl = gfs2_ioctl,
.compat_ioctl = gfs2_compat_ioctl,
.mmap = gfs2_mmap,
@ -1398,7 +1398,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.llseek = gfs2_llseek,
.read_iter = gfs2_file_read_iter,
.write_iter = gfs2_file_write_iter,
.iopoll = iomap_dio_iopoll,
.iopoll = iocb_bio_iopoll,
.unlocked_ioctl = gfs2_ioctl,
.compat_ioctl = gfs2_compat_ioctl,
.mmap = gfs2_mmap,


@ -38,8 +38,7 @@ struct iomap_dio {
struct {
struct iov_iter *iter;
struct task_struct *waiter;
struct request_queue *last_queue;
blk_qc_t cookie;
struct bio *poll_bio;
} submit;
/* used for aio completion: */
@ -49,29 +48,20 @@ struct iomap_dio {
};
};
int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags)
{
struct request_queue *q = READ_ONCE(kiocb->private);
if (!q)
return 0;
return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
atomic_inc(&dio->ref);
if (dio->iocb->ki_flags & IOCB_HIPRI)
if (dio->iocb->ki_flags & IOCB_HIPRI) {
bio_set_polled(bio, dio->iocb);
dio->submit.poll_bio = bio;
}
dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev);
if (dio->dops && dio->dops->submit_io)
dio->submit.cookie = dio->dops->submit_io(iter, bio, pos);
dio->dops->submit_io(iter, bio, pos);
else
dio->submit.cookie = submit_bio(bio);
submit_bio(bio);
}
ssize_t iomap_dio_complete(struct iomap_dio *dio)
@ -162,9 +152,11 @@ static void iomap_dio_bio_end_io(struct bio *bio)
} else if (dio->flags & IOMAP_DIO_WRITE) {
struct inode *inode = file_inode(dio->iocb->ki_filp);
WRITE_ONCE(dio->iocb->private, NULL);
INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
} else {
WRITE_ONCE(dio->iocb->private, NULL);
iomap_dio_complete_work(&dio->aio.work);
}
}
@ -465,8 +457,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->submit.iter = iter;
dio->submit.waiter = current;
dio->submit.cookie = BLK_QC_T_NONE;
dio->submit.last_queue = NULL;
dio->submit.poll_bio = NULL;
if (iov_iter_rw(iter) == READ) {
if (iomi.pos >= dio->i_size)
@ -571,8 +562,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (dio->flags & IOMAP_DIO_WRITE_FUA)
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
WRITE_ONCE(iocb->private, dio->submit.last_queue);
WRITE_ONCE(iocb->private, dio->submit.poll_bio);
/*
* We are about to drop our additional submission reference, which
@ -599,10 +589,8 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!READ_ONCE(dio->submit.waiter))
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!dio->submit.last_queue ||
!blk_poll(dio->submit.last_queue,
dio->submit.cookie, 0))
if (!dio->submit.poll_bio ||
!bio_poll(dio->submit.poll_bio, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);


@ -1451,7 +1451,7 @@ const struct file_operations xfs_file_operations = {
.write_iter = xfs_file_write_iter,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.iopoll = iomap_dio_iopoll,
.iopoll = iocb_bio_iopoll,
.unlocked_ioctl = xfs_file_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = xfs_file_compat_ioctl,


@ -1178,7 +1178,7 @@ static const struct file_operations zonefs_file_operations = {
.write_iter = zonefs_file_write_iter,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.iopoll = iomap_dio_iopoll,
.iopoll = iocb_bio_iopoll,
};
static struct kmem_cache *zonefs_inode_cachep;


@ -435,7 +435,7 @@ static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
extern blk_qc_t submit_bio(struct bio *);
void submit_bio(struct bio *bio);
extern void bio_endio(struct bio *);


@ -148,9 +148,9 @@ struct blk_mq_hw_ctx {
/** @kobj: Kernel object for sysfs. */
struct kobject kobj;
/** @poll_considered: Count times blk_poll() was called. */
/** @poll_considered: Count times blk_mq_poll() was called. */
unsigned long poll_considered;
/** @poll_invoked: Count how many requests blk_poll() polled. */
/** @poll_invoked: Count how many requests blk_mq_poll() polled. */
unsigned long poll_invoked;
/** @poll_success: Count how many polled requests were completed. */
unsigned long poll_success;
@ -615,22 +615,10 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
if (rq->tag != -1)
return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
BLK_QC_T_INTERNAL;
}
static inline void blk_mq_cleanup_rq(struct request *rq)
{
if (rq->q->mq_ops->cleanup_rq)
rq->q->mq_ops->cleanup_rq(rq);
}
blk_qc_t blk_mq_submit_bio(struct bio *bio);
#endif


@ -204,6 +204,9 @@ static inline void bio_issue_init(struct bio_issue *issue,
((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@ -224,8 +227,8 @@ struct bio {
struct bvec_iter bi_iter;
blk_qc_t bi_cookie;
bio_end_io_t *bi_end_io;
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
@ -451,7 +454,7 @@ enum req_flag_bits {
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
__REQ_POLLED, /* caller polls for completion using blk_poll */
__REQ_POLLED, /* caller polls for completion using bio_poll */
/* for driver use */
__REQ_DRV,
@ -564,11 +567,6 @@ static inline int op_stat_group(unsigned int op)
return op_is_write(op);
}
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
struct blk_rq_stat {
u64 mean;
u64 min;


@ -39,6 +39,7 @@ struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
@ -943,7 +944,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
blk_qc_t submit_bio_noacct(struct bio *bio);
void submit_bio_noacct(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
@ -995,7 +996,8 @@ blk_status_t errno_to_blk_status(int errno);
#define BLK_POLL_ONESHOT (1 << 0)
/* do not sleep to wait for the expected completion time */
#define BLK_POLL_NOSLEEP (1 << 1)
int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
int bio_poll(struct bio *bio, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@ -1924,7 +1926,7 @@ static inline void blk_ksm_unregister(struct request_queue *q) { }
struct block_device_operations {
blk_qc_t (*submit_bio) (struct bio *bio);
void (*submit_bio)(struct bio *bio);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);


@ -334,11 +334,7 @@ struct kiocb {
int ki_flags;
u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
union {
unsigned int ki_cookie; /* for ->iopoll */
struct wait_page_queue *ki_waitq; /* for async buffered IO */
};
struct wait_page_queue *ki_waitq; /* for async buffered IO */
randomized_struct_fields_end
};


@ -324,8 +324,8 @@ int iomap_writepages(struct address_space *mapping,
struct iomap_dio_ops {
int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
unsigned flags);
blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
loff_t file_offset);
void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
loff_t file_offset);
};
/*
@ -341,7 +341,6 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags);
#ifdef CONFIG_SWAP
struct file;


@ -326,8 +326,6 @@ int swap_readpage(struct page *page, bool synchronous)
struct bio *bio;
int ret = 0;
struct swap_info_struct *sis = page_swap_info(page);
blk_qc_t qc;
struct gendisk *disk;
unsigned long pflags;
VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
@ -372,7 +370,6 @@ int swap_readpage(struct page *page, bool synchronous)
ret = -ENOMEM;
goto out;
}
disk = bio->bi_disk;
/*
* Keep this task valid during swap readpage because the oom killer may
* attempt to access it in the page fault retry time check.
@ -385,13 +382,13 @@ int swap_readpage(struct page *page, bool synchronous)
}
count_vm_event(PSWPIN);
bio_get(bio);
qc = submit_bio(bio);
submit_bio(bio);
while (synchronous) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(bio->bi_private))
break;
if (!blk_poll(disk->queue, qc, 0))
if (!bio_poll(bio, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);