fs: get rid of the res2 iocb->ki_complete argument

The second argument was only used by the USB gadget code, yet every caller
pays the overhead of passing a zero that is forwarded into aio, where it
ends up as the aio res2 value. Now that everybody passes zero, kill off the
extra argument.

Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4c3d8accdc
commit 6b19b766e8
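For out-of-tree users of the interface the conversion is mechanical. Below is a minimal sketch, not taken from this patch, of how a hypothetical driver that completes a caller's kiocb would adapt; all my_* names are illustrative:

/*
 * Hypothetical example (not part of this patch): a driver that implements
 * its own ->ki_complete handler for an internal kiocb and then completes
 * the caller's kiocb with the result.
 */
#include <linux/fs.h>
#include <linux/kernel.h>

struct my_cmd {
	struct kiocb	iocb;		/* kiocb the driver submits itself */
	struct kiocb	*orig_iocb;	/* caller's kiocb to complete */
};

/* Matches the new prototype: void (*ki_complete)(struct kiocb *, long) */
static void my_endio(struct kiocb *iocb, long ret)
{
	struct my_cmd *cmd = container_of(iocb, struct my_cmd, iocb);

	/* Forward the result; the old trailing 0 argument is simply dropped. */
	cmd->orig_iocb->ki_complete(cmd->orig_iocb, ret);
}

static void my_prepare(struct my_cmd *cmd)
{
	/*
	 * A handler still declared with the old three-argument signature no
	 * longer matches ->ki_complete and is caught at build time.
	 */
	cmd->iocb.ki_complete = my_endio;
}

Note that aio itself now stores a hard-coded 0 in ki_res.res2 (see the fs/aio.c hunk below), so the res2 value userspace reads back via io_getevents() is unchanged.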
@@ -164,7 +164,7 @@ static void blkdev_bio_end_io(struct bio *bio)
 				ret = blk_status_to_errno(dio->bio.bi_status);
 			}
 
-			dio->iocb->ki_complete(iocb, ret, 0);
+			dio->iocb->ki_complete(iocb, ret);
 			if (dio->flags & DIO_MULTI_BIO)
 				bio_put(&dio->bio);
 		} else {
@@ -1076,7 +1076,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
 	af_alg_free_resources(areq);
 	sock_put(sk);
 
-	iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
+	iocb->ki_complete(iocb, err ? err : (int)resultlen);
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
@@ -554,7 +554,7 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 		blk_mq_complete_request(rq);
 }
 
-static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
 {
 	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
 
@@ -627,7 +627,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 		lo_rw_aio_do_completion(cmd);
 
 	if (ret != -EIOCBQUEUED)
-		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
+		lo_rw_aio_complete(&cmd->iocb, ret);
 	return 0;
 }
 
@@ -125,7 +125,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
 	return call_iter(iocb, &iter);
 }
 
-static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+static void nvmet_file_io_done(struct kiocb *iocb, long ret)
 {
 	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
 	u16 status = NVME_SC_SUCCESS;
@@ -222,7 +222,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 	}
 
 complete:
-	nvmet_file_io_done(&req->f.iocb, ret, 0);
+	nvmet_file_io_done(&req->f.iocb, ret);
 	return true;
 }
 
@@ -245,7 +245,7 @@ struct target_core_file_cmd {
 	struct bio_vec bvecs[];
 };
 
-static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
 {
 	struct target_core_file_cmd *cmd;
 
@@ -303,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		ret = call_read_iter(file, &aio_cmd->iocb, &iter);
 
 	if (ret != -EIOCBQUEUED)
-		cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
+		cmd_rw_aio_complete(&aio_cmd->iocb, ret);
 
 	return 0;
 }
@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 		kthread_unuse_mm(io_data->mm);
 	}
 
-	io_data->kiocb->ki_complete(io_data->kiocb, ret, 0);
+	io_data->kiocb->ki_complete(io_data->kiocb, ret);
 
 	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
@@ -469,7 +469,7 @@ static void ep_user_copy_worker(struct work_struct *work)
 		ret = -EFAULT;
 
 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
-	iocb->ki_complete(iocb, ret, 0);
+	iocb->ki_complete(iocb, ret);
 
 	kfree(priv->buf);
 	kfree(priv->to_free);
@@ -497,8 +497,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 		kfree(priv);
 		iocb->private = NULL;
 		iocb->ki_complete(iocb,
-				  req->actual ? req->actual : (long)req->status,
-				  0);
+				  req->actual ? req->actual : (long)req->status);
 	} else {
 		/* ep_copy_to_user() won't report both; we hide some faults */
 		if (unlikely(0 != req->status))
fs/aio.c
@@ -1417,7 +1417,7 @@ static void aio_remove_iocb(struct aio_kiocb *iocb)
 	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 }
 
-static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void aio_complete_rw(struct kiocb *kiocb, long res)
 {
 	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
 
@@ -1437,7 +1437,7 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 	}
 
 	iocb->ki_res.res = res;
-	iocb->ki_res.res2 = res2;
+	iocb->ki_res.res2 = 0;
 	iocb_put(iocb);
 }
 
@@ -1508,7 +1508,7 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
 		ret = -EINTR;
 		fallthrough;
 	default:
-		req->ki_complete(req, ret, 0);
+		req->ki_complete(req, ret);
 	}
 }
 
@@ -37,11 +37,11 @@ static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
 /*
  * Handle completion of a read from the cache.
  */
-static void cachefiles_read_complete(struct kiocb *iocb, long ret, long ret2)
+static void cachefiles_read_complete(struct kiocb *iocb, long ret)
 {
 	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
 
-	_enter("%ld,%ld", ret, ret2);
+	_enter("%ld", ret);
 
 	if (ki->term_func) {
 		if (ret >= 0)
@@ -139,7 +139,7 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
 		fallthrough;
 	default:
 		ki->was_async = false;
-		cachefiles_read_complete(&ki->iocb, ret, 0);
+		cachefiles_read_complete(&ki->iocb, ret);
 		if (ret > 0)
 			ret = 0;
 		break;
@@ -159,12 +159,12 @@ presubmission_error:
 /*
  * Handle completion of a write to the cache.
  */
-static void cachefiles_write_complete(struct kiocb *iocb, long ret, long ret2)
+static void cachefiles_write_complete(struct kiocb *iocb, long ret)
 {
 	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
 	struct inode *inode = file_inode(ki->iocb.ki_filp);
 
-	_enter("%ld,%ld", ret, ret2);
+	_enter("%ld", ret);
 
 	/* Tell lockdep we inherited freeze protection from submission thread */
 	__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
@@ -244,7 +244,7 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
 		fallthrough;
 	default:
 		ki->was_async = false;
-		cachefiles_write_complete(&ki->iocb, ret, 0);
+		cachefiles_write_complete(&ki->iocb, ret);
 		if (ret > 0)
 			ret = 0;
 		break;
@@ -1023,7 +1023,7 @@ static void ceph_aio_complete(struct inode *inode,
 	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
 						CEPH_CAP_FILE_RD));
 
-	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
+	aio_req->iocb->ki_complete(aio_req->iocb, ret);
 
 	ceph_free_cap_flush(aio_req->prealloc_cf);
 	kfree(aio_req);
@@ -3184,7 +3184,7 @@ restart_loop:
 	mutex_unlock(&ctx->aio_mutex);
 
 	if (ctx->iocb && ctx->iocb->ki_complete)
-		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
+		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
 	else
 		complete(&ctx->done);
 }
@@ -3917,7 +3917,7 @@ again:
 	mutex_unlock(&ctx->aio_mutex);
 
 	if (ctx->iocb && ctx->iocb->ki_complete)
-		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
+		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
 	else
 		complete(&ctx->done);
 }
@@ -307,7 +307,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 
 		if (ret > 0 && dio->op == REQ_OP_WRITE)
 			ret = generic_write_sync(dio->iocb, ret);
-		dio->iocb->ki_complete(dio->iocb, ret, 0);
+		dio->iocb->ki_complete(dio->iocb, ret);
 	}
 
 	kmem_cache_free(dio_cache, dio);
@@ -687,7 +687,7 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
 			spin_unlock(&fi->lock);
 		}
 
-		io->iocb->ki_complete(io->iocb, res, 0);
+		io->iocb->ki_complete(io->iocb, res);
 	}
 
 	kref_put(&io->refcnt, fuse_io_release);
@@ -2689,7 +2689,7 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 	__io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
 }
 
-static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw(struct kiocb *kiocb, long res)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
@@ -2700,7 +2700,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 	io_req_task_work_add(req);
 }
 
-static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
@@ -2913,7 +2913,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 		ret = -EINTR;
 		fallthrough;
 	default:
-		kiocb->ki_complete(kiocb, ret, 0);
+		kiocb->ki_complete(kiocb, ret);
 	}
 }
 
@@ -125,7 +125,7 @@ static void iomap_dio_complete_work(struct work_struct *work)
 	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
 	struct kiocb *iocb = dio->iocb;
 
-	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
+	iocb->ki_complete(iocb, iomap_dio_complete(dio));
 }
 
 /*
@@ -275,7 +275,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 			res = (long) dreq->count;
 			WARN_ON_ONCE(dreq->count < 0);
 		}
-		dreq->iocb->ki_complete(dreq->iocb, res, 0);
+		dreq->iocb->ki_complete(dreq->iocb, res);
 	}
 
 	complete(&dreq->completion);
@@ -272,14 +272,14 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
 	kmem_cache_free(ovl_aio_request_cachep, aio_req);
 }
 
-static void ovl_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+static void ovl_aio_rw_complete(struct kiocb *iocb, long res)
 {
 	struct ovl_aio_req *aio_req = container_of(iocb,
 						   struct ovl_aio_req, iocb);
 	struct kiocb *orig_iocb = aio_req->orig_iocb;
 
 	ovl_aio_cleanup_handler(aio_req);
-	orig_iocb->ki_complete(orig_iocb, res, res2);
+	orig_iocb->ki_complete(orig_iocb, res);
 }
 
 static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -330,7 +330,7 @@ struct kiocb {
 	randomized_struct_fields_start
 
 	loff_t			ki_pos;
-	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+	void (*ki_complete)(struct kiocb *iocb, long ret);
 	void			*private;
 	int			ki_flags;
 	u16			ki_hint;