commit b6773cdb0e
Merge tag 'for-5.16/ki_complete-2021-10-29' of git://git.kernel.dk/linux-block

Pull kiocb->ki_complete() cleanup from Jens Axboe:

 "This removes the res2 argument from kiocb->ki_complete(). Only the
  USB gadget code used it, everybody else passes 0. The USB guys checked
  the user gadget code they could find, and everybody just uses res as
  expected for the async interface"

* tag 'for-5.16/ki_complete-2021-10-29' of git://git.kernel.dk/linux-block:
  fs: get rid of the res2 iocb->ki_complete argument
  usb: remove res2 argument from gadget code completions
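
For context before the diff: the callback simply loses its trailing argument. The sketch below is illustrative only (struct sample_req and the sample_*() names are made up, not part of this patch); it mirrors the pattern the patch applies to drivers that embed a kiocb and complete it themselves:

	#include <linux/fs.h>		/* struct kiocb, -EIOCBQUEUED */
	#include <linux/kernel.h>	/* container_of() */

	struct sample_req {
		struct kiocb	iocb;	/* embedded kiocb, as in loop/nvmet/target_core_file */
		long		result;
	};

	/* Before: the completion callback took a second result that every
	 * in-tree caller except the USB gadget code passed as 0.
	 */
	static void sample_aio_complete_old(struct kiocb *iocb, long ret, long ret2)
	{
		struct sample_req *req = container_of(iocb, struct sample_req, iocb);

		req->result = ret;	/* ret2 was never looked at */
	}

	/* After: the argument is gone from the ki_complete prototype ... */
	static void sample_aio_complete(struct kiocb *iocb, long ret)
	{
		struct sample_req *req = container_of(iocb, struct sample_req, iocb);

		req->result = ret;
	}

	/* ... and from every call site, e.g. when submission did not queue. */
	static void sample_submit_done(struct sample_req *req, long ret)
	{
		if (ret != -EIOCBQUEUED)
			sample_aio_complete(&req->iocb, ret);
	}

In the fs/aio.c hunks below, the userspace-visible io_event.res2 field is now set to a literal 0 instead of being copied from an argument that callers already passed as 0.
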
@@ -163,7 +163,7 @@ static void blkdev_bio_end_io(struct bio *bio)
 				ret = blk_status_to_errno(dio->bio.bi_status);
 			}
 
-			dio->iocb->ki_complete(iocb, ret, 0);
+			dio->iocb->ki_complete(iocb, ret);
 			bio_put(&dio->bio);
 		} else {
 			struct task_struct *waiter = dio->waiter;

@@ -295,7 +295,7 @@ static void blkdev_bio_end_io_async(struct bio *bio)
 		ret = blk_status_to_errno(bio->bi_status);
 	}
 
-	iocb->ki_complete(iocb, ret, 0);
+	iocb->ki_complete(iocb, ret);
 
 	if (dio->flags & DIO_SHOULD_DIRTY) {
 		bio_check_pages_dirty(bio);

@@ -1076,7 +1076,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
 	af_alg_free_resources(areq);
 	sock_put(sk);
 
-	iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
+	iocb->ki_complete(iocb, err ? err : (int)resultlen);
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 

@@ -382,7 +382,7 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 		blk_mq_complete_request(rq);
 }
 
-static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
 {
 	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
 

@@ -455,7 +455,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 	lo_rw_aio_do_completion(cmd);
 
 	if (ret != -EIOCBQUEUED)
-		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
+		lo_rw_aio_complete(&cmd->iocb, ret);
 	return 0;
 }
 

@@ -125,7 +125,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
 	return call_iter(iocb, &iter);
 }
 
-static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+static void nvmet_file_io_done(struct kiocb *iocb, long ret)
 {
 	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
 	u16 status = NVME_SC_SUCCESS;

@@ -222,7 +222,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 	}
 
 complete:
-	nvmet_file_io_done(&req->f.iocb, ret, 0);
+	nvmet_file_io_done(&req->f.iocb, ret);
 	return true;
 }
 

@@ -245,7 +245,7 @@ struct target_core_file_cmd {
 	struct bio_vec bvecs[];
 };
 
-static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
 {
 	struct target_core_file_cmd *cmd;
 

@@ -303,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		ret = call_read_iter(file, &aio_cmd->iocb, &iter);
 
 	if (ret != -EIOCBQUEUED)
-		cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
+		cmd_rw_aio_complete(&aio_cmd->iocb, ret);
 
 	return 0;
 }

@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 		kthread_unuse_mm(io_data->mm);
 	}
 
-	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+	io_data->kiocb->ki_complete(io_data->kiocb, ret);
 
 	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);

@@ -469,7 +469,7 @@ static void ep_user_copy_worker(struct work_struct *work)
 		ret = -EFAULT;
 
 	/* completing the iocb can drop the ctx and mm, don't touch mm after */
-	iocb->ki_complete(iocb, ret, ret);
+	iocb->ki_complete(iocb, ret);
 
 	kfree(priv->buf);
 	kfree(priv->to_free);

@@ -496,11 +496,8 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 		kfree(priv->to_free);
 		kfree(priv);
 		iocb->private = NULL;
-		/* aio_complete() reports bytes-transferred _and_ faults */
-
 		iocb->ki_complete(iocb,
-				req->actual ? req->actual : (long)req->status,
-				req->status);
+				req->actual ? req->actual : (long)req->status);
 	} else {
 		/* ep_copy_to_user() won't report both; we hide some faults */
 		if (unlikely(0 != req->status))

fs/aio.c (6 changed lines):

@@ -1417,7 +1417,7 @@ static void aio_remove_iocb(struct aio_kiocb *iocb)
 	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 }
 
-static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void aio_complete_rw(struct kiocb *kiocb, long res)
 {
 	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
 

@@ -1437,7 +1437,7 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 	}
 
 	iocb->ki_res.res = res;
-	iocb->ki_res.res2 = res2;
+	iocb->ki_res.res2 = 0;
 	iocb_put(iocb);
 }
 

@@ -1508,7 +1508,7 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
 		ret = -EINTR;
 		fallthrough;
 	default:
-		req->ki_complete(req, ret, 0);
+		req->ki_complete(req, ret);
 	}
 }
 

@@ -37,11 +37,11 @@ static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
 /*
  * Handle completion of a read from the cache.
  */
-static void cachefiles_read_complete(struct kiocb *iocb, long ret, long ret2)
+static void cachefiles_read_complete(struct kiocb *iocb, long ret)
 {
 	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
 
-	_enter("%ld,%ld", ret, ret2);
+	_enter("%ld", ret);
 
 	if (ki->term_func) {
 		if (ret >= 0)

@@ -139,7 +139,7 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
 		fallthrough;
 	default:
 		ki->was_async = false;
-		cachefiles_read_complete(&ki->iocb, ret, 0);
+		cachefiles_read_complete(&ki->iocb, ret);
 		if (ret > 0)
 			ret = 0;
 		break;

@@ -159,12 +159,12 @@ presubmission_error:
 /*
  * Handle completion of a write to the cache.
  */
-static void cachefiles_write_complete(struct kiocb *iocb, long ret, long ret2)
+static void cachefiles_write_complete(struct kiocb *iocb, long ret)
 {
 	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
 	struct inode *inode = file_inode(ki->iocb.ki_filp);
 
-	_enter("%ld,%ld", ret, ret2);
+	_enter("%ld", ret);
 
 	/* Tell lockdep we inherited freeze protection from submission thread */
 	__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);

@@ -244,7 +244,7 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
 		fallthrough;
 	default:
 		ki->was_async = false;
-		cachefiles_write_complete(&ki->iocb, ret, 0);
+		cachefiles_write_complete(&ki->iocb, ret);
 		if (ret > 0)
 			ret = 0;
 		break;

@@ -1022,7 +1022,7 @@ static void ceph_aio_complete(struct inode *inode,
 	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
 						CEPH_CAP_FILE_RD));
 
-	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
+	aio_req->iocb->ki_complete(aio_req->iocb, ret);
 
 	ceph_free_cap_flush(aio_req->prealloc_cf);
 	kfree(aio_req);

@@ -3184,7 +3184,7 @@ restart_loop:
 	mutex_unlock(&ctx->aio_mutex);
 
 	if (ctx->iocb && ctx->iocb->ki_complete)
-		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
+		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
 	else
 		complete(&ctx->done);
 }

@@ -3917,7 +3917,7 @@ again:
 	mutex_unlock(&ctx->aio_mutex);
 
 	if (ctx->iocb && ctx->iocb->ki_complete)
-		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
+		ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
 	else
 		complete(&ctx->done);
 }

@@ -307,7 +307,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 
 		if (ret > 0 && dio->op == REQ_OP_WRITE)
 			ret = generic_write_sync(dio->iocb, ret);
-		dio->iocb->ki_complete(dio->iocb, ret, 0);
+		dio->iocb->ki_complete(dio->iocb, ret);
 	}
 
 	kmem_cache_free(dio_cache, dio);

@@ -687,7 +687,7 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
 			spin_unlock(&fi->lock);
 		}
 
-		io->iocb->ki_complete(io->iocb, res, 0);
+		io->iocb->ki_complete(io->iocb, res);
 	}
 
 	kref_put(&io->refcnt, fuse_io_release);

@@ -2666,7 +2666,7 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 	__io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
 }
 
-static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw(struct kiocb *kiocb, long res)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 

@@ -2677,7 +2677,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 	io_req_task_work_add(req);
 }
 
-static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 

@@ -2884,7 +2884,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 		ret = -EINTR;
 		fallthrough;
 	default:
-		kiocb->ki_complete(kiocb, ret, 0);
+		kiocb->ki_complete(kiocb, ret);
 	}
 }
 

@@ -125,7 +125,7 @@ static void iomap_dio_complete_work(struct work_struct *work)
 	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
 	struct kiocb *iocb = dio->iocb;
 
-	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
+	iocb->ki_complete(iocb, iomap_dio_complete(dio));
 }
 
 /*

@@ -275,7 +275,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 			res = (long) dreq->count;
 			WARN_ON_ONCE(dreq->count < 0);
 		}
-		dreq->iocb->ki_complete(dreq->iocb, res, 0);
+		dreq->iocb->ki_complete(dreq->iocb, res);
 	}
 
 	complete(&dreq->completion);

@@ -272,14 +272,14 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
 	kmem_cache_free(ovl_aio_request_cachep, aio_req);
 }
 
-static void ovl_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+static void ovl_aio_rw_complete(struct kiocb *iocb, long res)
 {
 	struct ovl_aio_req *aio_req = container_of(iocb,
 						   struct ovl_aio_req, iocb);
 	struct kiocb *orig_iocb = aio_req->orig_iocb;
 
 	ovl_aio_cleanup_handler(aio_req);
-	orig_iocb->ki_complete(orig_iocb, res, res2);
+	orig_iocb->ki_complete(orig_iocb, res);
 }
 
 static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)

@@ -330,7 +330,7 @@ struct kiocb {
 	randomized_struct_fields_start
 
 	loff_t			ki_pos;
-	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+	void (*ki_complete)(struct kiocb *iocb, long ret);
 	void			*private;
 	int			ki_flags;
 	u16			ki_hint;