Merge branch 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull iov_iter updates from Al Viro:
 "Cleanups that sat in -next + -stable fodder that has just missed 4.11.

  There's more iov_iter work in my local tree, but I'd prefer to push
  the stuff that had been in -next first"

* 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  iov_iter: don't revert iov buffer if csum error
  generic_file_read_iter(): make use of iov_iter_revert()
  generic_file_direct_write(): make use of iov_iter_revert()
  orangefs: use iov_iter_revert()
  sctp: switch to copy_from_iter_full()
  net/9p: switch to copy_from_iter_full()
  switch memcpy_from_msg() to copy_from_iter_full()
  rds: make use of iov_iter_revert()
commit 5b13475a5e
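Before the per-file hunks, a quick sketch of the two primitives the series leans on. This block is illustrative only, not part of the pull: the demo_* helpers are invented for this page, and it assumes the 4.11-era declarations in include/linux/uio.h.

#include <linux/errno.h>
#include <linux/uio.h>

static int demo_consume_old(void *buf, size_t len, struct iov_iter *from)
{
        /*
         * copy_from_iter() returns the number of bytes copied; a short copy
         * leaves the iterator advanced by the partial amount, so callers
         * had to compare against len and clean up by hand.
         */
        if (copy_from_iter(buf, len, from) != len)
                return -EFAULT;
        return 0;
}

static int demo_consume_new(void *buf, size_t len, struct iov_iter *from)
{
        /*
         * copy_from_iter_full() returns true only if all len bytes were
         * copied, and does not advance the iterator on failure.
         */
        if (!copy_from_iter_full(buf, len, from))
                return -EFAULT;
        return 0;
}

static void demo_unwind(struct iov_iter *i, size_t consumed)
{
        /*
         * iov_iter_revert() walks the iterator back over bytes that were
         * already consumed; the callers below use it instead of keeping a
         * whole "struct iov_iter saved = *iter;" snapshot around.
         */
        iov_iter_revert(i, consumed);
}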
fs/orangefs/file.c

@@ -114,7 +114,6 @@ static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
         struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
         struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
         struct orangefs_kernel_op_s *new_op = NULL;
-        struct iov_iter saved = *iter;
         int buffer_index = -1;
         ssize_t ret;
 
@@ -193,7 +192,7 @@ populate_shared_memory:
                 orangefs_bufmap_put(buffer_index);
                 buffer_index = -1;
                 if (type == ORANGEFS_IO_WRITE)
-                        *iter = saved;
+                        iov_iter_revert(iter, total_size);
                 gossip_debug(GOSSIP_FILE_DEBUG,
                              "%s:going to repopulate_shared_memory.\n",
                              __func__);
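A minimal sketch of the retry shape this hunk simplifies, assuming the transfer step has already pulled total_size bytes out of the iterator when it asks for a retry. transfer_with_retry() and the xfer callback are hypothetical, not orangefs code.

#include <linux/errno.h>
#include <linux/uio.h>

static ssize_t transfer_with_retry(struct iov_iter *iter, size_t total_size,
                                   ssize_t (*xfer)(struct iov_iter *iter,
                                                   size_t total_size))
{
        ssize_t ret;

        for (;;) {
                ret = xfer(iter, total_size);
                if (ret != -EAGAIN)
                        return ret;
                /*
                 * The old code kept "struct iov_iter saved = *iter;" and did
                 * "*iter = saved;" here; walking back over the bytes already
                 * consumed does the same job without copying the whole
                 * struct iov_iter around.
                 */
                iov_iter_revert(iter, total_size);
        }
}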
include/linux/skbuff.h

@@ -3113,7 +3113,7 @@ struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
 
 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
 {
-        return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+        return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
 }
 
 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
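To spell out why the one-liner collapses: a hedged example with an invented fixed-size header type, assuming copy_from_iter_full()'s documented behaviour of leaving the iterator unadvanced when it cannot copy everything.

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/uio.h>

/* Invented message header, purely for illustration. */
struct demo_hdr {
        __be16 type;
        __be16 len;
};

static int demo_pull_hdr(struct demo_hdr *hdr, struct msghdr *msg)
{
        /*
         * Old form: copy_from_iter() could copy fewer bytes than asked and
         * still advance msg->msg_iter by that partial amount, so the caller
         * needed the "== len" comparison.  The new form folds the length
         * check into the boolean result and keeps the iterator untouched on
         * failure, exactly like the memcpy_from_msg() change above.
         */
        if (!copy_from_iter_full(hdr, sizeof(*hdr), &msg->msg_iter))
                return -EFAULT;
        return 0;
}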
mm/filemap.c
@@ -2035,7 +2035,6 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
         if (iocb->ki_flags & IOCB_DIRECT) {
                 struct address_space *mapping = file->f_mapping;
                 struct inode *inode = mapping->host;
-                struct iov_iter data = *iter;
                 loff_t size;
 
                 size = i_size_read(inode);
@@ -2046,11 +2045,12 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 
                 file_accessed(file);
 
-                retval = mapping->a_ops->direct_IO(iocb, &data);
+                retval = mapping->a_ops->direct_IO(iocb, iter);
                 if (retval >= 0) {
                         iocb->ki_pos += retval;
-                        iov_iter_advance(iter, retval);
+                        count -= retval;
                 }
+                iov_iter_revert(iter, iov_iter_count(iter) - count);
 
                 /*
                  * Btrfs can have a short DIO read if we encounter
@@ -2061,7 +2061,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                  * the rest of the read. Buffered reads will not work for
                  * DAX files, so don't bother trying.
                  */
-                if (retval < 0 || !iov_iter_count(iter) || iocb->ki_pos >= size ||
+                if (retval < 0 || !count || iocb->ki_pos >= size ||
                     IS_DAX(inode))
                         goto out;
         }
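The bookkeeping here is easier to see with numbers. The sketch below (invented helper, made-up sizes in the comments) follows the intent of the hunk: revert by desired-remaining minus actual-remaining so that exactly retval bytes appear consumed. Note that the added line in the hunk above computes the difference the other way around; as far as I recall, that sign slip was corrected by a small follow-up fix shortly after this merge.

#include <linux/uio.h>

/*
 * Made-up numbers: 64 KiB requested, so count == 64 KiB and
 * iov_iter_count(iter) == 64 KiB going in; ->direct_IO() reports
 * retval == 16 KiB read but may have advanced iter by, say, 32 KiB.
 */
static void demo_settle_read_iter(struct iov_iter *iter, size_t *count,
                                  ssize_t retval)
{
        if (retval >= 0)
                *count -= retval;       /* bytes still wanted: 48 KiB */
        /*
         * Pull the iterator back so that exactly retval bytes look consumed:
         * revert by desired remaining (48 KiB) minus actual remaining
         * (32 KiB) = 16 KiB.
         */
        iov_iter_revert(iter, *count - iov_iter_count(iter));
}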
@@ -2706,7 +2706,6 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
         ssize_t written;
         size_t write_len;
         pgoff_t end;
-        struct iov_iter data;
 
         write_len = iov_iter_count(from);
         end = (pos + write_len - 1) >> PAGE_SHIFT;
@@ -2735,8 +2734,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
                 }
         }
 
-        data = *from;
-        written = mapping->a_ops->direct_IO(iocb, &data);
+        written = mapping->a_ops->direct_IO(iocb, from);
 
         /*
          * Finally, try again to invalidate clean pages which might have been
@@ -2753,13 +2751,14 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 
         if (written > 0) {
                 pos += written;
-                iov_iter_advance(from, written);
+                write_len -= written;
                 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
                         i_size_write(inode, pos);
                         mark_inode_dirty(inode);
                 }
                 iocb->ki_pos = pos;
         }
+        iov_iter_revert(from, write_len - iov_iter_count(from));
 out:
         return written;
 }
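The write side keeps the same invariant; a brief hedged sketch (invented helper) of the state it leaves behind: after the revert, 'from' holds exactly the unwritten tail, which matters when the caller falls back to buffered writes.

#include <linux/uio.h>

static void demo_settle_write_iter(struct iov_iter *from, size_t write_len,
                                   ssize_t written)
{
        /* write_len starts as the number of bytes handed to ->direct_IO(). */
        if (written > 0)
                write_len -= written;   /* now: bytes NOT written */
        /*
         * Whatever ->direct_IO() consumed beyond 'written' is handed back,
         * so iov_iter_count(from) == write_len afterwards and a buffered
         * fallback sees only the unwritten tail.
         */
        iov_iter_revert(from, write_len - iov_iter_count(from));
}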
net/9p/client.c

@@ -592,9 +592,8 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
                 ename = &req->rc->sdata[req->rc->offset];
                 if (len > inline_len) {
                         /* We have error in external buffer */
-                        err = copy_from_iter(ename + inline_len,
-                                             len - inline_len, uidata);
-                        if (err != len - inline_len) {
+                        if (!copy_from_iter_full(ename + inline_len,
+                                                 len - inline_len, uidata)) {
                                 err = -EFAULT;
                                 goto out_err;
                         }
net/9p/protocol.c

@@ -74,7 +74,7 @@ pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size)
 {
         size_t len = min(pdu->capacity - pdu->size, size);
         struct iov_iter i = *from;
-        if (copy_from_iter(&pdu->sdata[pdu->size], len, &i) != len)
+        if (!copy_from_iter_full(&pdu->sdata[pdu->size], len, &i))
                 len = 0;
 
         pdu->size += len;
net/core/datagram.c

@@ -760,7 +760,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
 
         if (msg_data_left(msg) < chunk) {
                 if (__skb_checksum_complete(skb))
-                        goto csum_error;
+                        return -EINVAL;
                 if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
                         goto fault;
         } else {
@@ -768,15 +768,16 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
                 if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
                                                chunk, &csum))
                         goto fault;
-                if (csum_fold(csum))
-                        goto csum_error;
+
+                if (csum_fold(csum)) {
+                        iov_iter_revert(&msg->msg_iter, chunk);
+                        return -EINVAL;
+                }
+
                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
                         netdev_rx_csum_fault(skb->dev);
         }
         return 0;
-csum_error:
-        iov_iter_revert(&msg->msg_iter, chunk);
-        return -EINVAL;
 fault:
         return -EFAULT;
 }
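A hedged reading of the behavioural change in this hunk, written as a sketch with an invented helper: reverting only makes sense for bytes that were actually handed to the user iterator, which is why the early __skb_checksum_complete() failure now returns -EINVAL without touching msg->msg_iter.

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/uio.h>

/*
 * 'copied' is how many bytes this call actually placed in the user's
 * iterator before the checksum turned out to be bad.
 */
static int demo_fail_checksum(struct msghdr *msg, size_t copied)
{
        /*
         * Nothing copied (the __skb_checksum_complete() branch above):
         * reverting would walk the iterator back over bytes this call never
         * consumed, corrupting its position.  Only undo real consumption.
         */
        if (copied)
                iov_iter_revert(&msg->msg_iter, copied);
        return -EINVAL;
}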
net/rds/recv.c

@@ -594,7 +594,6 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                 goto out;
 
         while (1) {
-                struct iov_iter save;
                 /* If there are pending notifications, do those - and nothing else */
                 if (!list_empty(&rs->rs_notify_queue)) {
                         ret = rds_notify_queue_get(rs, msg);
@@ -630,7 +629,6 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                 rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
                          &inc->i_conn->c_faddr,
                          ntohs(inc->i_hdr.h_sport));
-                save = msg->msg_iter;
                 ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
                 if (ret < 0)
                         break;
@@ -644,7 +642,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                         rds_inc_put(inc);
                         inc = NULL;
                         rds_stats_inc(s_recv_deliver_raced);
-                        msg->msg_iter = save;
+                        iov_iter_revert(&msg->msg_iter, ret);
                         continue;
                 }
 
net/sctp/sm_make_chunk.c

@@ -1512,14 +1512,12 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len,
                           struct iov_iter *from)
 {
         void *target;
-        ssize_t copied;
 
         /* Make room in chunk for data. */
         target = skb_put(chunk->skb, len);
 
         /* Copy data (whole iovec) into chunk */
-        copied = copy_from_iter(target, len, from);
-        if (copied != len)
+        if (!copy_from_iter_full(target, len, from))
                 return -EFAULT;
 
         /* Adjust the chunk length field. */