ten ksmbd server fixes

-----BEGIN PGP SIGNATURE-----
 
 iQGzBAABCgAdFiEE6fsu8pdIjtWE/DpLiiy9cAdyT1EFAmTuK5AACgkQiiy9cAdy
 T1EggQv/RNMaOguGItp1qUgOg9XuHSboLVkCJgwoCTl0cb7VYWFlj8G4IOi37WoQ
 M3SXVQ/DjxB3eSms2LaIFKFjyVZXzZZ9GFjb+dBGssLWB5Zdk6Ez+IJLOpwNUar1
 nwSC0kU/Lqj/gUnmUsmDlmV2Y/14uTeZEh6RiA1IzDMOAEr8KEuakgFzRg5DudYM
 CgZWfO466A48N/YXGmNNqq+8RVEtKaM3A31NZKgAsm4Lw03+V8JwYK/sSx8HWRBx
 heb8Goa7AUIbpggtoVnWf6PPzJsWOgELrVzvUYdyj7JD5HzaY0LDVOJ6YYyuRTBP
 M4n7yZT0mlAFDflHMvydaOKNJS+6HlE94xVPySo/S8uJJ9hHWcMqe8oJov11h6CT
 a76Q7bMkBBXK6GfEjetIY6qWwhN78M1d/Rf9EJRll+d4vIU1i5gPpCTptvfTqMCc
 A53bROyc3TPcH7A5PBWK0ecENIJ0S4wQd+7UzspQjXj+dk429CYF0+bksfWhijjf
 ubEIo9fE
 =4EB/
 -----END PGP SIGNATURE-----

Merge tag '6.6-rc-ksmbd-fixes-part1' of git://git.samba.org/ksmbd

Pull smb server updates from Steve French:

 - fix potential overflows when decoding create contexts and session
   setup requests

 - cleanup fixes

 - compounding fixes, including one for macOS compounded read requests

 - session setup error handling fix

 - fix mode bit bug when applying force_directory_mode and
   force_create_mode

 - RDMA (smbdirect) write fix

* tag '6.6-rc-ksmbd-fixes-part1' of git://git.samba.org/ksmbd:
  ksmbd: add missing calling smb2_set_err_rsp() on error
  ksmbd: replace one-element array with flex-array member in struct smb2_ea_info
  ksmbd: fix slub overflow in ksmbd_decode_ntlmssp_auth_blob()
  ksmbd: fix wrong DataOffset validation of create context
  ksmbd: Fix one kernel-doc comment
  ksmbd: reduce descriptor size if remaining bytes is less than request size
  ksmbd: fix `force create mode' and `force directory mode'
  ksmbd: fix wrong interim response on compound
  ksmbd: add support for read compound
  ksmbd: switch to use kmemdup_nul() helper
Linus Torvalds 2023-08-31 15:28:26 -07:00
commit 8ae5d298ef
15 changed files with 432 additions and 417 deletions

View File

@@ -214,12 +214,10 @@ static int ksmbd_neg_token_alloc(void *context, size_t hdrlen,
{
struct ksmbd_conn *conn = context;
conn->mechToken = kmalloc(vlen + 1, GFP_KERNEL);
conn->mechToken = kmemdup_nul(value, vlen, GFP_KERNEL);
if (!conn->mechToken)
return -ENOMEM;
memcpy(conn->mechToken, value, vlen);
conn->mechToken[vlen] = '\0';
return 0;
}

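The hunk above replaces the open-coded allocate/copy/NUL-terminate sequence with kmemdup_nul(), which duplicates vlen bytes and appends the terminating NUL in a single call, leaving only one NULL check to handle. As a rough illustration, a userspace equivalent of the pattern being removed (dup_nul() is a hypothetical name, not the kernel helper):

#include <stdlib.h>
#include <string.h>

/* Duplicate len bytes of src and NUL-terminate the copy, which is what
 * kmemdup_nul() now does for mechToken in one call. */
static void *dup_nul(const void *src, size_t len)
{
        char *p = malloc(len + 1);

        if (!p)
                return NULL;
        memcpy(p, src, len);
        p[len] = '\0';
        return p;
}
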
View File

@@ -355,6 +355,9 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
if (blob_len < (u64)sess_key_off + sess_key_len)
return -EINVAL;
if (sess_key_len > CIFS_KEY_SIZE)
return -EINVAL;
ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
if (!ctx_arc4)
return -ENOMEM;
@@ -1029,11 +1032,15 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
{
struct scatterlist *sg;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
int i, *nr_entries, total_entries = 0, sg_idx = 0;
if (!nvec)
return NULL;
nr_entries = kcalloc(nvec, sizeof(int), GFP_KERNEL);
if (!nr_entries)
return NULL;
for (i = 0; i < nvec - 1; i++) {
unsigned long kaddr = (unsigned long)iov[i + 1].iov_base;
@@ -1051,8 +1058,10 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
total_entries += 2;
sg = kmalloc_array(total_entries, sizeof(struct scatterlist), GFP_KERNEL);
if (!sg)
if (!sg) {
kfree(nr_entries);
return NULL;
}
sg_init_table(sg, total_entries);
smb2_sg_set_buf(&sg[sg_idx++], iov[0].iov_base + 24, assoc_data_len);
@@ -1086,6 +1095,7 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
}
}
smb2_sg_set_buf(&sg[sg_idx], sign, SMB2_SIGNATURE_SIZE);
kfree(nr_entries);
return sg;
}

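With compounded (and now read-compound) responses the number of iovecs reaching ksmbd_init_sg() is no longer bounded by three, so the per-iovec entry counts move from a fixed int[3] on the stack to a kcalloc()'d array sized by nvec, freed on the kmalloc_array() failure path and again once the scatterlist is filled. A simplified userspace sketch of that bookkeeping (the page-span estimate is illustrative, not the kernel logic):

#include <stdlib.h>

struct kvec { void *iov_base; size_t iov_len; };

/* One entry count per iovec, sized by nvec instead of a fixed int[3],
 * released on every return path. */
static int count_sg_entries(const struct kvec *iov, size_t nvec)
{
        int *nr_entries, total = 0;
        size_t i;

        nr_entries = calloc(nvec, sizeof(int));
        if (!nr_entries)
                return -1;

        for (i = 0; i < nvec; i++) {
                /* crude stand-in: one entry per 4 KiB spanned by this iovec */
                nr_entries[i] = (int)((iov[i].iov_len + 4095) / 4096);
                total += nr_entries[i];
        }

        free(nr_entries);       /* the kernel also frees on its error path */
        return total;
}
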
View File

@@ -123,28 +123,22 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
}
}
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
int ret = 1;
if (list_empty(&work->request_entry) &&
list_empty(&work->async_request_entry))
return 0;
return;
if (!work->multiRsp)
atomic_dec(&conn->req_running);
if (!work->multiRsp) {
spin_lock(&conn->request_lock);
list_del_init(&work->request_entry);
spin_unlock(&conn->request_lock);
if (work->asynchronous)
release_async_work(work);
ret = 0;
}
atomic_dec(&conn->req_running);
spin_lock(&conn->request_lock);
list_del_init(&work->request_entry);
spin_unlock(&conn->request_lock);
if (work->asynchronous)
release_async_work(work);
wake_up_all(&conn->req_running_q);
return ret;
}
void ksmbd_conn_lock(struct ksmbd_conn *conn)
@@ -193,41 +187,22 @@ void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
int ksmbd_conn_write(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
size_t len = 0;
int sent;
struct kvec iov[3];
int iov_idx = 0;
if (!work->response_buf) {
pr_err("NULL response header\n");
return -EINVAL;
}
if (work->tr_buf) {
iov[iov_idx] = (struct kvec) { work->tr_buf,
sizeof(struct smb2_transform_hdr) + 4 };
len += iov[iov_idx++].iov_len;
}
if (work->aux_payload_sz) {
iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
len += iov[iov_idx++].iov_len;
iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
len += iov[iov_idx++].iov_len;
} else {
if (work->tr_buf)
iov[iov_idx].iov_len = work->resp_hdr_sz;
else
iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
iov[iov_idx].iov_base = work->response_buf;
len += iov[iov_idx++].iov_len;
}
if (work->send_no_response)
return 0;
ksmbd_conn_lock(conn);
sent = conn->transport->ops->writev(conn->transport, &iov[0],
iov_idx, len,
work->need_invalidate_rkey,
work->remote_key);
sent = conn->transport->ops->writev(conn->transport, work->iov,
work->iov_cnt,
get_rfc1002_len(work->iov[0].iov_base) + 4,
work->need_invalidate_rkey,
work->remote_key);
ksmbd_conn_unlock(conn);
if (sent < 0) {

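After the rework, ksmbd_conn_write() no longer assembles its own iovec array; it hands the transport work->iov and work->iov_cnt and derives the total send length from the 4-byte length prefix stored in iov[0] (get_rfc1002_len() + 4). Assuming the usual big-endian RFC 1002 style framing that prefix encodes, the arithmetic is simply:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* The 4-byte prefix carried in iov[0]: a big-endian length that counts
 * everything after the prefix itself. */
static void set_rfc1002_len(void *hdr, uint32_t smb_len)
{
        uint32_t be = htonl(smb_len);

        memcpy(hdr, &be, sizeof(be));
}

static uint32_t total_send_len(const void *hdr)
{
        uint32_t be;

        memcpy(&be, hdr, sizeof(be));
        return ntohl(be) + 4;   /* payload bytes plus the prefix itself */
}
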
View File

@@ -158,7 +158,7 @@ int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
struct smb2_buffer_desc_v1 *desc,
unsigned int desc_len);
void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work);
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
int ksmbd_conn_handler_loop(void *p);
int ksmbd_conn_transport_init(void);

View File

@@ -27,18 +27,35 @@ struct ksmbd_work *ksmbd_alloc_work_struct(void)
INIT_LIST_HEAD(&work->async_request_entry);
INIT_LIST_HEAD(&work->fp_entry);
INIT_LIST_HEAD(&work->interim_entry);
INIT_LIST_HEAD(&work->aux_read_list);
work->iov_alloc_cnt = 4;
work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
GFP_KERNEL);
if (!work->iov) {
kmem_cache_free(work_cache, work);
work = NULL;
}
}
return work;
}
void ksmbd_free_work_struct(struct ksmbd_work *work)
{
struct aux_read *ar, *tmp;
WARN_ON(work->saved_cred != NULL);
kvfree(work->response_buf);
kvfree(work->aux_payload_buf);
list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
kvfree(ar->buf);
list_del(&ar->entry);
kfree(ar);
}
kfree(work->tr_buf);
kvfree(work->request_buf);
kfree(work->iov);
if (work->async_id)
ksmbd_release_id(&work->conn->async_ida, work->async_id);
kmem_cache_free(work_cache, work);
@@ -77,3 +94,77 @@ bool ksmbd_queue_work(struct ksmbd_work *work)
{
return queue_work(ksmbd_wq, &work->work);
}
static int ksmbd_realloc_iov_pin(struct ksmbd_work *work, void *ib,
unsigned int ib_len)
{
if (work->iov_alloc_cnt <= work->iov_cnt) {
struct kvec *new;
work->iov_alloc_cnt += 4;
new = krealloc(work->iov,
sizeof(struct kvec) * work->iov_alloc_cnt,
GFP_KERNEL | __GFP_ZERO);
if (!new)
return -ENOMEM;
work->iov = new;
}
work->iov[++work->iov_idx].iov_base = ib;
work->iov[work->iov_idx].iov_len = ib_len;
work->iov_cnt++;
return 0;
}
static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
void *aux_buf, unsigned int aux_size)
{
/* Plus rfc_length size on first iov */
if (!work->iov_idx) {
work->iov[work->iov_idx].iov_base = work->response_buf;
*(__be32 *)work->iov[0].iov_base = 0;
work->iov[work->iov_idx].iov_len = 4;
work->iov_cnt++;
}
ksmbd_realloc_iov_pin(work, ib, len);
inc_rfc1001_len(work->iov[0].iov_base, len);
if (aux_size) {
struct aux_read *ar;
ksmbd_realloc_iov_pin(work, aux_buf, aux_size);
inc_rfc1001_len(work->iov[0].iov_base, aux_size);
ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
if (!ar)
return -ENOMEM;
ar->buf = aux_buf;
list_add(&ar->entry, &work->aux_read_list);
}
return 0;
}
int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
{
return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
}
int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
void *aux_buf, unsigned int aux_size)
{
return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
}
int allocate_interim_rsp_buf(struct ksmbd_work *work)
{
work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
if (!work->response_buf)
return -ENOMEM;
work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
return 0;
}

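The new ksmbd_iov_pin_rsp()/ksmbd_iov_pin_rsp_read() helpers are what build that iovec list: the first pin installs iov[0] over the 4-byte length prefix at the start of response_buf, each subsequent chunk (including a read's aux buffer) gets its own iovec, inc_rfc1001_len() bumps the prefix by the pinned length, and aux buffers are parked on aux_read_list so ksmbd_free_work_struct() can release them after the send. A toy userspace model of that flow (fixed-size iov array here, where the kernel kreallocs in steps of four):

#include <stddef.h>
#include <stdint.h>

struct kvec { void *iov_base; size_t iov_len; };

/* Toy model of response pinning: iov[0] mirrors the 4-byte length prefix,
 * every pinned chunk becomes one iovec, and the running length grows the
 * way inc_rfc1001_len() grows the real prefix. */
struct rsp_builder {
        uint32_t rfc1002_len;           /* stored big-endian in the real buffer */
        struct kvec iov[8];
        int iov_cnt;
};

static int pin_chunk(struct rsp_builder *b, void *prefix, void *buf, size_t len)
{
        if (b->iov_cnt == 0) {          /* first pin: account for the prefix */
                b->iov[0].iov_base = prefix;
                b->iov[0].iov_len = 4;
                b->iov_cnt = 1;
        }
        if (b->iov_cnt >= 8)
                return -1;              /* the kernel grows the array instead */
        b->iov[b->iov_cnt].iov_base = buf;
        b->iov[b->iov_cnt].iov_len = len;
        b->iov_cnt++;
        b->rfc1002_len += (uint32_t)len;
        return 0;
}
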
View File

@@ -19,6 +19,11 @@ enum {
KSMBD_WORK_CLOSED,
};
struct aux_read {
void *buf;
struct list_head entry;
};
/* one of these for every pending CIFS request at the connection */
struct ksmbd_work {
/* Server corresponding to this mid */
@@ -31,13 +36,19 @@ struct ksmbd_work {
/* Response buffer */
void *response_buf;
/* Read data buffer */
void *aux_payload_buf;
struct list_head aux_read_list;
struct kvec *iov;
int iov_alloc_cnt;
int iov_cnt;
int iov_idx;
/* Next cmd hdr in compound req buf*/
int next_smb2_rcv_hdr_off;
/* Next cmd hdr in compound rsp buf*/
int next_smb2_rsp_hdr_off;
/* Current cmd hdr in compound rsp buf*/
int curr_smb2_rsp_hdr_off;
/*
* Current Local FID assigned compound response if SMB2 CREATE
@@ -53,16 +64,11 @@ struct ksmbd_work {
unsigned int credits_granted;
/* response smb header size */
unsigned int resp_hdr_sz;
unsigned int response_sz;
/* Read data count */
unsigned int aux_payload_sz;
void *tr_buf;
unsigned char state;
/* Multiple responses for one request e.g. SMB ECHO */
bool multiRsp:1;
/* No response for cancelled request */
bool send_no_response:1;
/* Request is encrypted */
@@ -95,6 +101,15 @@ static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
}
/**
* ksmbd_resp_buf_curr - Get current buffer on compound response.
* @work: smb work containing response buffer
*/
static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work)
{
return work->response_buf + work->curr_smb2_rsp_hdr_off + 4;
}
/**
* ksmbd_req_buf_next - Get next buffer on compound request.
* @work: smb work containing response buffer
@@ -113,5 +128,8 @@ int ksmbd_work_pool_init(void);
int ksmbd_workqueue_init(void);
void ksmbd_workqueue_destroy(void);
bool ksmbd_queue_work(struct ksmbd_work *work);
int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
void *aux_buf, unsigned int aux_size);
int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len);
int allocate_interim_rsp_buf(struct ksmbd_work *work);
#endif /* __KSMBD_WORK_H__ */

View File

@@ -34,29 +34,22 @@ struct ksmbd_share_config {
#define KSMBD_SHARE_INVALID_UID ((__u16)-1)
#define KSMBD_SHARE_INVALID_GID ((__u16)-1)
static inline int share_config_create_mode(struct ksmbd_share_config *share,
umode_t posix_mode)
static inline umode_t
share_config_create_mode(struct ksmbd_share_config *share,
umode_t posix_mode)
{
if (!share->force_create_mode) {
if (!posix_mode)
return share->create_mask;
else
return posix_mode & share->create_mask;
}
return share->force_create_mode & share->create_mask;
umode_t mode = (posix_mode ?: (umode_t)-1) & share->create_mask;
return mode | share->force_create_mode;
}
static inline int share_config_directory_mode(struct ksmbd_share_config *share,
umode_t posix_mode)
static inline umode_t
share_config_directory_mode(struct ksmbd_share_config *share,
umode_t posix_mode)
{
if (!share->force_directory_mode) {
if (!posix_mode)
return share->directory_mask;
else
return posix_mode & share->directory_mask;
}
umode_t mode = (posix_mode ?: (umode_t)-1) & share->directory_mask;
return share->force_directory_mode & share->directory_mask;
return mode | share->force_directory_mode;
}
static inline int test_share_config_flag(struct ksmbd_share_config *share,

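The corrected helpers apply the share mask first and then OR the forced bits on top, so force_create_mode and force_directory_mode can only add permission bits instead of replacing whatever the client asked for, and the forced bits are no longer clipped by the mask. A small worked example with purely illustrative numbers, not any real share configuration:

#include <stdio.h>

typedef unsigned short umode_t;

/* Corrected combination from the hunk above:
 * (client mode, or all bits if none was sent) & mask, then OR the forced bits. */
static umode_t effective_mode(umode_t client_mode, umode_t mask, umode_t force)
{
        umode_t mode = (client_mode ? client_mode : (umode_t)-1) & mask;

        return mode | force;
}

int main(void)
{
        /* 0644 requested, create mask 0744, force_create_mode 0011:
         * (0644 & 0744) | 0011 = 0644 | 0011 = 0655 */
        printf("%04o\n", (unsigned)effective_mode(0644, 0744, 0011));
        return 0;
}
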
View File

@@ -616,15 +616,6 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
return 0;
}
static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
{
work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
if (!work->response_buf)
return -ENOMEM;
work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
return 0;
}
/**
* __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
* to client
@@ -639,7 +630,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
{
struct smb2_oplock_break *rsp = NULL;
struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
struct ksmbd_conn *conn = work->conn;
struct oplock_break_info *br_info = work->request_buf;
struct smb2_hdr *rsp_hdr;
struct ksmbd_file *fp;
@@ -648,7 +638,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
if (!fp)
goto out;
if (allocate_oplock_break_buf(work)) {
if (allocate_interim_rsp_buf(work)) {
pr_err("smb2_allocate_rsp_buf failed! ");
ksmbd_fd_put(work, fp);
goto out;
@@ -656,8 +646,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
*(__be32 *)work->response_buf =
cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -684,13 +672,15 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
rsp->PersistentFid = fp->persistent_id;
rsp->VolatileFid = fp->volatile_id;
inc_rfc1001_len(work->response_buf, 24);
ksmbd_fd_put(work, fp);
if (ksmbd_iov_pin_rsp(work, (void *)rsp,
sizeof(struct smb2_oplock_break)))
goto out;
ksmbd_debug(OPLOCK,
"sending oplock break v_id %llu p_id = %llu lock level = %d\n",
rsp->VolatileFid, rsp->PersistentFid, rsp->OplockLevel);
ksmbd_fd_put(work, fp);
ksmbd_conn_write(work);
out:
@@ -751,18 +741,15 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
struct smb2_lease_break *rsp = NULL;
struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
struct lease_break_info *br_info = work->request_buf;
struct ksmbd_conn *conn = work->conn;
struct smb2_hdr *rsp_hdr;
if (allocate_oplock_break_buf(work)) {
if (allocate_interim_rsp_buf(work)) {
ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
goto out;
}
rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
*(__be32 *)work->response_buf =
cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -791,7 +778,9 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
rsp->AccessMaskHint = 0;
rsp->ShareMaskHint = 0;
inc_rfc1001_len(work->response_buf, 44);
if (ksmbd_iov_pin_rsp(work, (void *)rsp,
sizeof(struct smb2_lease_break)))
goto out;
ksmbd_conn_write(work);
@@ -1492,7 +1481,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag, i
name_len < 4 ||
name_off + name_len > cc_len ||
(value_off & 0x7) != 0 ||
(value_off && (value_off < name_off + name_len)) ||
(value_len && value_off < name_off + (name_len < 8 ? 8 : name_len)) ||
((u64)value_off + value_len > cc_len))
return ERR_PTR(-EINVAL);

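The tightened DataOffset validation keys off DataLength rather than DataOffset and requires the value to start past the name region rounded up to 8 bytes, which is what the (name_len < 8 ? 8 : name_len) term enforces. In particular, a context advertising a non-zero DataLength with DataOffset == 0 used to slip past the old "value_off && ..." test. A standalone comparison of just the offset portion of the two checks (the cc_len and name bounds checks are omitted; numbers are made up):

#include <stdbool.h>
#include <stdio.h>

static bool old_reject(unsigned value_off, unsigned value_len,
                       unsigned name_off, unsigned name_len)
{
        (void)value_len;        /* the old check never looked at DataLength */
        return (value_off & 0x7) != 0 ||
               (value_off && value_off < name_off + name_len);
}

static bool new_reject(unsigned value_off, unsigned value_len,
                       unsigned name_off, unsigned name_len)
{
        unsigned name_end = name_off + (name_len < 8 ? 8 : name_len);

        return (value_off & 0x7) != 0 ||
               (value_len && value_off < name_end);
}

int main(void)
{
        /* DataLength set but DataOffset zero: old check passes, new rejects */
        printf("old=%d new=%d\n",
               old_reject(0, 16, 0x10, 4), new_reject(0, 16, 0x10, 4));
        return 0;
}
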
View File

@@ -163,6 +163,7 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
{
u16 command = 0;
int rc;
bool is_chained = false;
if (conn->ops->allocate_rsp_buf(work))
return;
@@ -229,14 +230,13 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
}
}
is_chained = is_chained_smb2_message(work);
if (work->sess &&
(work->sess->sign || smb3_11_final_sess_setup_resp(work) ||
conn->ops->is_sign_req(work, command)))
conn->ops->set_sign_rsp(work);
} while (is_chained_smb2_message(work));
if (work->send_no_response)
return;
} while (is_chained == true);
send:
smb3_preauth_hash_rsp(work);

File diff suppressed because it is too large

View File

@@ -361,7 +361,7 @@ struct smb2_ea_info {
__u8 Flags;
__u8 EaNameLength;
__le16 EaValueLength;
char name[1];
char name[];
/* optionally followed by value */
} __packed; /* level 15 Query */

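Swapping the one-element name[1] for a C99 flexible array member is the usual kernel-wide conversion away from "fake" arrays, which confuse sizeof() and defeat array bounds checking. A userspace sketch of how such a struct is then sized and filled (the kernel would normally use the struct_size() helper for the addition):

#include <stdlib.h>
#include <string.h>

/* Flexible-array layout like the reworked smb2_ea_info: sizeof() excludes
 * the trailing name bytes, so they are added explicitly when allocating. */
struct ea_info {
        unsigned char name_len;
        unsigned short value_len;
        char name[];            /* was name[1] */
};

static struct ea_info *ea_alloc(const char *name, size_t name_len)
{
        struct ea_info *ea = malloc(sizeof(*ea) + name_len + 1);

        if (!ea)
                return NULL;
        ea->name_len = (unsigned char)name_len;
        ea->value_len = 0;
        memcpy(ea->name, name, name_len);
        ea->name[name_len] = '\0';
        return ea;
}
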
View File

@@ -319,12 +319,6 @@ static int init_smb1_rsp_hdr(struct ksmbd_work *work)
struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
/*
* Remove 4 byte direct TCP header.
*/
*(__be32 *)work->response_buf =
cpu_to_be32(sizeof(struct smb_hdr) - 4);
rsp_hdr->Command = SMB_COM_NEGOTIATE;
*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
rsp_hdr->Flags = SMBFLG_RESPONSE;
@@ -560,10 +554,11 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
/* Add 2 byte bcc and 2 byte DialectIndex. */
inc_rfc1001_len(work->response_buf, 4);
neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
if (ksmbd_iov_pin_rsp(work, (void *)neg_rsp,
sizeof(struct smb_negotiate_rsp) - 4))
return -ENOMEM;
neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
neg_rsp->hdr.WordCount = 1;
neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
neg_rsp->ByteCount = 0;

View File

@@ -1241,14 +1241,12 @@ static int smb_direct_writev(struct ksmbd_transport *t,
//FIXME: skip RFC1002 header..
buflen -= 4;
iov[0].iov_base += 4;
iov[0].iov_len -= 4;
remaining_data_length = buflen;
ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
start = i = 0;
start = i = 1;
buflen = 0;
while (true) {
buflen += iov[i].iov_len;
@@ -1366,24 +1364,35 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
LIST_HEAD(msg_list);
char *desc_buf;
int credits_needed;
unsigned int desc_buf_len;
size_t total_length = 0;
unsigned int desc_buf_len, desc_num = 0;
if (t->status != SMB_DIRECT_CS_CONNECTED)
return -ENOTCONN;
if (buf_len > t->max_rdma_rw_size)
return -EINVAL;
/* calculate needed credits */
credits_needed = 0;
desc_buf = buf;
for (i = 0; i < desc_len / sizeof(*desc); i++) {
if (!buf_len)
break;
desc_buf_len = le32_to_cpu(desc[i].length);
if (!desc_buf_len)
return -EINVAL;
if (desc_buf_len > buf_len) {
desc_buf_len = buf_len;
desc[i].length = cpu_to_le32(desc_buf_len);
buf_len = 0;
}
credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len);
desc_buf += desc_buf_len;
total_length += desc_buf_len;
if (desc_buf_len == 0 || total_length > buf_len ||
total_length > t->max_rdma_rw_size)
return -EINVAL;
buf_len -= desc_buf_len;
desc_num++;
}
ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
@@ -1395,7 +1404,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
/* build rdma_rw_ctx for each descriptor */
desc_buf = buf;
for (i = 0; i < desc_len / sizeof(*desc); i++) {
for (i = 0; i < desc_num; i++) {
msg = kzalloc(offsetof(struct smb_direct_rdma_rw_msg, sg_list) +
sizeof(struct scatterlist) * SG_CHUNK_SIZE, GFP_KERNEL);
if (!msg) {

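The reworked credit loop walks the client's buffer descriptors only while payload remains, shrinks a descriptor whose advertised length exceeds the bytes actually left to write, and records in desc_num how many descriptors were consumed so the rdma_rw_ctx build loop below stops at the same point. A simplified model of the clamping idea (not the kernel loop itself; sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Clamp descriptor lengths to the bytes actually being transferred and
 * report how many descriptors were used. */
static unsigned clamp_descs(uint32_t *lens, unsigned n, uint32_t buf_len)
{
        unsigned i, used = 0;

        for (i = 0; i < n && buf_len; i++) {
                if (lens[i] > buf_len)
                        lens[i] = buf_len;      /* shrink the final descriptor */
                buf_len -= lens[i];
                used++;
        }
        return used;
}

int main(void)
{
        uint32_t lens[2] = { 64 * 1024, 64 * 1024 };
        unsigned used = clamp_descs(lens, 2, 96 * 1024);

        /* two 64 KiB descriptors against 96 KiB of data: used=2, last=32768 */
        printf("used=%u last=%u\n", used, (unsigned)lens[1]);
        return 0;
}
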
View File

@@ -367,15 +367,15 @@ out:
* @fid: file id of open file
* @count: read byte count
* @pos: file pos
* @rbuf: read data buffer
*
* Return: number of read bytes on success, otherwise error
*/
int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
loff_t *pos)
loff_t *pos, char *rbuf)
{
struct file *filp = fp->filp;
ssize_t nbytes = 0;
char *rbuf = work->aux_payload_buf;
struct inode *inode = file_inode(filp);
if (S_ISDIR(inode->i_mode))

View File

@@ -76,8 +76,8 @@ void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
struct dentry *dentry, __le32 *daccess);
int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode);
int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode);
int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp,
size_t count, loff_t *pos);
int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
loff_t *pos, char *rbuf);
int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
char *buf, size_t count, loff_t *pos, bool sync,
ssize_t *written);