mirror of https://github.com/torvalds/linux.git (synced 2024-11-21 11:31:31 +00:00)

Merge branch 'torvalds:master' into master

commit 8a1cb53e6c
@@ -12167,7 +12167,7 @@ KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M:	Chuck Lever <chuck.lever@oracle.com>
 M:	Jeff Layton <jlayton@kernel.org>
 R:	Neil Brown <neilb@suse.de>
-R:	Olga Kornievskaia <kolga@netapp.com>
+R:	Olga Kornievskaia <okorniev@redhat.com>
 R:	Dai Ngo <Dai.Ngo@oracle.com>
 R:	Tom Talpey <tom@talpey.com>
 L:	linux-nfs@vger.kernel.org
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _LOONGARCH_DMA_DIRECT_H
-#define _LOONGARCH_DMA_DIRECT_H
-
-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-#endif /* _LOONGARCH_DMA_DIRECT_H */
@@ -9,6 +9,8 @@
 
 extern atomic_t irq_err_count;
 
+#define ARCH_IRQ_INIT_FLAGS	IRQ_NOPROBE
+
 /*
  * interrupt-retrigger: NOP for now. This may not be appropriate for all
  * machines, we'll see ...
@@ -76,7 +76,6 @@ static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
 
 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
-void kvm_reset_timer(struct kvm_vcpu *vcpu);
 void kvm_save_timer(struct kvm_vcpu *vcpu);
 void kvm_restore_timer(struct kvm_vcpu *vcpu);
 
@@ -530,6 +530,10 @@ SYM_FUNC_END(_restore_lasx_context)
 
 #ifdef CONFIG_CPU_HAS_LBT
 STACK_FRAME_NON_STANDARD _restore_fp
 #ifdef CONFIG_CPU_HAS_LSX
 STACK_FRAME_NON_STANDARD _restore_lsx
 #endif
 #ifdef CONFIG_CPU_HAS_LASX
 STACK_FRAME_NON_STANDARD _restore_lasx
 #endif
 #endif
 
@@ -102,9 +102,6 @@ void __init init_IRQ(void)
 	mp_ops.init_ipi();
 #endif
 
-	for (i = 0; i < NR_IRQS; i++)
-		irq_set_noprobe(i);
-
 	for_each_possible_cpu(i) {
 		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
 
@@ -277,6 +277,10 @@ SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
 
 #ifdef CONFIG_CPU_HAS_LBT
 STACK_FRAME_NON_STANDARD kvm_restore_fpu
 #ifdef CONFIG_CPU_HAS_LSX
 STACK_FRAME_NON_STANDARD kvm_restore_lsx
 #endif
 #ifdef CONFIG_CPU_HAS_LASX
 STACK_FRAME_NON_STANDARD kvm_restore_lasx
 #endif
 #endif
 
@@ -188,10 +188,3 @@ void kvm_save_timer(struct kvm_vcpu *vcpu)
 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
 	preempt_enable();
 }
-
-void kvm_reset_timer(struct kvm_vcpu *vcpu)
-{
-	write_gcsr_timercfg(0);
-	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
-	hrtimer_cancel(&vcpu->arch.swtimer);
-}
@@ -647,7 +647,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
 		vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
 		break;
 	case KVM_REG_LOONGARCH_VCPU_RESET:
-		kvm_reset_timer(vcpu);
+		vcpu->arch.st.guest_addr = 0;
 		memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
 		memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
 		break;
@@ -359,6 +359,7 @@ static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
 		dev->smu_msg = 0x538;
 		break;
 	case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+	case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
 		dev->num_ips = 22;
 		dev->s2d_msg_id = 0xDE;
 		dev->smu_msg = 0x938;
@@ -597,6 +598,7 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
 		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
 		break;
 	case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+	case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
 		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_1AH);
 		break;
 	default:
@@ -630,6 +632,7 @@ static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
 	case AMD_CPU_ID_CB:
 	case AMD_CPU_ID_PS:
 	case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+	case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
 		return true;
 	default:
 		return false;
@@ -145,6 +145,10 @@ static struct quirk_entry quirk_asus_ignore_fan = {
 	.wmi_ignore_fan = true,
 };
 
+static struct quirk_entry quirk_asus_zenbook_duo_kbd = {
+	.ignore_key_wlan = true,
+};
+
 static int dmi_matched(const struct dmi_system_id *dmi)
 {
 	pr_info("Identified laptop model '%s'\n", dmi->ident);
@@ -516,6 +520,15 @@ static const struct dmi_system_id asus_quirks[] = {
 		},
 		.driver_data = &quirk_asus_ignore_fan,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUS Zenbook Duo UX8406MA",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "UX8406MA"),
+		},
+		.driver_data = &quirk_asus_zenbook_duo_kbd,
+	},
 	{},
 };
 
@@ -630,7 +643,12 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
 	case 0x32: /* Volume Mute */
 		if (atkbd_reports_vol_keys)
 			*code = ASUS_WMI_KEY_IGNORE;
 
 		break;
+	case 0x5D: /* Wireless console Toggle */
+	case 0x5E: /* Wireless console Enable */
+	case 0x5F: /* Wireless console Disable */
+		if (quirks->ignore_key_wlan)
+			*code = ASUS_WMI_KEY_IGNORE;
+		break;
 	}
 }
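The new 0x5D-0x5F cases above swallow the wireless-console scan codes whenever the quirk is set, so the duplicate events never reach the input layer. A minimal stand-alone sketch of that filtering pattern; the constants and function name below are stand-ins, not the driver's real definitions:

#include <stdio.h>
#include <stdbool.h>

#define KEY_IGNORE (-1)	/* stand-in for ASUS_WMI_KEY_IGNORE */

/* Swallow wireless-console scan codes when the quirk is active. */
static int filter_key(int code, bool ignore_key_wlan)
{
	switch (code) {
	case 0x5D:	/* wireless console toggle */
	case 0x5E:	/* wireless console enable */
	case 0x5F:	/* wireless console disable */
		if (ignore_key_wlan)
			code = KEY_IGNORE;
		break;
	}
	return code;
}

int main(void)
{
	printf("%d\n", filter_key(0x5E, true));		/* -1: filtered on quirked models */
	printf("%d\n", filter_key(0x5E, false));	/* 94 (0x5E): passed through otherwise */
	return 0;
}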
@@ -40,6 +40,7 @@ struct quirk_entry {
 	bool wmi_force_als_set;
 	bool wmi_ignore_fan;
 	bool filter_i8042_e1_extended_codes;
+	bool ignore_key_wlan;
 	enum asus_wmi_tablet_switch_mode tablet_switch_mode;
 	int wapf;
 	/*
@@ -140,7 +140,6 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
 		/* Lenovo Yoga Tab 3 Pro YT3-X90F */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
-			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
 		},
 		.driver_data = (void *)&lenovo_yt3_info,
fs/attr.c (14 changed lines)
@@ -487,9 +487,17 @@ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
 	error = security_inode_setattr(idmap, dentry, attr);
 	if (error)
 		return error;
-	error = try_break_deleg(inode, delegated_inode);
-	if (error)
-		return error;
+
+	/*
+	 * If ATTR_DELEG is set, then these attributes are being set on
+	 * behalf of the holder of a write delegation. We want to avoid
+	 * breaking the delegation in this case.
+	 */
+	if (!(ia_valid & ATTR_DELEG)) {
+		error = try_break_deleg(inode, delegated_inode);
+		if (error)
+			return error;
+	}
 
 	if (inode->i_op->setattr)
 		error = inode->i_op->setattr(idmap, dentry, attr);
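The new branch above only calls try_break_deleg() when ATTR_DELEG is absent, so a setattr issued on behalf of a write-delegation holder no longer recalls its own delegation. A minimal user-space sketch of that short-circuit; the flag values mirror the fs.h hunk further down, and the stub names are illustrative, not kernel API:

#include <stdio.h>

#define ATTR_SIZE  (1 << 3)
#define ATTR_DELEG (1 << 18)	/* delegated attrs: don't break write delegations */

/* Stand-in for try_break_deleg(): pretend breaking always succeeds. */
static int try_break_deleg_stub(void)
{
	printf("delegation broken\n");
	return 0;
}

static int notify_change_sketch(unsigned int ia_valid)
{
	int error = 0;

	/* Only break the delegation when ATTR_DELEG is absent. */
	if (!(ia_valid & ATTR_DELEG))
		error = try_break_deleg_stub();
	else
		printf("ATTR_DELEG set: delegation left intact\n");

	return error;
}

int main(void)
{
	notify_change_sketch(ATTR_SIZE);		/* ordinary setattr */
	notify_change_sketch(ATTR_SIZE | ATTR_DELEG);	/* setattr from the delegation holder */
	return 0;
}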
@@ -668,7 +668,6 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 {
 	struct btrfs_inode *inode = bbio->inode;
 	struct btrfs_fs_info *fs_info = bbio->fs_info;
-	struct btrfs_bio *orig_bbio = bbio;
 	struct bio *bio = &bbio->bio;
 	u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
 	u64 length = bio->bi_iter.bi_size;
@@ -706,7 +705,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 		bbio->saved_iter = bio->bi_iter;
 		ret = btrfs_lookup_bio_sums(bbio);
 		if (ret)
-			goto fail_put_bio;
+			goto fail;
 	}
 
 	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
@@ -740,13 +739,13 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 
 			ret = btrfs_bio_csum(bbio);
 			if (ret)
-				goto fail_put_bio;
+				goto fail;
 		} else if (use_append ||
 			   (btrfs_is_zoned(fs_info) && inode &&
 			    inode->flags & BTRFS_INODE_NODATASUM)) {
 			ret = btrfs_alloc_dummy_sum(bbio);
 			if (ret)
-				goto fail_put_bio;
+				goto fail;
 		}
 	}
 
@@ -754,12 +753,23 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 done:
 	return map_length == length;
 
-fail_put_bio:
-	if (map_length < length)
-		btrfs_cleanup_bio(bbio);
 fail:
 	btrfs_bio_counter_dec(fs_info);
-	btrfs_bio_end_io(orig_bbio, ret);
+	/*
+	 * We have split the original bbio, now we have to end both the current
+	 * @bbio and remaining one, as the remaining one will never be submitted.
+	 */
+	if (map_length < length) {
+		struct btrfs_bio *remaining = bbio->private;
+
+		ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
+		ASSERT(remaining);
+
+		remaining->bio.bi_status = ret;
+		btrfs_orig_bbio_end_io(remaining);
+	}
+	bbio->bio.bi_status = ret;
+	btrfs_orig_bbio_end_io(bbio);
 	/* Do not submit another chunk */
 	return true;
 }
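With the single fail: label above, a failure after the bio has been split (map_length < length) must complete both halves: the current bbio and the cloned remainder that will never be submitted. A stand-alone sketch of that cleanup shape, using simplified stand-in types rather than the btrfs structures:

#include <stdio.h>
#include <stdbool.h>

struct fake_bbio {
	const char *name;
	int status;
};

static void end_io(struct fake_bbio *b, int status)
{
	b->status = status;
	printf("%s completed with status %d\n", b->name, status);
}

static bool submit_chunk(unsigned long long map_length,
			 unsigned long long length, int ret)
{
	struct fake_bbio current_half = { .name = "current bbio" };
	struct fake_bbio remaining    = { .name = "remaining bbio" };

	if (ret)
		goto fail;

	return map_length == length;

fail:
	/* If the original bio was split, the remainder will never be
	 * submitted, so it has to be completed with the error too. */
	if (map_length < length)
		end_io(&remaining, ret);
	end_io(&current_half, ret);
	return true;	/* do not submit another chunk */
}

int main(void)
{
	submit_chunk(4096, 8192, -5);	/* failure after a split */
	submit_chunk(8192, 8192, -5);	/* failure without a split */
	return 0;
}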
@@ -637,7 +637,7 @@ static int extent_fiemap(struct btrfs_inode *inode,
 	struct btrfs_path *path;
 	struct fiemap_cache cache = { 0 };
 	struct btrfs_backref_share_check_ctx *backref_ctx;
-	u64 last_extent_end;
+	u64 last_extent_end = 0;
 	u64 prev_extent_end;
 	u64 range_start;
 	u64 range_end;
@@ -4185,6 +4185,8 @@ static int try_flush_qgroup(struct btrfs_root *root)
 		return 0;
 	}
 
+	btrfs_run_delayed_iputs(root->fs_info);
+	btrfs_wait_on_delayed_iputs(root->fs_info);
 	ret = btrfs_start_delalloc_snapshot(root, true);
 	if (ret < 0)
 		goto out;
@@ -1985,8 +1985,8 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
 	return unalloc < data_chunk_size;
 }
 
-static int do_reclaim_sweep(struct btrfs_fs_info *fs_info,
-			    struct btrfs_space_info *space_info, int raid)
+static void do_reclaim_sweep(struct btrfs_fs_info *fs_info,
+			     struct btrfs_space_info *space_info, int raid)
 {
 	struct btrfs_block_group *bg;
 	int thresh_pct;
@@ -2031,7 +2031,6 @@ again:
 	}
 
 	up_read(&space_info->groups_sem);
-	return 0;
 }
 
 void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
@@ -2074,21 +2073,15 @@ bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
 	return ret;
 }
 
-int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
+void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
 {
-	int ret;
 	int raid;
 	struct btrfs_space_info *space_info;
 
 	list_for_each_entry(space_info, &fs_info->space_info, list) {
 		if (!btrfs_should_periodic_reclaim(space_info))
 			continue;
-		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
-			ret = do_reclaim_sweep(fs_info, space_info, raid);
-			if (ret)
-				return ret;
-		}
+		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
+			do_reclaim_sweep(fs_info, space_info, raid);
 	}
-
-	return ret;
 }
@@ -294,6 +294,6 @@ void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s6
 void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
 bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
 int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info);
-int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
+void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
 
 #endif /* BTRFS_SPACE_INFO_H */
@@ -2789,15 +2789,18 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
 		 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
 		 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
 
-	spin_lock(&nf->fi_lock);
-	file = find_any_file_locked(nf);
-	if (file) {
-		nfs4_show_superblock(s, file);
-		seq_puts(s, ", ");
-		nfs4_show_fname(s, file);
-		seq_puts(s, ", ");
-	}
-	spin_unlock(&nf->fi_lock);
+	if (nf) {
+		spin_lock(&nf->fi_lock);
+		file = find_any_file_locked(nf);
+		if (file) {
+			nfs4_show_superblock(s, file);
+			seq_puts(s, ", ");
+			nfs4_show_fname(s, file);
+			seq_puts(s, ", ");
+		}
+		spin_unlock(&nf->fi_lock);
+	} else
+		seq_puts(s, "closed, ");
 	nfs4_show_owner(s, oo);
 	if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
 		seq_puts(s, ", admin-revoked");
@@ -3075,9 +3078,9 @@ nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
 	struct nfs4_delegation *dp =
 			container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
 
-	nfs4_put_stid(&dp->dl_stid);
 	clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
 	wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
+	nfs4_put_stid(&dp->dl_stid);
 }
 
 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
|
||||
/**
|
||||
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
|
||||
* @rqstp: RPC transaction context
|
||||
* @inode: file to be checked for a conflict
|
||||
* @dentry: dentry of inode to be checked for a conflict
|
||||
* @modified: return true if file was modified
|
||||
* @size: new size of file if modified is true
|
||||
*
|
||||
@ -8827,16 +8830,16 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
|
||||
* code is returned.
|
||||
*/
|
||||
__be32
|
||||
nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
|
||||
nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
|
||||
bool *modified, u64 *size)
|
||||
{
|
||||
__be32 status;
|
||||
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
|
||||
struct file_lock_context *ctx;
|
||||
struct file_lease *fl;
|
||||
struct nfs4_delegation *dp;
|
||||
struct iattr attrs;
|
||||
struct nfs4_cb_fattr *ncf;
|
||||
struct inode *inode = d_inode(dentry);
|
||||
|
||||
*modified = false;
|
||||
ctx = locks_inode_context(inode);
|
||||
@ -8859,7 +8862,8 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
|
||||
goto break_lease;
|
||||
}
|
||||
if (type == F_WRLCK) {
|
||||
dp = fl->c.flc_owner;
|
||||
struct nfs4_delegation *dp = fl->c.flc_owner;
|
||||
|
||||
if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
|
||||
spin_unlock(&ctx->flc_lock);
|
||||
return 0;
|
||||
@ -8867,6 +8871,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
|
||||
break_lease:
|
||||
nfsd_stats_wdeleg_getattr_inc(nn);
|
||||
dp = fl->c.flc_owner;
|
||||
refcount_inc(&dp->dl_stid.sc_count);
|
||||
ncf = &dp->dl_cb_fattr;
|
||||
nfs4_cb_getattr(&dp->dl_cb_fattr);
|
||||
spin_unlock(&ctx->flc_lock);
|
||||
@@ -8876,27 +8881,37 @@ break_lease:
 	/* Recall delegation only if client didn't respond */
 	status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
 	if (status != nfserr_jukebox ||
-	    !nfsd_wait_for_delegreturn(rqstp, inode))
+	    !nfsd_wait_for_delegreturn(rqstp, inode)) {
+		nfs4_put_stid(&dp->dl_stid);
 		return status;
+	}
 }
 if (!ncf->ncf_file_modified &&
     (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
      ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
 	ncf->ncf_file_modified = true;
 if (ncf->ncf_file_modified) {
+	int err;
+
 	/*
 	 * Per section 10.4.3 of RFC 8881, the server would
 	 * not update the file's metadata with the client's
 	 * modified size
 	 */
 	attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
-	attrs.ia_valid = ATTR_MTIME | ATTR_CTIME;
-	setattr_copy(&nop_mnt_idmap, inode, &attrs);
-	mark_inode_dirty(inode);
+	attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
+	inode_lock(inode);
+	err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
+	inode_unlock(inode);
+	if (err) {
+		nfs4_put_stid(&dp->dl_stid);
+		return nfserrno(err);
+	}
 	ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
 	*size = ncf->ncf_cur_fsize;
 	*modified = true;
 }
+nfs4_put_stid(&dp->dl_stid);
 return 0;
 }
 break;
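The rework above takes a reference on the delegation stateid at break_lease and then has to drop it on every exit: the recall-failed path, the notify_change() error path, and the normal return. A small sketch of that take-once, put-on-every-path discipline, with stand-in types rather than the nfsd ones:

#include <stdio.h>

struct fake_stid { int refcount; };

static void get_stid(struct fake_stid *s) { s->refcount++; }
static void put_stid(struct fake_stid *s) { s->refcount--; }

static int getattr_conflict(struct fake_stid *stid, int cb_error, int modified)
{
	int err = 0;

	get_stid(stid);			/* pin the delegation */

	if (cb_error) {
		err = cb_error;
		goto out;		/* error path still drops the ref */
	}

	if (modified)
		printf("stamping mtime/ctime via a delegated setattr\n");

out:
	put_stid(stid);			/* balanced on every return */
	return err;
}

int main(void)
{
	struct fake_stid stid = { .refcount = 0 };

	getattr_conflict(&stid, 0, 1);
	getattr_conflict(&stid, -11, 0);
	printf("refcount after both calls: %d\n", stid.refcount);	/* expect 0 */
	return 0;
}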
@@ -3545,6 +3545,9 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 	args.dentry = dentry;
 	args.ignore_crossmnt = (ignore_crossmnt != 0);
 	args.acl = NULL;
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+	args.context = NULL;
+#endif
 
 	/*
 	 * Make a local copy of the attribute bitmap that can be modified.
@@ -3562,7 +3565,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 	}
 	args.size = 0;
 	if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
-		status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
+		status = nfsd4_deleg_getattr_conflict(rqstp, dentry,
 						      &file_modified, &size);
 		if (status)
 			goto out;
@@ -3617,7 +3620,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 	args.contextsupport = false;
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-	args.context = NULL;
 	if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
 	     attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
 		if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
@@ -781,5 +781,5 @@ static inline bool try_to_expire_client(struct nfs4_client *clp)
 }
 
 extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
-		struct inode *inode, bool *file_modified, u64 *size);
+		struct dentry *dentry, bool *file_modified, u64 *size);
 #endif /* NFSD4_STATE_H */
@@ -210,6 +210,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
 #define ATTR_TIMES_SET	(1 << 16)
 #define ATTR_TOUCH	(1 << 17)
+#define ATTR_DELEG	(1 << 18) /* Delegated attrs. Don't break write delegations */
 
 /*
  * Whiteout is represented by a char device. The following constants define the
@@ -85,6 +85,10 @@ __cvdso_getrandom_data(const struct vdso_rng_data *rng_info, void *buffer, size_
 	if (unlikely(((unsigned long)opaque_state & ~PAGE_MASK) + sizeof(*state) > PAGE_SIZE))
 		return -EFAULT;
 
+	/* Handle unexpected flags by falling back to the kernel. */
+	if (unlikely(flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)))
+		goto fallback_syscall;
+
 	/* If the caller passes the wrong size, which might happen due to CRIU, fallback. */
 	if (unlikely(opaque_len != sizeof(*state)))
 		goto fallback_syscall;
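The added check above sends any getrandom() call carrying unknown flag bits to the regular syscall instead of the vDSO fast path. A minimal user-space sketch of the same mask-and-fallback test; the GRND_* values follow the uapi definitions, and the fallback here is only a print rather than a real syscall:

#include <stdio.h>

#define GRND_NONBLOCK	0x0001
#define GRND_RANDOM	0x0002
#define GRND_INSECURE	0x0004

static long getrandom_sketch(unsigned int flags)
{
	/* Any bit outside the known set forces the slow path. */
	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) {
		printf("unknown flag 0x%x: falling back to the syscall\n", flags);
		return -1;
	}
	printf("flags 0x%x handled in the fast path\n", flags);
	return 0;
}

int main(void)
{
	getrandom_sketch(GRND_NONBLOCK);
	getrandom_sketch(0x80);		/* unexpected flag */
	return 0;
}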