linux/fs/cifs/dfs_cache.h
Paulo Alcantara 6be2ea33a4 cifs: avoid potential races when handling multiple dfs tcons
Now that a DFS tcon manages its own list of DFS referrals and
sessions, there is no point in having a single worker refresh the
referrals of all DFS tcons.  Make refreshing faster and less prone to
race conditions when several mounts are present by queueing one worker
per DFS tcon, which takes care of refreshing only the DFS referrals
related to that tcon.

Cc: stable@vger.kernel.org # v6.2+
Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
2023-05-03 23:29:47 -05:00
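
For illustration, a minimal sketch (not part of the commit or of the header below) of how such a per-tcon refresh worker might be re-armed against dfscache_wq using the cache TTL exported by this header; the dfs_cache_work member on struct cifs_tcon and the helper name are assumptions made for the example:

/* Illustrative sketch only; dfs_cache_arm_refresh() and tcon->dfs_cache_work are assumed names. */
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "dfs_cache.h"

static void dfs_cache_arm_refresh(struct cifs_tcon *tcon)
{
        /*
         * Queue this tcon's own worker so that only its DFS referrals are
         * refreshed, one cache TTL period from now.
         */
        queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
                           dfs_cache_get_ttl() * HZ);
}

Re-arming a per-object delayed work like this, instead of walking every tcon from one global worker, is what keeps each refresh independent of the others.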

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#ifndef _CIFS_DFS_CACHE_H
#define _CIFS_DFS_CACHE_H

#include <linux/nls.h>
#include <linux/list.h>
#include <linux/uuid.h>
#include "cifsglob.h"

extern struct workqueue_struct *dfscache_wq;
extern atomic_t dfs_cache_ttl;

#define DFS_CACHE_TGT_LIST_INIT(var) \
        { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }

struct dfs_cache_tgt_list {
        int tl_numtgts;
        struct list_head tl_list;
};

struct dfs_cache_tgt_iterator {
        char *it_name;
        int it_path_consumed;
        struct list_head it_list;
};

int dfs_cache_init(void);
void dfs_cache_destroy(void);
extern const struct proc_ops dfscache_proc_ops;

int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
                   int remap, const char *path, struct dfs_info3_param *ref,
                   struct dfs_cache_tgt_list *tgt_list);
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
                         struct dfs_cache_tgt_list *tgt_list);
void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
                               struct dfs_info3_param *ref);
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
                            char **prefix);
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
void dfs_cache_refresh(struct work_struct *work);

static inline struct dfs_cache_tgt_iterator *
dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
                       struct dfs_cache_tgt_iterator *it)
{
        if (!tl || list_empty(&tl->tl_list) || !it ||
            list_is_last(&it->it_list, &tl->tl_list))
                return NULL;
        return list_next_entry(it, it_list);
}

static inline struct dfs_cache_tgt_iterator *
dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl)
{
        if (!tl)
                return NULL;
        return list_first_entry_or_null(&tl->tl_list,
                                        struct dfs_cache_tgt_iterator,
                                        it_list);
}

static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl)
{
        struct dfs_cache_tgt_iterator *it, *nit;

        if (!tl || list_empty(&tl->tl_list))
                return;
        list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) {
                list_del(&it->it_list);
                kfree(it->it_name);
                kfree(it);
        }
        tl->tl_numtgts = 0;
}

static inline const char *
dfs_cache_get_tgt_name(const struct dfs_cache_tgt_iterator *it)
{
        return it ? it->it_name : NULL;
}

static inline int
dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
{
        return tl ? tl->tl_numtgts : 0;
}

static inline int dfs_cache_get_ttl(void)
{
        return atomic_read(&dfs_cache_ttl);
}

#endif /* _CIFS_DFS_CACHE_H */
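
A minimal usage sketch of the lookup and target-iterator helpers above (illustrative only, not part of dfs_cache.h); print_cached_targets() is a hypothetical wrapper, and passing NULL for the unused referral argument of dfs_cache_noreq_find() is an assumption made for the example:

/* Illustrative usage sketch; not part of the header. */
#include <linux/printk.h>
#include "dfs_cache.h"

static int print_cached_targets(const char *path)
{
        struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
        struct dfs_cache_tgt_iterator *it;
        int rc;

        /* Look up @path in the referral cache without issuing a new referral request. */
        rc = dfs_cache_noreq_find(path, NULL, &tl);
        if (rc)
                return rc;

        /* Walk every cached target and log its name. */
        for (it = dfs_cache_get_tgt_iterator(&tl); it;
             it = dfs_cache_get_next_tgt(&tl, it))
                pr_info("target: %s\n", dfs_cache_get_tgt_name(it));

        /* Release the target list populated by the lookup. */
        dfs_cache_free_tgts(&tl);
        return 0;
}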