// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/fs/lockd/svc.c
*
* This is the central lockd service.
*
* FIXME: Separate the lockd NFS server functionality from the lockd NFS
* client functionality. Oh why didn't Sun create two separate
* services in the first place?
*
* Authors: Olaf Kirch (okir@monad.swb.de)
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/smp.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/inetdevice.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svc_xprt.h>
#include <net/ip.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <linux/lockd/lockd.h>
#include <linux/nfs.h>
#include "netns.h"
#include "procfs.h"
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE)
static struct svc_program nlmsvc_program;
const struct nlmsvc_binding *nlmsvc_ops;
EXPORT_SYMBOL_GPL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
static struct svc_serv *nlmsvc_serv;
unsigned long nlmsvc_timeout;
static void nlmsvc_request_retry(struct timer_list *tl)
{
svc_wake_up(nlmsvc_serv);
}
DEFINE_TIMER(nlmsvc_retry, nlmsvc_request_retry);
unsigned int lockd_net_id;
/*
* These can be set at insmod time (useful for NFS as root filesystem),
* and also changed through the sysctl interface. -- Jamie Lokier, Aug 2003
*/
static unsigned long nlm_grace_period;
static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
static int nlm_udpport, nlm_tcpport;
/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */
static unsigned int nlm_max_connections = 1024;
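/*
 * Illustrative usage only (assumed example, not part of this file): with
 * lockd built as a module these can be passed at load time, e.g.
 *
 *	modprobe lockd nlm_timeout=10 nlm_tcpport=4045 nlm_udpport=4045
 *
 * Once loaded, the same knobs also appear under
 * /sys/module/lockd/parameters/ and, via the sysctl table below, under
 * /proc/sys/fs/nfs/.
 */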
/*
* Constants needed for the sysctl interface.
*/
static const unsigned long nlm_grace_period_min = 0;
static const unsigned long nlm_grace_period_max = 240;
static const unsigned long nlm_timeout_min = 3;
static const unsigned long nlm_timeout_max = 20;
#ifdef CONFIG_SYSCTL
static const int nlm_port_min = 0, nlm_port_max = 65535;
static struct ctl_table_header * nlm_sysctl_table;
#endif
static unsigned long get_lockd_grace_period(void)
{
/* Note: nlm_timeout should always be nonzero */
if (nlm_grace_period)
return roundup(nlm_grace_period, nlm_timeout) * HZ;
else
return nlm_timeout * 5 * HZ;
}
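/*
 * Worked example of the rounding above (illustrative): with
 * nlm_grace_period = 50 and nlm_timeout = 7, the grace period becomes
 * roundup(50, 7) * HZ = 56 seconds' worth of jiffies; with
 * nlm_grace_period = 0 it falls back to 5 * nlm_timeout = 35 seconds.
 */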
static void grace_ender(struct work_struct *grace)
{
struct delayed_work *dwork = to_delayed_work(grace);
struct lockd_net *ln = container_of(dwork, struct lockd_net,
grace_period_end);
locks_end_grace(&ln->lockd_manager);
}
static void set_grace_period(struct net *net)
{
unsigned long grace_period = get_lockd_grace_period();
struct lockd_net *ln = net_generic(net, lockd_net_id);
locks_start_grace(net, &ln->lockd_manager);
cancel_delayed_work_sync(&ln->grace_period_end);
schedule_delayed_work(&ln->grace_period_end, grace_period);
}
/*
* This is the lockd kernel thread
*/
static int
lockd(void *vrqstp)
{
	struct svc_rqst *rqstp = vrqstp;
	struct net *net = &init_net;
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!svc_thread_should_stop(rqstp)) {
		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		nlmsvc_retry_blocked(rqstp);
		svc_recv(rqstp);
	}
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();

	nlm_shutdown_hosts();
	cancel_delayed_work_sync(&ln->grace_period_end);
	locks_end_grace(&ln->lockd_manager);

	dprintk("lockd_down: service stopped\n");

	svc_exit_thread(rqstp);
	return 0;
}
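/*
 * Create a listener for the given transport class and address family only
 * if one does not already exist; an existing transport is re-found and
 * released again, which makes repeated calls harmless.
 */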
static int create_lockd_listener(struct svc_serv *serv, const char *name,
struct net *net, const int family,
const unsigned short port,
const struct cred *cred)
{
struct svc_xprt *xprt;
xprt = svc_find_xprt(serv, name, net, family, 0);
if (xprt == NULL)
return svc_xprt_create(serv, name, net, family, port,
SVC_SOCK_DEFAULTS, cred);
svc_xprt_put(xprt);
return 0;
}
static int create_lockd_family(struct svc_serv *serv, struct net *net,
const int family, const struct cred *cred)
{
int err;
err = create_lockd_listener(serv, "udp", net, family, nlm_udpport,
cred);
if (err < 0)
return err;
return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport,
cred);
}
/*
* Ensure there are active UDP and TCP listeners for lockd.
*
* Even if we have only TCP NFS mounts and/or TCP NFSDs, some
* local services (such as rpc.statd) still require UDP, and
* some NFS servers do not yet support NLM over TCP.
*
* Returns zero if all listeners are available; otherwise a
* negative errno value is returned.
*/
static int make_socks(struct svc_serv *serv, struct net *net,
		      const struct cred *cred)
{
	static int warned;
	int err;

	err = create_lockd_family(serv, net, PF_INET, cred);
	if (err < 0)
		goto out_err;

	err = create_lockd_family(serv, net, PF_INET6, cred);
	if (err < 0 && err != -EAFNOSUPPORT)
		goto out_err;

	warned = 0;
	return 0;

out_err:
	if (warned++ == 0)
		printk(KERN_WARNING
		       "lockd_up: makesock failed, error=%d\n", err);
	svc_xprt_destroy_all(serv, net);
	svc_rpcb_cleanup(serv, net);
	return err;
}
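/*
 * Per-network-namespace bring-up: the first lockd_up() in a namespace binds
 * the service, creates its listeners and starts a grace period; later calls
 * only bump the per-net ln->nlmsvc_users count.
 */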
static int lockd_up_net(struct svc_serv *serv, struct net *net,
const struct cred *cred)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
int error;
if (ln->nlmsvc_users++)
return 0;
error = svc_bind(serv, net);
if (error)
goto err_bind;
error = make_socks(serv, net, cred);
if (error < 0)
goto err_bind;
set_grace_period(net);
dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum);
return 0;
err_bind:
ln->nlmsvc_users--;
return error;
}
static void lockd_down_net(struct svc_serv *serv, struct net *net)
{
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	if (ln->nlmsvc_users) {
		if (--ln->nlmsvc_users == 0) {
			nlm_shutdown_hosts_net(net);
			cancel_delayed_work_sync(&ln->grace_period_end);
			locks_end_grace(&ln->lockd_manager);
			svc_xprt_destroy_all(serv, net);
			svc_rpcb_cleanup(serv, net);
		}
	} else {
		pr_err("%s: no users! net=%x\n",
		       __func__, net->ns.inum);
		BUG();
	}
}
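/*
 * The notifiers below react to local addresses going away (NETDEV_DOWN):
 * they ask the RPC layer to close temporary transports bound to the
 * removed address so that stale client connections do not linger.
 */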
static int lockd_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct sockaddr_in sin;
if (event != NETDEV_DOWN)
goto out;
if (nlmsvc_serv) {
dprintk("lockd_inetaddr_event: removed %pI4\n",
&ifa->ifa_local);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin);
}
out:
return NOTIFY_DONE;
}
static struct notifier_block lockd_inetaddr_notifier = {
.notifier_call = lockd_inetaddr_event,
};
#if IS_ENABLED(CONFIG_IPV6)
static int lockd_inet6addr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct sockaddr_in6 sin6;
if (event != NETDEV_DOWN)
goto out;
if (nlmsvc_serv) {
dprintk("lockd_inet6addr_event: removed %pI6\n", &ifa->addr);
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ifa->addr;
if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin6);
}
out:
return NOTIFY_DONE;
}
static struct notifier_block lockd_inet6addr_notifier = {
.notifier_call = lockd_inet6addr_event,
};
#endif
static int lockd_get(void)
{
struct svc_serv *serv;
int error;
if (nlmsvc_serv) {
nlmsvc_users++;
return 0;
}
/*
* Sanity check: if there's no pid,
* we should be the first user ...
*/
if (nlmsvc_users)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
if (!nlm_timeout)
nlm_timeout = LOCKD_DFLT_TIMEO;
nlmsvc_timeout = nlm_timeout * HZ;
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, lockd);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
return -ENOMEM;
}
serv->sv_maxconn = nlm_max_connections;
error = svc_set_num_threads(serv, NULL, 1);
/* The thread now holds the only reference */
svc_put(serv);
if (error < 0)
return error;
nlmsvc_serv = serv;
register_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
dprintk("lockd_up: service created\n");
nlmsvc_users++;
return 0;
}
static void lockd_put(void)
{
if (WARN(nlmsvc_users <= 0, "lockd_down: no users!\n"))
return;
if (--nlmsvc_users)
return;
unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
svc_get(nlmsvc_serv);
svc_set_num_threads(nlmsvc_serv, NULL, 0);
svc_put(nlmsvc_serv);
timer_delete_sync(&nlmsvc_retry);
nlmsvc_serv = NULL;
dprintk("lockd_down: service destroyed\n");
}
/*
* Bring up the lockd process if it's not already up.
*/
int lockd_up(struct net *net, const struct cred *cred)
{
int error;
mutex_lock(&nlmsvc_mutex);
error = lockd_get();
if (error)
goto err;
error = lockd_up_net(nlmsvc_serv, net, cred);
if (error < 0) {
lockd_put();
goto err;
}
err:
mutex_unlock(&nlmsvc_mutex);
return error;
}
EXPORT_SYMBOL_GPL(lockd_up);
/*
* Decrement the user count and bring down lockd if we're the last.
*/
void
lockd_down(struct net *net)
{
mutex_lock(&nlmsvc_mutex);
lockd_down_net(nlmsvc_serv, net);
lockd_put();
mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL_GPL(lockd_down);
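/*
 * Sketch of the expected caller pattern (illustrative only, not taken from
 * this file): a user of the NLM service pairs lockd_up() with a later
 * lockd_down() on the same namespace.
 *
 *	error = lockd_up(net, cred);
 *	if (error)
 *		return error;
 *	...
 *	lockd_down(net);
 */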
#ifdef CONFIG_SYSCTL
/*
* Sysctl parameters (same as module parameters, different interface).
*/
static struct ctl_table nlm_sysctls[] = {
{
.procname = "nlm_grace_period",
.data = &nlm_grace_period,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_grace_period_min,
.extra2 = (unsigned long *) &nlm_grace_period_max,
},
{
.procname = "nlm_timeout",
.data = &nlm_timeout,
.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_timeout_min,
.extra2 = (unsigned long *) &nlm_timeout_max,
},
{
.procname = "nlm_udpport",
.data = &nlm_udpport,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
.procname = "nlm_tcpport",
.data = &nlm_tcpport,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
.procname = "nsm_use_hostnames",
.data = &nsm_use_hostnames,
.maxlen = sizeof(bool),
.mode = 0644,
.proc_handler = proc_dobool,
},
{
.procname = "nsm_local_state",
.data = &nsm_local_state,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
#endif /* CONFIG_SYSCTL */
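/*
 * Illustrative runtime tuning through the table above; the paths assume the
 * "fs/nfs" registration done in init_nlm() below:
 *
 *	echo 10 > /proc/sys/fs/nfs/nlm_timeout
 *	echo 90 > /proc/sys/fs/nfs/nlm_grace_period
 *
 * Values outside the nlm_*_min/nlm_*_max bounds are rejected by the
 * minmax proc handlers.
 */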
/*
* Module (and sysfs) parameters.
*/
#define param_set_min_max(name, type, which_strtol, min, max) \
static int param_set_##name(const char *val, const struct kernel_param *kp) \
{ \
char *endp; \
__typeof__(type) num = which_strtol(val, &endp, 0); \
if (endp == val || *endp || num < (min) || num > (max)) \
return -EINVAL; \
*((type *) kp->arg) = num; \
return 0; \
}
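/*
 * Illustrative expansion: param_set_min_max(port, int, simple_strtol, 0,
 * 65535) below generates roughly
 *
 *	static int param_set_port(const char *val,
 *				  const struct kernel_param *kp)
 *	{
 *		char *endp;
 *		int num = simple_strtol(val, &endp, 0);
 *
 *		if (endp == val || *endp || num < 0 || num > 65535)
 *			return -EINVAL;
 *		*((int *) kp->arg) = num;
 *		return 0;
 *	}
 */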
static inline int is_callback(u32 proc)
{
return proc == NLMPROC_GRANTED
|| proc == NLMPROC_GRANTED_MSG
|| proc == NLMPROC_TEST_RES
|| proc == NLMPROC_LOCK_RES
|| proc == NLMPROC_CANCEL_RES
|| proc == NLMPROC_UNLOCK_RES
|| proc == NLMPROC_NSM_NOTIFY;
}
static enum svc_auth_status lockd_authenticate(struct svc_rqst *rqstp)
{
	rqstp->rq_client = NULL;
	switch (rqstp->rq_authop->flavour) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		rqstp->rq_auth_stat = rpc_auth_ok;
		if (rqstp->rq_proc == 0)
			return SVC_OK;
		if (is_callback(rqstp->rq_proc)) {
			/* Leave it to individual procedures to
			 * call nlmsvc_lookup_host(rqstp)
			 */
			return SVC_OK;
		}
		return svc_set_client(rqstp);
	}
	rqstp->rq_auth_stat = rpc_autherr_badcred;
	return SVC_DENIED;
}
param_set_min_max(port, int, simple_strtol, 0, 65535)
param_set_min_max(grace_period, unsigned long, simple_strtoul,
nlm_grace_period_min, nlm_grace_period_max)
param_set_min_max(timeout, unsigned long, simple_strtoul,
nlm_timeout_min, nlm_timeout_max)
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION ".");
MODULE_LICENSE("GPL");
module_param_call(nlm_grace_period, param_set_grace_period, param_get_ulong,
&nlm_grace_period, 0644);
module_param_call(nlm_timeout, param_set_timeout, param_get_ulong,
&nlm_timeout, 0644);
module_param_call(nlm_udpport, param_set_port, param_get_int,
&nlm_udpport, 0644);
module_param_call(nlm_tcpport, param_set_port, param_get_int,
&nlm_tcpport, 0644);
module_param(nsm_use_hostnames, bool, 0644);
module_param(nlm_max_connections, uint, 0644);
static int lockd_init_net(struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
INIT_LIST_HEAD(&ln->lockd_manager.list);
ln->lockd_manager.block_opens = false;
INIT_LIST_HEAD(&ln->nsm_handles);
return 0;
}
static void lockd_exit_net(struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
WARN_ONCE(!list_empty(&ln->lockd_manager.list),
"net %x %s: lockd_manager.list is not empty\n",
net->ns.inum, __func__);
WARN_ONCE(!list_empty(&ln->nsm_handles),
"net %x %s: nsm_handles list is not empty\n",
net->ns.inum, __func__);
WARN_ONCE(delayed_work_pending(&ln->grace_period_end),
"net %x %s: grace_period_end was not cancelled\n",
net->ns.inum, __func__);
}
static struct pernet_operations lockd_net_ops = {
.init = lockd_init_net,
.exit = lockd_exit_net,
.id = &lockd_net_id,
.size = sizeof(struct lockd_net),
};
/*
* Initialising and terminating the module.
*/
static int __init init_nlm(void)
{
int err;
#ifdef CONFIG_SYSCTL
err = -ENOMEM;
nlm_sysctl_table = register_sysctl("fs/nfs", nlm_sysctls);
if (nlm_sysctl_table == NULL)
goto err_sysctl;
#endif
err = register_pernet_subsys(&lockd_net_ops);
if (err)
goto err_pernet;
err = lockd_create_procfs();
if (err)
goto err_procfs;
return 0;
err_procfs:
unregister_pernet_subsys(&lockd_net_ops);
err_pernet:
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
err_sysctl:
#endif
return err;
}
static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
lockd_remove_procfs();
unregister_pernet_subsys(&lockd_net_ops);
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
#endif
}
module_init(init_nlm);
module_exit(exit_nlm);
/**
* nlmsvc_dispatch - Process an NLM Request
* @rqstp: incoming request
*
* Return values:
* %0: Processing complete; do not send a Reply
* %1: Processing complete; send Reply in rqstp->rq_res
*/
static int nlmsvc_dispatch(struct svc_rqst *rqstp)
{
	const struct svc_procedure *procp = rqstp->rq_procinfo;
	__be32 *statp = rqstp->rq_accept_statp;

	if (!procp->pc_decode(rqstp, &rqstp->rq_arg_stream))
		goto out_decode_err;

	*statp = procp->pc_func(rqstp);
	if (*statp == rpc_drop_reply)
		return 0;
	if (*statp != rpc_success)
		return 1;

	if (!procp->pc_encode(rqstp, &rqstp->rq_res_stream))
		goto out_encode_err;

	return 1;

out_decode_err:
	*statp = rpc_garbage_args;
	return 1;

out_encode_err:
	*statp = rpc_system_err;
	return 1;
}
/*
* Define NLM program and procedures
*/
static DEFINE_PER_CPU_ALIGNED(unsigned long, nlmsvc_version1_count[17]);
static const struct svc_version nlmsvc_version1 = {
.vs_vers = 1,
.vs_nproc = 17,
.vs_proc = nlmsvc_procedures,
.vs_count = nlmsvc_version1_count,
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nlmsvc_version3_count[ARRAY_SIZE(nlmsvc_procedures)]);
static const struct svc_version nlmsvc_version3 = {
.vs_vers = 3,
.vs_nproc = ARRAY_SIZE(nlmsvc_procedures),
.vs_proc = nlmsvc_procedures,
.vs_count = nlmsvc_version3_count,
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#ifdef CONFIG_LOCKD_V4
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nlmsvc_version4_count[ARRAY_SIZE(nlmsvc_procedures4)]);
static const struct svc_version nlmsvc_version4 = {
.vs_vers = 4,
.vs_nproc = ARRAY_SIZE(nlmsvc_procedures4),
.vs_proc = nlmsvc_procedures4,
.vs_count = nlmsvc_version4_count,
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#endif
static const struct svc_version *nlmsvc_version[] = {
[1] = &nlmsvc_version1,
[3] = &nlmsvc_version3,
#ifdef CONFIG_LOCKD_V4
[4] = &nlmsvc_version4,
#endif
};
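/*
 * The array index is the NLM protocol version; entries left NULL (version 2,
 * and version 4 without CONFIG_LOCKD_V4) are treated as unsupported versions
 * by the generic request setup code.
 */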
static struct svc_stat nlmsvc_stats;
#define NLM_NRVERS ARRAY_SIZE(nlmsvc_version)
static struct svc_program nlmsvc_program = {
.pg_prog = NLM_PROGRAM, /* program number */
.pg_nvers = NLM_NRVERS, /* number of entries in nlmsvc_version */
.pg_vers = nlmsvc_version, /* version table */
.pg_name = "lockd", /* service name */
.pg_class = "nfsd", /* share authentication with nfsd */
.pg_stats = &nlmsvc_stats, /* stats table */
.pg_authenticate = &lockd_authenticate, /* export authentication */
.pg_init_request = svc_generic_init_request,
.pg_rpcbind_set = svc_generic_rpcbind_set,
};