auth_gss.c revision 5fccc5b52ee07d07a74ce53c6f174bff81e26a16
1/*
2 * linux/net/sunrpc/auth_gss/auth_gss.c
3 *
4 * RPCSEC_GSS client authentication.
5 *
6 *  Copyright (c) 2000 The Regents of the University of Michigan.
7 *  All rights reserved.
8 *
9 *  Dug Song       <dugsong@monkey.org>
10 *  Andy Adamson   <andros@umich.edu>
11 *
12 *  Redistribution and use in source and binary forms, with or without
13 *  modification, are permitted provided that the following conditions
14 *  are met:
15 *
16 *  1. Redistributions of source code must retain the above copyright
17 *     notice, this list of conditions and the following disclaimer.
18 *  2. Redistributions in binary form must reproduce the above copyright
19 *     notice, this list of conditions and the following disclaimer in the
20 *     documentation and/or other materials provided with the distribution.
21 *  3. Neither the name of the University nor the names of its
22 *     contributors may be used to endorse or promote products derived
23 *     from this software without specific prior written permission.
24 *
25 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/types.h>
42#include <linux/slab.h>
43#include <linux/sched.h>
44#include <linux/pagemap.h>
45#include <linux/sunrpc/clnt.h>
46#include <linux/sunrpc/auth.h>
47#include <linux/sunrpc/auth_gss.h>
48#include <linux/sunrpc/svcauth_gss.h>
49#include <linux/sunrpc/gss_err.h>
50#include <linux/workqueue.h>
51#include <linux/sunrpc/rpc_pipe_fs.h>
52#include <linux/sunrpc/gss_api.h>
53#include <asm/uaccess.h>
54#include <linux/hashtable.h>
55
56#include "../netns.h"
57
58static const struct rpc_authops authgss_ops;
59
60static const struct rpc_credops gss_credops;
61static const struct rpc_credops gss_nullops;
62
63#define GSS_RETRY_EXPIRED 5
64static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
65
66#define GSS_KEY_EXPIRE_TIMEO 240
67static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;
68
69#ifdef RPC_DEBUG
70# define RPCDBG_FACILITY	RPCDBG_AUTH
71#endif
72
73#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
74/* length of a krb5 verifier (48), plus data added before arguments when
75 * using integrity (two 4-byte integers): */
76#define GSS_VERF_SLACK		100
77
78static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
79static DEFINE_SPINLOCK(gss_auth_hash_lock);
80
81struct gss_pipe {
82	struct rpc_pipe_dir_object pdo;
83	struct rpc_pipe *pipe;
84	struct rpc_clnt *clnt;
85	const char *name;
86	struct kref kref;
87};
88
89struct gss_auth {
90	struct kref kref;
91	struct hlist_node hash;
92	struct rpc_auth rpc_auth;
93	struct gss_api_mech *mech;
94	enum rpc_gss_svc service;
95	struct rpc_clnt *client;
96	struct net *net;
97	/*
98	 * There are two upcall pipes; gss_pipe[1], named "gssd", is used
99	 * for the new text-based upcall; gss_pipe[0] is named after the
100	 * mechanism (for example, "krb5") and exists for
101	 * backwards compatibility with older versions of gssd.
102	 */
103	struct gss_pipe *gss_pipe[2];
104	const char *target_name;
105};
106
107/* pipe_version >= 0 if and only if someone has a pipe open. */
108static DEFINE_SPINLOCK(pipe_version_lock);
109static struct rpc_wait_queue pipe_version_rpc_waitqueue;
110static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
111
112static void gss_free_ctx(struct gss_cl_ctx *);
113static const struct rpc_pipe_ops gss_upcall_ops_v0;
114static const struct rpc_pipe_ops gss_upcall_ops_v1;
115
116static inline struct gss_cl_ctx *
117gss_get_ctx(struct gss_cl_ctx *ctx)
118{
119	atomic_inc(&ctx->count);
120	return ctx;
121}
122
123static inline void
124gss_put_ctx(struct gss_cl_ctx *ctx)
125{
126	if (atomic_dec_and_test(&ctx->count))
127		gss_free_ctx(ctx);
128}
129
130/* gss_cred_set_ctx:
131 * called from gss_handle_downcall_result and gss_create_upcall in order
132 * to set the gss context. The actual exchange of an old context
133 * and a new one is protected by the pipe->lock.
134 */
135static void
136gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
137{
138	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
139
140	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
141		return;
142	gss_get_ctx(ctx);
143	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
144	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
145	smp_mb__before_clear_bit();
146	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
147}
148
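/*
 * Helpers for parsing the buffers exchanged with gssd: both return a
 * pointer just past the bytes they consumed, or an ERR_PTR() when the
 * read would run past 'end'. simple_get_netobj() also copies a
 * length-prefixed blob into a newly allocated xdr_netobj.
 */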
149static const void *
150simple_get_bytes(const void *p, const void *end, void *res, size_t len)
151{
152	const void *q = (const void *)((const char *)p + len);
153	if (unlikely(q > end || q < p))
154		return ERR_PTR(-EFAULT);
155	memcpy(res, p, len);
156	return q;
157}
158
159static inline const void *
160simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
161{
162	const void *q;
163	unsigned int len;
164
165	p = simple_get_bytes(p, end, &len, sizeof(len));
166	if (IS_ERR(p))
167		return p;
168	q = (const void *)((const char *)p + len);
169	if (unlikely(q > end || q < p))
170		return ERR_PTR(-EFAULT);
171	dest->data = kmemdup(p, len, GFP_NOFS);
172	if (unlikely(dest->data == NULL))
173		return ERR_PTR(-ENOMEM);
174	dest->len = len;
175	return q;
176}
177
178static struct gss_cl_ctx *
179gss_cred_get_ctx(struct rpc_cred *cred)
180{
181	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
182	struct gss_cl_ctx *ctx = NULL;
183
184	rcu_read_lock();
185	if (gss_cred->gc_ctx)
186		ctx = gss_get_ctx(gss_cred->gc_ctx);
187	rcu_read_unlock();
188	return ctx;
189}
190
191static struct gss_cl_ctx *
192gss_alloc_context(void)
193{
194	struct gss_cl_ctx *ctx;
195
196	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
197	if (ctx != NULL) {
198		ctx->gc_proc = RPC_GSS_PROC_DATA;
199		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
200		spin_lock_init(&ctx->gc_seq_lock);
201		atomic_set(&ctx->count,1);
202	}
203	return ctx;
204}
205
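/*
 * Parse the body of a gssd downcall into a gss_cl_ctx. The fields, in the
 * order consumed below, are: the context lifetime in seconds, the
 * sequence window size (zero means an error code follows instead), the
 * opaque wire context handle, and a length-prefixed mechanism-specific
 * blob that is imported with gss_import_sec_context().
 */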
206#define GSSD_MIN_TIMEOUT (60 * 60)
207static const void *
208gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
209{
210	const void *q;
211	unsigned int seclen;
212	unsigned int timeout;
213	unsigned long now = jiffies;
214	u32 window_size;
215	int ret;
216
217	/* First unsigned int gives the remaining lifetime in seconds of the
218	 * credential - e.g. the remaining TGT lifetime for Kerberos or
219	 * the -t value passed to GSSD.
220	 */
221	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
222	if (IS_ERR(p))
223		goto err;
224	if (timeout == 0)
225		timeout = GSSD_MIN_TIMEOUT;
226	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
227	/* Sequence number window. Determines the maximum number of
228	 * simultaneous requests
229	 */
230	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
231	if (IS_ERR(p))
232		goto err;
233	ctx->gc_win = window_size;
234	/* gssd signals an error by passing ctx->gc_win = 0: */
235	if (ctx->gc_win == 0) {
236		/*
237		 * in which case, p points to an error code. Anything other
238		 * than -EKEYEXPIRED gets converted to -EACCES.
239		 */
240		p = simple_get_bytes(p, end, &ret, sizeof(ret));
241		if (!IS_ERR(p))
242			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
243						    ERR_PTR(-EACCES);
244		goto err;
245	}
246	/* copy the opaque wire context */
247	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
248	if (IS_ERR(p))
249		goto err;
250	/* import the opaque security context */
251	p  = simple_get_bytes(p, end, &seclen, sizeof(seclen));
252	if (IS_ERR(p))
253		goto err;
254	q = (const void *)((const char *)p + seclen);
255	if (unlikely(q > end || q < p)) {
256		p = ERR_PTR(-EFAULT);
257		goto err;
258	}
259	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
260	if (ret < 0) {
261		p = ERR_PTR(ret);
262		goto err;
263	}
264	dprintk("RPC:       %s Success. gc_expiry %lu now %lu timeout %u\n",
265		__func__, ctx->gc_expiry, now, timeout);
266	return q;
267err:
268	dprintk("RPC:       %s returns error %ld\n", __func__, -PTR_ERR(p));
269	return p;
270}
271
272#define UPCALL_BUF_LEN 128
273
274struct gss_upcall_msg {
275	atomic_t count;
276	kuid_t	uid;
277	struct rpc_pipe_msg msg;
278	struct list_head list;
279	struct gss_auth *auth;
280	struct rpc_pipe *pipe;
281	struct rpc_wait_queue rpc_waitqueue;
282	wait_queue_head_t waitqueue;
283	struct gss_cl_ctx *ctx;
284	char databuf[UPCALL_BUF_LEN];
285};
286
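/*
 * get_pipe_version() takes a reference on the per-net pipe user count and
 * returns the upcall format gssd has opened (0 for the legacy binary
 * pipe, 1 for the text-based pipe), or -EAGAIN if no pipe is open yet.
 * put_pipe_version() drops the reference and resets pipe_version to -1
 * when the last user goes away.
 */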
287static int get_pipe_version(struct net *net)
288{
289	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
290	int ret;
291
292	spin_lock(&pipe_version_lock);
293	if (sn->pipe_version >= 0) {
294		atomic_inc(&sn->pipe_users);
295		ret = sn->pipe_version;
296	} else
297		ret = -EAGAIN;
298	spin_unlock(&pipe_version_lock);
299	return ret;
300}
301
302static void put_pipe_version(struct net *net)
303{
304	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
305
306	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
307		sn->pipe_version = -1;
308		spin_unlock(&pipe_version_lock);
309	}
310}
311
312static void
313gss_release_msg(struct gss_upcall_msg *gss_msg)
314{
315	struct net *net = gss_msg->auth->net;
316	if (!atomic_dec_and_test(&gss_msg->count))
317		return;
318	put_pipe_version(net);
319	BUG_ON(!list_empty(&gss_msg->list));
320	if (gss_msg->ctx != NULL)
321		gss_put_ctx(gss_msg->ctx);
322	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
323	kfree(gss_msg);
324}
325
326static struct gss_upcall_msg *
327__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
328{
329	struct gss_upcall_msg *pos;
330	list_for_each_entry(pos, &pipe->in_downcall, list) {
331		if (!uid_eq(pos->uid, uid))
332			continue;
333		atomic_inc(&pos->count);
334		dprintk("RPC:       %s found msg %p\n", __func__, pos);
335		return pos;
336	}
337	dprintk("RPC:       %s found nothing\n", __func__);
338	return NULL;
339}
340
341/* Try to add an upcall to the pipefs queue.
342 * If an upcall owned by our uid already exists, then we return a reference
343 * to that upcall instead of adding the new upcall.
344 */
345static inline struct gss_upcall_msg *
346gss_add_msg(struct gss_upcall_msg *gss_msg)
347{
348	struct rpc_pipe *pipe = gss_msg->pipe;
349	struct gss_upcall_msg *old;
350
351	spin_lock(&pipe->lock);
352	old = __gss_find_upcall(pipe, gss_msg->uid);
353	if (old == NULL) {
354		atomic_inc(&gss_msg->count);
355		list_add(&gss_msg->list, &pipe->in_downcall);
356	} else
357		gss_msg = old;
358	spin_unlock(&pipe->lock);
359	return gss_msg;
360}
361
362static void
363__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
364{
365	list_del_init(&gss_msg->list);
366	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
367	wake_up_all(&gss_msg->waitqueue);
368	atomic_dec(&gss_msg->count);
369}
370
371static void
372gss_unhash_msg(struct gss_upcall_msg *gss_msg)
373{
374	struct rpc_pipe *pipe = gss_msg->pipe;
375
376	if (list_empty(&gss_msg->list))
377		return;
378	spin_lock(&pipe->lock);
379	if (!list_empty(&gss_msg->list))
380		__gss_unhash_msg(gss_msg);
381	spin_unlock(&pipe->lock);
382}
383
384static void
385gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
386{
387	switch (gss_msg->msg.errno) {
388	case 0:
389		if (gss_msg->ctx == NULL)
390			break;
391		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
392		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
393		break;
394	case -EKEYEXPIRED:
395		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
396	}
397	gss_cred->gc_upcall_timestamp = jiffies;
398	gss_cred->gc_upcall = NULL;
399	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
400}
401
402static void
403gss_upcall_callback(struct rpc_task *task)
404{
405	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
406			struct gss_cred, gc_base);
407	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
408	struct rpc_pipe *pipe = gss_msg->pipe;
409
410	spin_lock(&pipe->lock);
411	gss_handle_downcall_result(gss_cred, gss_msg);
412	spin_unlock(&pipe->lock);
413	task->tk_status = gss_msg->msg.errno;
414	gss_release_msg(gss_msg);
415}
416
417static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
418{
419	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
420	memcpy(gss_msg->databuf, &uid, sizeof(uid));
421	gss_msg->msg.data = gss_msg->databuf;
422	gss_msg->msg.len = sizeof(uid);
423	BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
424}
425
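/*
 * Build the text-based (v1) upcall message. The result is a single line,
 * for example (illustrative values only):
 *
 *	mech=krb5 uid=1000 target=nfs@server.example.com service=nfs enctypes=18,17
 *
 * where the target=, service= and enctypes= fields are emitted only when
 * the corresponding information is available.
 */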
426static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
427				const char *service_name,
428				const char *target_name)
429{
430	struct gss_api_mech *mech = gss_msg->auth->mech;
431	char *p = gss_msg->databuf;
432	int len = 0;
433
434	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
435				   mech->gm_name,
436				   from_kuid(&init_user_ns, gss_msg->uid));
437	p += gss_msg->msg.len;
438	if (target_name) {
439		len = sprintf(p, "target=%s ", target_name);
440		p += len;
441		gss_msg->msg.len += len;
442	}
443	if (service_name != NULL) {
444		len = sprintf(p, "service=%s ", service_name);
445		p += len;
446		gss_msg->msg.len += len;
447	}
448	if (mech->gm_upcall_enctypes) {
449		len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
450		p += len;
451		gss_msg->msg.len += len;
452	}
453	len = sprintf(p, "\n");
454	gss_msg->msg.len += len;
455
456	gss_msg->msg.data = gss_msg->databuf;
457	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
458}
459
460static struct gss_upcall_msg *
461gss_alloc_msg(struct gss_auth *gss_auth,
462		kuid_t uid, const char *service_name)
463{
464	struct gss_upcall_msg *gss_msg;
465	int vers;
466
467	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
468	if (gss_msg == NULL)
469		return ERR_PTR(-ENOMEM);
470	vers = get_pipe_version(gss_auth->net);
471	if (vers < 0) {
472		kfree(gss_msg);
473		return ERR_PTR(vers);
474	}
475	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
476	INIT_LIST_HEAD(&gss_msg->list);
477	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
478	init_waitqueue_head(&gss_msg->waitqueue);
479	atomic_set(&gss_msg->count, 1);
480	gss_msg->uid = uid;
481	gss_msg->auth = gss_auth;
482	switch (vers) {
483	case 0:
484		gss_encode_v0_msg(gss_msg);
485		break;
486	default:
487		gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
488	}
489	return gss_msg;
490}
491
492static struct gss_upcall_msg *
493gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
494{
495	struct gss_cred *gss_cred = container_of(cred,
496			struct gss_cred, gc_base);
497	struct gss_upcall_msg *gss_new, *gss_msg;
498	kuid_t uid = cred->cr_uid;
499
500	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
501	if (IS_ERR(gss_new))
502		return gss_new;
503	gss_msg = gss_add_msg(gss_new);
504	if (gss_msg == gss_new) {
505		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
506		if (res) {
507			gss_unhash_msg(gss_new);
508			gss_msg = ERR_PTR(res);
509		}
510	} else
511		gss_release_msg(gss_new);
512	return gss_msg;
513}
514
515static void warn_gssd(void)
516{
517	static unsigned long ratelimit;
518	unsigned long now = jiffies;
519
520	if (time_after(now, ratelimit)) {
521		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
522				"Please check that the gssd user daemon is running.\n");
523		ratelimit = now + 15*HZ;
524	}
525}
526
527static inline int
528gss_refresh_upcall(struct rpc_task *task)
529{
530	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
531	struct gss_auth *gss_auth = container_of(cred->cr_auth,
532			struct gss_auth, rpc_auth);
533	struct gss_cred *gss_cred = container_of(cred,
534			struct gss_cred, gc_base);
535	struct gss_upcall_msg *gss_msg;
536	struct rpc_pipe *pipe;
537	int err = 0;
538
539	dprintk("RPC: %5u %s for uid %u\n",
540		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
541	gss_msg = gss_setup_upcall(gss_auth, cred);
542	if (PTR_ERR(gss_msg) == -EAGAIN) {
543		/* XXX: warn on the first failure, under the assumption that we
544		 * shouldn't normally hit this case on a refresh. */
545		warn_gssd();
546		task->tk_timeout = 15*HZ;
547		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
548		return -EAGAIN;
549	}
550	if (IS_ERR(gss_msg)) {
551		err = PTR_ERR(gss_msg);
552		goto out;
553	}
554	pipe = gss_msg->pipe;
555	spin_lock(&pipe->lock);
556	if (gss_cred->gc_upcall != NULL)
557		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
558	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
559		task->tk_timeout = 0;
560		gss_cred->gc_upcall = gss_msg;
561		/* gss_upcall_callback will release the reference to gss_upcall_msg */
562		atomic_inc(&gss_msg->count);
563		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
564	} else {
565		gss_handle_downcall_result(gss_cred, gss_msg);
566		err = gss_msg->msg.errno;
567	}
568	spin_unlock(&pipe->lock);
569	gss_release_msg(gss_msg);
570out:
571	dprintk("RPC: %5u %s for uid %u result %d\n",
572		task->tk_pid, __func__,
573		from_kuid(&init_user_ns, cred->cr_uid),	err);
574	return err;
575}
576
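/*
 * Synchronous counterpart of gss_refresh_upcall(), used from
 * gss_cred_init() when a cred is first set up: queue an upcall and wait
 * (killable) until gssd supplies a context or reports an error, retrying
 * while no upcall pipe has been opened yet.
 */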
577static inline int
578gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
579{
580	struct net *net = gss_auth->net;
581	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
582	struct rpc_pipe *pipe;
583	struct rpc_cred *cred = &gss_cred->gc_base;
584	struct gss_upcall_msg *gss_msg;
585	unsigned long timeout;
586	DEFINE_WAIT(wait);
587	int err;
588
589	dprintk("RPC:       %s for uid %u\n",
590		__func__, from_kuid(&init_user_ns, cred->cr_uid));
591retry:
592	err = 0;
593	/* Default timeout is 15s unless we know that gssd is not running */
594	timeout = 15 * HZ;
595	if (!sn->gssd_running)
596		timeout = HZ >> 2;
597	gss_msg = gss_setup_upcall(gss_auth, cred);
598	if (PTR_ERR(gss_msg) == -EAGAIN) {
599		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
600				sn->pipe_version >= 0, timeout);
601		if (sn->pipe_version < 0) {
602			if (err == 0)
603				sn->gssd_running = 0;
604			warn_gssd();
605			err = -EACCES;
606		}
607		if (err < 0)
608			goto out;
609		goto retry;
610	}
611	if (IS_ERR(gss_msg)) {
612		err = PTR_ERR(gss_msg);
613		goto out;
614	}
615	pipe = gss_msg->pipe;
616	for (;;) {
617		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
618		spin_lock(&pipe->lock);
619		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
620			break;
621		}
622		spin_unlock(&pipe->lock);
623		if (fatal_signal_pending(current)) {
624			err = -ERESTARTSYS;
625			goto out_intr;
626		}
627		schedule();
628	}
629	if (gss_msg->ctx)
630		gss_cred_set_ctx(cred, gss_msg->ctx);
631	else
632		err = gss_msg->msg.errno;
633	spin_unlock(&pipe->lock);
634out_intr:
635	finish_wait(&gss_msg->waitqueue, &wait);
636	gss_release_msg(gss_msg);
637out:
638	dprintk("RPC:       %s for uid %u result %d\n",
639		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
640	return err;
641}
642
643#define MSG_BUF_MAXSIZE 1024
644
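/*
 * Handle a downcall written by gssd. The buffer begins with the uid that
 * identifies the waiting upcall; the rest is handed to gss_fill_context()
 * to build the new GSS context for that cred.
 */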
645static ssize_t
646gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
647{
648	const void *p, *end;
649	void *buf;
650	struct gss_upcall_msg *gss_msg;
651	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
652	struct gss_cl_ctx *ctx;
653	uid_t id;
654	kuid_t uid;
655	ssize_t err = -EFBIG;
656
657	if (mlen > MSG_BUF_MAXSIZE)
658		goto out;
659	err = -ENOMEM;
660	buf = kmalloc(mlen, GFP_NOFS);
661	if (!buf)
662		goto out;
663
664	err = -EFAULT;
665	if (copy_from_user(buf, src, mlen))
666		goto err;
667
668	end = (const void *)((char *)buf + mlen);
669	p = simple_get_bytes(buf, end, &id, sizeof(id));
670	if (IS_ERR(p)) {
671		err = PTR_ERR(p);
672		goto err;
673	}
674
675	uid = make_kuid(&init_user_ns, id);
676	if (!uid_valid(uid)) {
677		err = -EINVAL;
678		goto err;
679	}
680
681	err = -ENOMEM;
682	ctx = gss_alloc_context();
683	if (ctx == NULL)
684		goto err;
685
686	err = -ENOENT;
687	/* Find a matching upcall */
688	spin_lock(&pipe->lock);
689	gss_msg = __gss_find_upcall(pipe, uid);
690	if (gss_msg == NULL) {
691		spin_unlock(&pipe->lock);
692		goto err_put_ctx;
693	}
694	list_del_init(&gss_msg->list);
695	spin_unlock(&pipe->lock);
696
697	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
698	if (IS_ERR(p)) {
699		err = PTR_ERR(p);
700		switch (err) {
701		case -EACCES:
702		case -EKEYEXPIRED:
703			gss_msg->msg.errno = err;
704			err = mlen;
705			break;
706		case -EFAULT:
707		case -ENOMEM:
708		case -EINVAL:
709		case -ENOSYS:
710			gss_msg->msg.errno = -EAGAIN;
711			break;
712		default:
713			printk(KERN_CRIT "%s: bad return from "
714				"gss_fill_context: %zd\n", __func__, err);
715			BUG();
716		}
717		goto err_release_msg;
718	}
719	gss_msg->ctx = gss_get_ctx(ctx);
720	err = mlen;
721
722err_release_msg:
723	spin_lock(&pipe->lock);
724	__gss_unhash_msg(gss_msg);
725	spin_unlock(&pipe->lock);
726	gss_release_msg(gss_msg);
727err_put_ctx:
728	gss_put_ctx(ctx);
729err:
730	kfree(buf);
731out:
732	dprintk("RPC:       %s returning %zd\n", __func__, err);
733	return err;
734}
735
736static int gss_pipe_open(struct inode *inode, int new_version)
737{
738	struct net *net = inode->i_sb->s_fs_info;
739	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
740	int ret = 0;
741
742	spin_lock(&pipe_version_lock);
743	if (sn->pipe_version < 0) {
744		/* First open of any gss pipe determines the version: */
745		sn->pipe_version = new_version;
746		rpc_wake_up(&pipe_version_rpc_waitqueue);
747		wake_up(&pipe_version_waitqueue);
748	} else if (sn->pipe_version != new_version) {
749		/* Trying to open a pipe of a different version */
750		ret = -EBUSY;
751		goto out;
752	}
753	atomic_inc(&sn->pipe_users);
754out:
755	spin_unlock(&pipe_version_lock);
756	return ret;
757
758}
759
760static int gss_pipe_open_v0(struct inode *inode)
761{
762	return gss_pipe_open(inode, 0);
763}
764
765static int gss_pipe_open_v1(struct inode *inode)
766{
767	return gss_pipe_open(inode, 1);
768}
769
770static void
771gss_pipe_release(struct inode *inode)
772{
773	struct net *net = inode->i_sb->s_fs_info;
774	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
775	struct gss_upcall_msg *gss_msg;
776
777restart:
778	spin_lock(&pipe->lock);
779	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
780
781		if (!list_empty(&gss_msg->msg.list))
782			continue;
783		gss_msg->msg.errno = -EPIPE;
784		atomic_inc(&gss_msg->count);
785		__gss_unhash_msg(gss_msg);
786		spin_unlock(&pipe->lock);
787		gss_release_msg(gss_msg);
788		goto restart;
789	}
790	spin_unlock(&pipe->lock);
791
792	put_pipe_version(net);
793}
794
795static void
796gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
797{
798	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
799
800	if (msg->errno < 0) {
801		dprintk("RPC:       %s releasing msg %p\n",
802			__func__, gss_msg);
803		atomic_inc(&gss_msg->count);
804		gss_unhash_msg(gss_msg);
805		if (msg->errno == -ETIMEDOUT)
806			warn_gssd();
807		gss_release_msg(gss_msg);
808	}
809}
810
811static void gss_pipe_dentry_destroy(struct dentry *dir,
812		struct rpc_pipe_dir_object *pdo)
813{
814	struct gss_pipe *gss_pipe = pdo->pdo_data;
815	struct rpc_pipe *pipe = gss_pipe->pipe;
816
817	if (pipe->dentry != NULL) {
818		rpc_unlink(pipe->dentry);
819		pipe->dentry = NULL;
820	}
821}
822
823static int gss_pipe_dentry_create(struct dentry *dir,
824		struct rpc_pipe_dir_object *pdo)
825{
826	struct gss_pipe *p = pdo->pdo_data;
827	struct dentry *dentry;
828
829	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
830	if (IS_ERR(dentry))
831		return PTR_ERR(dentry);
832	p->pipe->dentry = dentry;
833	return 0;
834}
835
836static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
837	.create = gss_pipe_dentry_create,
838	.destroy = gss_pipe_dentry_destroy,
839};
840
841static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
842		const char *name,
843		const struct rpc_pipe_ops *upcall_ops)
844{
845	struct gss_pipe *p;
846	int err = -ENOMEM;
847
848	p = kmalloc(sizeof(*p), GFP_KERNEL);
849	if (p == NULL)
850		goto err;
851	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
852	if (IS_ERR(p->pipe)) {
853		err = PTR_ERR(p->pipe);
854		goto err_free_gss_pipe;
855	}
856	p->name = name;
857	p->clnt = clnt;
858	kref_init(&p->kref);
859	rpc_init_pipe_dir_object(&p->pdo,
860			&gss_pipe_dir_object_ops,
861			p);
862	return p;
863err_free_gss_pipe:
864	kfree(p);
865err:
866	return ERR_PTR(err);
867}
868
869struct gss_alloc_pdo {
870	struct rpc_clnt *clnt;
871	const char *name;
872	const struct rpc_pipe_ops *upcall_ops;
873};
874
875static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
876{
877	struct gss_pipe *gss_pipe;
878	struct gss_alloc_pdo *args = data;
879
880	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
881		return 0;
882	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
883	if (strcmp(gss_pipe->name, args->name) != 0)
884		return 0;
885	if (!kref_get_unless_zero(&gss_pipe->kref))
886		return 0;
887	return 1;
888}
889
890static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
891{
892	struct gss_pipe *gss_pipe;
893	struct gss_alloc_pdo *args = data;
894
895	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
896	if (!IS_ERR(gss_pipe))
897		return &gss_pipe->pdo;
898	return NULL;
899}
900
901static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
902		const char *name,
903		const struct rpc_pipe_ops *upcall_ops)
904{
905	struct net *net = rpc_net_ns(clnt);
906	struct rpc_pipe_dir_object *pdo;
907	struct gss_alloc_pdo args = {
908		.clnt = clnt,
909		.name = name,
910		.upcall_ops = upcall_ops,
911	};
912
913	pdo = rpc_find_or_alloc_pipe_dir_object(net,
914			&clnt->cl_pipedir_objects,
915			gss_pipe_match_pdo,
916			gss_pipe_alloc_pdo,
917			&args);
918	if (pdo != NULL)
919		return container_of(pdo, struct gss_pipe, pdo);
920	return ERR_PTR(-ENOMEM);
921}
922
923static void __gss_pipe_free(struct gss_pipe *p)
924{
925	struct rpc_clnt *clnt = p->clnt;
926	struct net *net = rpc_net_ns(clnt);
927
928	rpc_remove_pipe_dir_object(net,
929			&clnt->cl_pipedir_objects,
930			&p->pdo);
931	rpc_destroy_pipe_data(p->pipe);
932	kfree(p);
933}
934
935static void __gss_pipe_release(struct kref *kref)
936{
937	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
938
939	__gss_pipe_free(p);
940}
941
942static void gss_pipe_free(struct gss_pipe *p)
943{
944	if (p != NULL)
945		kref_put(&p->kref, __gss_pipe_release);
946}
947
948/*
949 * NOTE: we have the opportunity to use different
950 * parameters based on the input flavor (which must be a pseudoflavor)
951 */
952static struct gss_auth *
953gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
954{
955	rpc_authflavor_t flavor = args->pseudoflavor;
956	struct gss_auth *gss_auth;
957	struct gss_pipe *gss_pipe;
958	struct rpc_auth * auth;
959	int err = -ENOMEM; /* XXX? */
960
961	dprintk("RPC:       creating GSS authenticator for client %p\n", clnt);
962
963	if (!try_module_get(THIS_MODULE))
964		return ERR_PTR(err);
965	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
966		goto out_dec;
967	INIT_HLIST_NODE(&gss_auth->hash);
968	gss_auth->target_name = NULL;
969	if (args->target_name) {
970		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
971		if (gss_auth->target_name == NULL)
972			goto err_free;
973	}
974	gss_auth->client = clnt;
975	gss_auth->net = get_net(rpc_net_ns(clnt));
976	err = -EINVAL;
977	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
978	if (!gss_auth->mech) {
979		dprintk("RPC:       Pseudoflavor %d not found!\n", flavor);
980		goto err_put_net;
981	}
982	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
983	if (gss_auth->service == 0)
984		goto err_put_mech;
985	auth = &gss_auth->rpc_auth;
986	auth->au_cslack = GSS_CRED_SLACK >> 2;
987	auth->au_rslack = GSS_VERF_SLACK >> 2;
988	auth->au_ops = &authgss_ops;
989	auth->au_flavor = flavor;
990	atomic_set(&auth->au_count, 1);
991	kref_init(&gss_auth->kref);
992
993	err = rpcauth_init_credcache(auth);
994	if (err)
995		goto err_put_mech;
996	/*
997	 * Note: if we created the old pipe first, then someone who
998	 * examined the directory at the right moment might conclude
999	 * that we supported only the old pipe.  So we instead create
1000	 * the new pipe first.
1001	 */
1002	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
1003	if (IS_ERR(gss_pipe)) {
1004		err = PTR_ERR(gss_pipe);
1005		goto err_destroy_credcache;
1006	}
1007	gss_auth->gss_pipe[1] = gss_pipe;
1008
1009	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
1010			&gss_upcall_ops_v0);
1011	if (IS_ERR(gss_pipe)) {
1012		err = PTR_ERR(gss_pipe);
1013		goto err_destroy_pipe_1;
1014	}
1015	gss_auth->gss_pipe[0] = gss_pipe;
1016
1017	return gss_auth;
1018err_destroy_pipe_1:
1019	gss_pipe_free(gss_auth->gss_pipe[1]);
1020err_destroy_credcache:
1021	rpcauth_destroy_credcache(auth);
1022err_put_mech:
1023	gss_mech_put(gss_auth->mech);
1024err_put_net:
1025	put_net(gss_auth->net);
1026err_free:
1027	kfree(gss_auth->target_name);
1028	kfree(gss_auth);
1029out_dec:
1030	module_put(THIS_MODULE);
1031	return ERR_PTR(err);
1032}
1033
1034static void
1035gss_free(struct gss_auth *gss_auth)
1036{
1037	gss_pipe_free(gss_auth->gss_pipe[0]);
1038	gss_pipe_free(gss_auth->gss_pipe[1]);
1039	gss_mech_put(gss_auth->mech);
1040	put_net(gss_auth->net);
1041	kfree(gss_auth->target_name);
1042
1043	kfree(gss_auth);
1044	module_put(THIS_MODULE);
1045}
1046
1047static void
1048gss_free_callback(struct kref *kref)
1049{
1050	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
1051
1052	gss_free(gss_auth);
1053}
1054
1055static void
1056gss_destroy(struct rpc_auth *auth)
1057{
1058	struct gss_auth *gss_auth = container_of(auth,
1059			struct gss_auth, rpc_auth);
1060
1061	dprintk("RPC:       destroying GSS authenticator %p flavor %d\n",
1062			auth, auth->au_flavor);
1063
1064	if (hash_hashed(&gss_auth->hash)) {
1065		spin_lock(&gss_auth_hash_lock);
1066		hash_del(&gss_auth->hash);
1067		spin_unlock(&gss_auth_hash_lock);
1068	}
1069
1070	gss_pipe_free(gss_auth->gss_pipe[0]);
1071	gss_auth->gss_pipe[0] = NULL;
1072	gss_pipe_free(gss_auth->gss_pipe[1]);
1073	gss_auth->gss_pipe[1] = NULL;
1074	rpcauth_destroy_credcache(auth);
1075
1076	kref_put(&gss_auth->kref, gss_free_callback);
1077}
1078
1079/*
1080 * Auths may be shared between rpc clients that were cloned from a
1081 * common client with the same xprt, if they also share the flavor and
1082 * target_name.
1083 *
1084 * The auth is looked up from the oldest parent sharing the same
1085 * cl_xprt, and the auth itself references only that common parent
1086 * (which is guaranteed to last as long as any of its descendants).
1087 */
1088static struct gss_auth *
1089gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
1090		struct rpc_clnt *clnt,
1091		struct gss_auth *new)
1092{
1093	struct gss_auth *gss_auth;
1094	unsigned long hashval = (unsigned long)clnt;
1095
1096	spin_lock(&gss_auth_hash_lock);
1097	hash_for_each_possible(gss_auth_hash_table,
1098			gss_auth,
1099			hash,
1100			hashval) {
1101		if (gss_auth->client != clnt)
1102			continue;
1103		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
1104			continue;
1105		if (gss_auth->target_name != args->target_name) {
1106			if (gss_auth->target_name == NULL)
1107				continue;
1108			if (args->target_name == NULL)
1109				continue;
1110			if (strcmp(gss_auth->target_name, args->target_name))
1111				continue;
1112		}
1113		if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count))
1114			continue;
1115		goto out;
1116	}
1117	if (new)
1118		hash_add(gss_auth_hash_table, &new->hash, hashval);
1119	gss_auth = new;
1120out:
1121	spin_unlock(&gss_auth_hash_lock);
1122	return gss_auth;
1123}
1124
1125static struct gss_auth *
1126gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1127{
1128	struct gss_auth *gss_auth;
1129	struct gss_auth *new;
1130
1131	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
1132	if (gss_auth != NULL)
1133		goto out;
1134	new = gss_create_new(args, clnt);
1135	if (IS_ERR(new))
1136		return new;
1137	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
1138	if (gss_auth != new)
1139		gss_destroy(&new->rpc_auth);
1140out:
1141	return gss_auth;
1142}
1143
1144static struct rpc_auth *
1145gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1146{
1147	struct gss_auth *gss_auth;
1148	struct rpc_xprt *xprt = rcu_access_pointer(clnt->cl_xprt);
1149
1150	while (clnt != clnt->cl_parent) {
1151		struct rpc_clnt *parent = clnt->cl_parent;
1152		/* Find the original parent for this transport */
1153		if (rcu_access_pointer(parent->cl_xprt) != xprt)
1154			break;
1155		clnt = parent;
1156	}
1157
1158	gss_auth = gss_create_hashed(args, clnt);
1159	if (IS_ERR(gss_auth))
1160		return ERR_CAST(gss_auth);
1161	return &gss_auth->rpc_auth;
1162}
1163
1164/*
1165 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
1166 * to the server with the GSS control procedure field set to
1167 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
1168 * all RPCSEC_GSS state associated with that context.
1169 */
1170static int
1171gss_destroying_context(struct rpc_cred *cred)
1172{
1173	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1174	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1175	struct rpc_task *task;
1176
1177	if (gss_cred->gc_ctx == NULL ||
1178	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
1179		return 0;
1180
1181	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1182	cred->cr_ops = &gss_nullops;
1183
1184	/* Take a reference to ensure the cred will be destroyed either
1185	 * by the RPC call or by the put_rpccred() below */
1186	get_rpccred(cred);
1187
1188	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
1189	if (!IS_ERR(task))
1190		rpc_put_task(task);
1191
1192	put_rpccred(cred);
1193	return 1;
1194}
1195
1196/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
1197 * to create a new cred or context, so they check that things have been
1198 * allocated before freeing them. */
1199static void
1200gss_do_free_ctx(struct gss_cl_ctx *ctx)
1201{
1202	dprintk("RPC:       %s\n", __func__);
1203
1204	gss_delete_sec_context(&ctx->gc_gss_ctx);
1205	kfree(ctx->gc_wire_ctx.data);
1206	kfree(ctx);
1207}
1208
1209static void
1210gss_free_ctx_callback(struct rcu_head *head)
1211{
1212	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1213	gss_do_free_ctx(ctx);
1214}
1215
1216static void
1217gss_free_ctx(struct gss_cl_ctx *ctx)
1218{
1219	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
1220}
1221
1222static void
1223gss_free_cred(struct gss_cred *gss_cred)
1224{
1225	dprintk("RPC:       %s cred=%p\n", __func__, gss_cred);
1226	kfree(gss_cred);
1227}
1228
1229static void
1230gss_free_cred_callback(struct rcu_head *head)
1231{
1232	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1233	gss_free_cred(gss_cred);
1234}
1235
1236static void
1237gss_destroy_nullcred(struct rpc_cred *cred)
1238{
1239	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1240	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1241	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
1242
1243	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
1244	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1245	if (ctx)
1246		gss_put_ctx(ctx);
1247	kref_put(&gss_auth->kref, gss_free_callback);
1248}
1249
1250static void
1251gss_destroy_cred(struct rpc_cred *cred)
1252{
1253
1254	if (gss_destroying_context(cred))
1255		return;
1256	gss_destroy_nullcred(cred);
1257}
1258
1259/*
1260 * Lookup RPCSEC_GSS cred for the current process
1261 */
1262static struct rpc_cred *
1263gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1264{
1265	return rpcauth_lookup_credcache(auth, acred, flags);
1266}
1267
1268static struct rpc_cred *
1269gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1270{
1271	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1272	struct gss_cred	*cred = NULL;
1273	int err = -ENOMEM;
1274
1275	dprintk("RPC:       %s for uid %d, flavor %d\n",
1276		__func__, from_kuid(&init_user_ns, acred->uid),
1277		auth->au_flavor);
1278
1279	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
1280		goto out_err;
1281
1282	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
1283	/*
1284	 * Note: in order to force a call to call_refresh(), we deliberately
1285	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
1286	 */
1287	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
1288	cred->gc_service = gss_auth->service;
1289	cred->gc_principal = NULL;
1290	if (acred->machine_cred)
1291		cred->gc_principal = acred->principal;
1292	kref_get(&gss_auth->kref);
1293	return &cred->gc_base;
1294
1295out_err:
1296	dprintk("RPC:       %s failed with error %d\n", __func__, err);
1297	return ERR_PTR(err);
1298}
1299
1300static int
1301gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
1302{
1303	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1304	struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
1305	int err;
1306
1307	do {
1308		err = gss_create_upcall(gss_auth, gss_cred);
1309	} while (err == -EAGAIN);
1310	return err;
1311}
1312
1313/*
1314 * Returns -EACCES if the GSS context is NULL or will expire within the
1315 * timeout (gss_key_expire_timeo, in seconds)
1316 */
1317static int
1318gss_key_timeout(struct rpc_cred *rc)
1319{
1320	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1321	unsigned long now = jiffies;
1322	unsigned long expire;
1323
1324	if (gss_cred->gc_ctx == NULL)
1325		return -EACCES;
1326
1327	expire = gss_cred->gc_ctx->gc_expiry - (gss_key_expire_timeo * HZ);
1328
1329	if (time_after(now, expire))
1330		return -EACCES;
1331	return 0;
1332}
1333
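/*
 * Decide whether an existing cred can be reused for this request: creds
 * are matched on principal when one is given, otherwise on uid, and a
 * cred whose context has expired or is no longer uptodate never matches.
 * On a successful match, callers that asked for it are notified when the
 * underlying GSS key is about to expire.
 */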
1334static int
1335gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
1336{
1337	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1338	int ret;
1339
1340	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
1341		goto out;
1342	/* Don't match with creds that have expired. */
1343	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
1344		return 0;
1345	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
1346		return 0;
1347out:
1348	if (acred->principal != NULL) {
1349		if (gss_cred->gc_principal == NULL)
1350			return 0;
1351		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
1352		goto check_expire;
1353	}
1354	if (gss_cred->gc_principal != NULL)
1355		return 0;
1356	ret = uid_eq(rc->cr_uid, acred->uid);
1357
1358check_expire:
1359	if (ret == 0)
1360		return ret;
1361
1362	/* Notify acred users of GSS context expiration timeout */
1363	if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
1364	    (gss_key_timeout(rc) != 0)) {
1365		/* test will now be done from generic cred */
1366		test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
1367		/* tell NFS layer that key will expire soon */
1368		set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
1369	}
1370	return ret;
1371}
1372
1373/*
1374 * Marshal credentials.
1375 * Maybe we should keep a cached credential for performance reasons.
1376 */
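/*
 * The credential built below follows RFC 2203: the RPC_AUTH_GSS flavor
 * and body length, then the GSS version, gc_proc, sequence number,
 * service and the opaque context handle. The verifier is an RPC_AUTH_GSS
 * flavored MIC computed over the request from the xid up to the end of
 * the credential.
 */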
1377static __be32 *
1378gss_marshal(struct rpc_task *task, __be32 *p)
1379{
1380	struct rpc_rqst *req = task->tk_rqstp;
1381	struct rpc_cred *cred = req->rq_cred;
1382	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1383						 gc_base);
1384	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
1385	__be32		*cred_len;
1386	u32             maj_stat = 0;
1387	struct xdr_netobj mic;
1388	struct kvec	iov;
1389	struct xdr_buf	verf_buf;
1390
1391	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
1392
1393	*p++ = htonl(RPC_AUTH_GSS);
1394	cred_len = p++;
1395
1396	spin_lock(&ctx->gc_seq_lock);
1397	req->rq_seqno = ctx->gc_seq++;
1398	spin_unlock(&ctx->gc_seq_lock);
1399
1400	*p++ = htonl((u32) RPC_GSS_VERSION);
1401	*p++ = htonl((u32) ctx->gc_proc);
1402	*p++ = htonl((u32) req->rq_seqno);
1403	*p++ = htonl((u32) gss_cred->gc_service);
1404	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
1405	*cred_len = htonl((p - (cred_len + 1)) << 2);
1406
1407	/* We compute the checksum for the verifier over the xdr-encoded bytes
1408	 * starting with the xid and ending at the end of the credential: */
1409	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
1410					req->rq_snd_buf.head[0].iov_base);
1411	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1412	xdr_buf_from_iov(&iov, &verf_buf);
1413
1414	/* set verifier flavor*/
1415	*p++ = htonl(RPC_AUTH_GSS);
1416
1417	mic.data = (u8 *)(p + 1);
1418	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1419	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
1420		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1421	} else if (maj_stat != 0) {
1422		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
1423		goto out_put_ctx;
1424	}
1425	p = xdr_encode_opaque(p, NULL, mic.len);
1426	gss_put_ctx(ctx);
1427	return p;
1428out_put_ctx:
1429	gss_put_ctx(ctx);
1430	return NULL;
1431}
1432
1433static int gss_renew_cred(struct rpc_task *task)
1434{
1435	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
1436	struct gss_cred *gss_cred = container_of(oldcred,
1437						 struct gss_cred,
1438						 gc_base);
1439	struct rpc_auth *auth = oldcred->cr_auth;
1440	struct auth_cred acred = {
1441		.uid = oldcred->cr_uid,
1442		.principal = gss_cred->gc_principal,
1443		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
1444	};
1445	struct rpc_cred *new;
1446
1447	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
1448	if (IS_ERR(new))
1449		return PTR_ERR(new);
1450	task->tk_rqstp->rq_cred = new;
1451	put_rpccred(oldcred);
1452	return 0;
1453}
1454
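/*
 * After an upcall reports -EKEYEXPIRED the cred is treated as a negative
 * entry for gss_expired_cred_retry_delay seconds, so gss_refresh() fails
 * fast instead of queueing another upcall immediately.
 */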
1455static int gss_cred_is_negative_entry(struct rpc_cred *cred)
1456{
1457	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
1458		unsigned long now = jiffies;
1459		unsigned long begin, expire;
1460		struct gss_cred *gss_cred;
1461
1462		gss_cred = container_of(cred, struct gss_cred, gc_base);
1463		begin = gss_cred->gc_upcall_timestamp;
1464		expire = begin + gss_expired_cred_retry_delay * HZ;
1465
1466		if (time_in_range_open(now, begin, expire))
1467			return 1;
1468	}
1469	return 0;
1470}
1471
1472/*
1473 * Refresh credentials. XXX - finish
1474 */
1475static int
1476gss_refresh(struct rpc_task *task)
1477{
1478	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1479	int ret = 0;
1480
1481	if (gss_cred_is_negative_entry(cred))
1482		return -EKEYEXPIRED;
1483
1484	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
1485			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
1486		ret = gss_renew_cred(task);
1487		if (ret < 0)
1488			goto out;
1489		cred = task->tk_rqstp->rq_cred;
1490	}
1491
1492	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
1493		ret = gss_refresh_upcall(task);
1494out:
1495	return ret;
1496}
1497
1498/* Dummy refresh routine: used only when destroying the context */
1499static int
1500gss_refresh_null(struct rpc_task *task)
1501{
1502	return -EACCES;
1503}
1504
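/*
 * Validate the verifier in the server's reply: it must be flavor
 * RPC_AUTH_GSS and carry a MIC over the XDR-encoded sequence number that
 * was used in the request.
 */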
1505static __be32 *
1506gss_validate(struct rpc_task *task, __be32 *p)
1507{
1508	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1509	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1510	__be32		seq;
1511	struct kvec	iov;
1512	struct xdr_buf	verf_buf;
1513	struct xdr_netobj mic;
1514	u32		flav,len;
1515	u32		maj_stat;
1516	__be32		*ret = ERR_PTR(-EIO);
1517
1518	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
1519
1520	flav = ntohl(*p++);
1521	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
1522		goto out_bad;
1523	if (flav != RPC_AUTH_GSS)
1524		goto out_bad;
1525	seq = htonl(task->tk_rqstp->rq_seqno);
1526	iov.iov_base = &seq;
1527	iov.iov_len = sizeof(seq);
1528	xdr_buf_from_iov(&iov, &verf_buf);
1529	mic.data = (u8 *)p;
1530	mic.len = len;
1531
1532	ret = ERR_PTR(-EACCES);
1533	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1534	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1535		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1536	if (maj_stat) {
1537		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
1538			task->tk_pid, __func__, maj_stat);
1539		goto out_bad;
1540	}
1541	/* We leave it to unwrap to calculate au_rslack. For now we just
1542	 * calculate the length of the verifier: */
1543	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
1544	gss_put_ctx(ctx);
1545	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
1546			task->tk_pid, __func__);
1547	return p + XDR_QUADLEN(len);
1548out_bad:
1549	gss_put_ctx(ctx);
1550	dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
1551		PTR_ERR(ret));
1552	return ret;
1553}
1554
1555static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
1556				__be32 *p, void *obj)
1557{
1558	struct xdr_stream xdr;
1559
1560	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
1561	encode(rqstp, &xdr, obj);
1562}
1563
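/*
 * Wrap the call body for rpc_gss_svc_integrity: emit the databody length
 * and sequence number, XDR-encode the arguments, then append a MIC
 * computed over the sequence number plus arguments.
 */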
1564static inline int
1565gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1566		   kxdreproc_t encode, struct rpc_rqst *rqstp,
1567		   __be32 *p, void *obj)
1568{
1569	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
1570	struct xdr_buf	integ_buf;
1571	__be32          *integ_len = NULL;
1572	struct xdr_netobj mic;
1573	u32		offset;
1574	__be32		*q;
1575	struct kvec	*iov;
1576	u32             maj_stat = 0;
1577	int		status = -EIO;
1578
1579	integ_len = p++;
1580	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1581	*p++ = htonl(rqstp->rq_seqno);
1582
1583	gss_wrap_req_encode(encode, rqstp, p, obj);
1584
1585	if (xdr_buf_subsegment(snd_buf, &integ_buf,
1586				offset, snd_buf->len - offset))
1587		return status;
1588	*integ_len = htonl(integ_buf.len);
1589
1590	/* guess whether we're in the head or the tail: */
1591	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1592		iov = snd_buf->tail;
1593	else
1594		iov = snd_buf->head;
1595	p = iov->iov_base + iov->iov_len;
1596	mic.data = (u8 *)(p + 1);
1597
1598	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1599	status = -EIO; /* XXX? */
1600	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1601		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1602	else if (maj_stat)
1603		return status;
1604	q = xdr_encode_opaque(p, NULL, mic.len);
1605
1606	offset = (u8 *)q - (u8 *)p;
1607	iov->iov_len += offset;
1608	snd_buf->len += offset;
1609	return 0;
1610}
1611
1612static void
1613priv_release_snd_buf(struct rpc_rqst *rqstp)
1614{
1615	int i;
1616
1617	for (i=0; i < rqstp->rq_enc_pages_num; i++)
1618		__free_page(rqstp->rq_enc_pages[i]);
1619	kfree(rqstp->rq_enc_pages);
1620}
1621
1622static int
1623alloc_enc_pages(struct rpc_rqst *rqstp)
1624{
1625	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1626	int first, last, i;
1627
1628	if (snd_buf->page_len == 0) {
1629		rqstp->rq_enc_pages_num = 0;
1630		return 0;
1631	}
1632
1633	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
1634	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
1635	rqstp->rq_enc_pages_num = last - first + 1 + 1;
1636	rqstp->rq_enc_pages
1637		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
1638				GFP_NOFS);
1639	if (!rqstp->rq_enc_pages)
1640		goto out;
1641	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
1642		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
1643		if (rqstp->rq_enc_pages[i] == NULL)
1644			goto out_free;
1645	}
1646	rqstp->rq_release_snd_buf = priv_release_snd_buf;
1647	return 0;
1648out_free:
1649	rqstp->rq_enc_pages_num = i;
1650	priv_release_snd_buf(rqstp);
1651out:
1652	return -EAGAIN;
1653}
1654
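/*
 * Wrap the call body for rpc_gss_svc_privacy: XDR-encode the sequence
 * number and arguments, then let gss_wrap() encrypt them into the pages
 * set up by alloc_enc_pages(), padding the result to a multiple of four
 * bytes.
 */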
1655static inline int
1656gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1657		  kxdreproc_t encode, struct rpc_rqst *rqstp,
1658		  __be32 *p, void *obj)
1659{
1660	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
1661	u32		offset;
1662	u32             maj_stat;
1663	int		status;
1664	__be32		*opaque_len;
1665	struct page	**inpages;
1666	int		first;
1667	int		pad;
1668	struct kvec	*iov;
1669	char		*tmp;
1670
1671	opaque_len = p++;
1672	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1673	*p++ = htonl(rqstp->rq_seqno);
1674
1675	gss_wrap_req_encode(encode, rqstp, p, obj);
1676
1677	status = alloc_enc_pages(rqstp);
1678	if (status)
1679		return status;
1680	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
1681	inpages = snd_buf->pages + first;
1682	snd_buf->pages = rqstp->rq_enc_pages;
1683	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
1684	/*
1685	 * Give the tail its own page, in case we need extra space in the
1686	 * head when wrapping:
1687	 *
1688	 * call_allocate() allocates twice the slack space required
1689	 * by the authentication flavor to rq_callsize.
1690	 * For GSS, slack is GSS_CRED_SLACK.
1691	 */
1692	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
1693		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
1694		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
1695		snd_buf->tail[0].iov_base = tmp;
1696	}
1697	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1698	/* slack space should prevent this from ever happening: */
1699	BUG_ON(snd_buf->len > snd_buf->buflen);
1700	status = -EIO;
1701	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1702	 * done anyway, so it's safe to put the request on the wire: */
1703	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1704		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1705	else if (maj_stat)
1706		return status;
1707
1708	*opaque_len = htonl(snd_buf->len - offset);
1709	/* guess whether we're in the head or the tail: */
1710	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1711		iov = snd_buf->tail;
1712	else
1713		iov = snd_buf->head;
1714	p = iov->iov_base + iov->iov_len;
1715	pad = 3 - ((snd_buf->len - offset - 1) & 3);
1716	memset(p, 0, pad);
1717	iov->iov_len += pad;
1718	snd_buf->len += pad;
1719
1720	return 0;
1721}
1722
1723static int
1724gss_wrap_req(struct rpc_task *task,
1725	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
1726{
1727	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1728	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1729			gc_base);
1730	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1731	int             status = -EIO;
1732
1733	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
1734	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1735		/* The spec seems a little ambiguous here, but I think that not
1736		 * wrapping context destruction requests makes the most sense.
1737		 */
1738		gss_wrap_req_encode(encode, rqstp, p, obj);
1739		status = 0;
1740		goto out;
1741	}
1742	switch (gss_cred->gc_service) {
1743	case RPC_GSS_SVC_NONE:
1744		gss_wrap_req_encode(encode, rqstp, p, obj);
1745		status = 0;
1746		break;
1747	case RPC_GSS_SVC_INTEGRITY:
1748		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
1749		break;
1750	case RPC_GSS_SVC_PRIVACY:
1751		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
1752		break;
1753	}
1754out:
1755	gss_put_ctx(ctx);
1756	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
1757	return status;
1758}
1759
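/*
 * Unwrap an integrity-protected reply: check the databody length and
 * sequence number, then verify the MIC that follows the data before the
 * results are decoded.
 */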
1760static inline int
1761gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1762		struct rpc_rqst *rqstp, __be32 **p)
1763{
1764	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
1765	struct xdr_buf integ_buf;
1766	struct xdr_netobj mic;
1767	u32 data_offset, mic_offset;
1768	u32 integ_len;
1769	u32 maj_stat;
1770	int status = -EIO;
1771
1772	integ_len = ntohl(*(*p)++);
1773	if (integ_len & 3)
1774		return status;
1775	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
1776	mic_offset = integ_len + data_offset;
1777	if (mic_offset > rcv_buf->len)
1778		return status;
1779	if (ntohl(*(*p)++) != rqstp->rq_seqno)
1780		return status;
1781
1782	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
1783				mic_offset - data_offset))
1784		return status;
1785
1786	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
1787		return status;
1788
1789	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1790	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1791		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1792	if (maj_stat != GSS_S_COMPLETE)
1793		return status;
1794	return 0;
1795}
1796
1797static inline int
1798gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1799		struct rpc_rqst *rqstp, __be32 **p)
1800{
1801	struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
1802	u32 offset;
1803	u32 opaque_len;
1804	u32 maj_stat;
1805	int status = -EIO;
1806
1807	opaque_len = ntohl(*(*p)++);
1808	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
1809	if (offset + opaque_len > rcv_buf->len)
1810		return status;
1811	/* remove padding: */
1812	rcv_buf->len = offset + opaque_len;
1813
1814	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
1815	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1816		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1817	if (maj_stat != GSS_S_COMPLETE)
1818		return status;
1819	if (ntohl(*(*p)++) != rqstp->rq_seqno)
1820		return status;
1821
1822	return 0;
1823}
1824
1825static int
1826gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
1827		      __be32 *p, void *obj)
1828{
1829	struct xdr_stream xdr;
1830
1831	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
1832	return decode(rqstp, &xdr, obj);
1833}
1834
1835static int
1836gss_unwrap_resp(struct rpc_task *task,
1837		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
1838{
1839	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1840	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
1841			gc_base);
1842	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1843	__be32		*savedp = p;
1844	struct kvec	*head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
1845	int		savedlen = head->iov_len;
1846	int             status = -EIO;
1847
1848	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
1849		goto out_decode;
1850	switch (gss_cred->gc_service) {
1851	case RPC_GSS_SVC_NONE:
1852		break;
1853	case RPC_GSS_SVC_INTEGRITY:
1854		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
1855		if (status)
1856			goto out;
1857		break;
1858	case RPC_GSS_SVC_PRIVACY:
1859		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
1860		if (status)
1861			goto out;
1862		break;
1863	}
1864	/* take into account extra slack for integrity and privacy cases: */
1865	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
1866						+ (savedlen - head->iov_len);
1867out_decode:
1868	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
1869out:
1870	gss_put_ctx(ctx);
1871	dprintk("RPC: %5u %s returning %d\n",
1872		task->tk_pid, __func__, status);
1873	return status;
1874}
1875
1876static const struct rpc_authops authgss_ops = {
1877	.owner		= THIS_MODULE,
1878	.au_flavor	= RPC_AUTH_GSS,
1879	.au_name	= "RPCSEC_GSS",
1880	.create		= gss_create,
1881	.destroy	= gss_destroy,
1882	.lookup_cred	= gss_lookup_cred,
1883	.crcreate	= gss_create_cred,
1884	.list_pseudoflavors = gss_mech_list_pseudoflavors,
1885	.info2flavor	= gss_mech_info2flavor,
1886	.flavor2info	= gss_mech_flavor2info,
1887};
1888
1889static const struct rpc_credops gss_credops = {
1890	.cr_name	= "AUTH_GSS",
1891	.crdestroy	= gss_destroy_cred,
1892	.cr_init	= gss_cred_init,
1893	.crbind		= rpcauth_generic_bind_cred,
1894	.crmatch	= gss_match,
1895	.crmarshal	= gss_marshal,
1896	.crrefresh	= gss_refresh,
1897	.crvalidate	= gss_validate,
1898	.crwrap_req	= gss_wrap_req,
1899	.crunwrap_resp	= gss_unwrap_resp,
1900	.crkey_timeout	= gss_key_timeout,
1901};
1902
1903static const struct rpc_credops gss_nullops = {
1904	.cr_name	= "AUTH_GSS",
1905	.crdestroy	= gss_destroy_nullcred,
1906	.crbind		= rpcauth_generic_bind_cred,
1907	.crmatch	= gss_match,
1908	.crmarshal	= gss_marshal,
1909	.crrefresh	= gss_refresh_null,
1910	.crvalidate	= gss_validate,
1911	.crwrap_req	= gss_wrap_req,
1912	.crunwrap_resp	= gss_unwrap_resp,
1913};
1914
1915static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
1916	.upcall		= rpc_pipe_generic_upcall,
1917	.downcall	= gss_pipe_downcall,
1918	.destroy_msg	= gss_pipe_destroy_msg,
1919	.open_pipe	= gss_pipe_open_v0,
1920	.release_pipe	= gss_pipe_release,
1921};
1922
1923static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
1924	.upcall		= rpc_pipe_generic_upcall,
1925	.downcall	= gss_pipe_downcall,
1926	.destroy_msg	= gss_pipe_destroy_msg,
1927	.open_pipe	= gss_pipe_open_v1,
1928	.release_pipe	= gss_pipe_release,
1929};
1930
1931static __net_init int rpcsec_gss_init_net(struct net *net)
1932{
1933	return gss_svc_init_net(net);
1934}
1935
1936static __net_exit void rpcsec_gss_exit_net(struct net *net)
1937{
1938	gss_svc_shutdown_net(net);
1939}
1940
1941static struct pernet_operations rpcsec_gss_net_ops = {
1942	.init = rpcsec_gss_init_net,
1943	.exit = rpcsec_gss_exit_net,
1944};
1945
1946/*
1947 * Initialize RPCSEC_GSS module
1948 */
1949static int __init init_rpcsec_gss(void)
1950{
1951	int err = 0;
1952
1953	err = rpcauth_register(&authgss_ops);
1954	if (err)
1955		goto out;
1956	err = gss_svc_init();
1957	if (err)
1958		goto out_unregister;
1959	err = register_pernet_subsys(&rpcsec_gss_net_ops);
1960	if (err)
1961		goto out_svc_exit;
1962	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
1963	return 0;
1964out_svc_exit:
1965	gss_svc_shutdown();
1966out_unregister:
1967	rpcauth_unregister(&authgss_ops);
1968out:
1969	return err;
1970}
1971
1972static void __exit exit_rpcsec_gss(void)
1973{
1974	unregister_pernet_subsys(&rpcsec_gss_net_ops);
1975	gss_svc_shutdown();
1976	rpcauth_unregister(&authgss_ops);
1977	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1978}
1979
1980MODULE_ALIAS("rpc-auth-6");
1981MODULE_LICENSE("GPL");
1982module_param_named(expired_cred_retry_delay,
1983		   gss_expired_cred_retry_delay,
1984		   uint, 0644);
1985MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
1986		"the RPC engine retries an expired credential");
1987
1988module_param_named(key_expire_timeo,
1989		   gss_key_expire_timeo,
1990		   uint, 0644);
1991MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
1992		"credential key's lifetime in which the NFS layer cleans up "
1993		"prior to key expiration");
1994
1995module_init(init_rpcsec_gss)
1996module_exit(exit_rpcsec_gss)
1997