auth_gss.c revision 5d28dc82074f1e64b22c9424b161abc1f5d6bcdb
/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id$
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define NFS_NGROUPS	16

#define GSS_CRED_SLACK		1024		/* XXX: unused */
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

/* XXX this define must match the gssd define, as it is passed to gssd
 * to signal the use of machine creds; it should be part of the shared
 * RPC interface. */

#define CA_RUN_AS_MACHINE  0x00000200

/* dump the buffer in `emacs-hexl' style */
#define isprint(c)      ((c > 0x1f) && (c < 0x7f))

struct gss_auth {
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct dentry *dentry;
};

static void gss_free_ctx(struct gss_cl_ctx *);
static struct rpc_pipe_ops gss_upcall_ops;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the inode->i_lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *old;

	old = gss_cred->gc_ctx;
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
	if (old)
		gss_put_ctx(old);
}

static int
gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int res = 0;

	rcu_read_lock();
	if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx)
		res = 1;
	rcu_read_unlock();
	return res;
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_KERNEL);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count,1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	u32 window_size;
	int ret;

	/* First unsigned int gives the lifetime (in seconds) of the cred */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
	/* Sequence number window. Determines the maximum number of simultaneous requests */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/* in which case, p points to an error code which we ignore */
		p = ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p  = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	return q;
err:
	dprintk("RPC:       gss_fill_context returning %ld\n", -PTR_ERR(p));
	return p;
}


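/* An in-flight upcall to gssd.  One message is queued per uid on the
 * mech-specific rpc_pipefs pipe; creds waiting on the same uid share the
 * message and sleep on its wait queues until a downcall (or an error)
 * fills in ->ctx or ->msg.errno. */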
struct gss_upcall_msg {
	atomic_t count;
	uid_t	uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
};

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &rpci->in_downcall, list) {
		if (pos->uid != uid)
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC:       gss_find_upcall found msg %p\n", pos);
		return pos;
	}
	dprintk("RPC:       gss_find_upcall found nothing\n");
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
	struct inode *inode = gss_auth->dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	struct gss_upcall_msg *old;

	spin_lock(&inode->i_lock);
	old = __gss_find_upcall(rpci, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &rpci->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&inode->i_lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct gss_auth *gss_auth = gss_msg->auth;
	struct inode *inode = gss_auth->dentry->d_inode;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct inode *inode = gss_msg->auth->dentry->d_inode;

	spin_lock(&inode->i_lock);
	if (gss_msg->ctx)
		gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx));
	else
		task->tk_status = gss_msg->msg.errno;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
}

static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
{
	struct gss_upcall_msg *gss_msg;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
	if (gss_msg != NULL) {
		INIT_LIST_HEAD(&gss_msg->list);
		rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
		init_waitqueue_head(&gss_msg->waitqueue);
		atomic_set(&gss_msg->count, 1);
		gss_msg->msg.data = &gss_msg->uid;
		gss_msg->msg.len = sizeof(gss_msg->uid);
		gss_msg->uid = uid;
		gss_msg->auth = gss_auth;
	}
	return gss_msg;
}

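/* Set up an upcall for this cred's uid: allocate a message, add it to the
 * pipe's list (or reuse an identical one that is already queued), and, if
 * it is new, hand it to rpc_pipefs so gssd can read it. */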
static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_upcall_msg *gss_new, *gss_msg;

	gss_new = gss_alloc_msg(gss_auth, cred->cr_uid);
	if (gss_new == NULL)
		return ERR_PTR(-ENOMEM);
	gss_msg = gss_add_msg(gss_auth, gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

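/* Asynchronous context refresh, reached via the cred's crrefresh path:
 * queue (or join) an upcall for the cred's uid and put the rpc_task to
 * sleep on the upcall's rpc_waitqueue until gss_upcall_callback runs. */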
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct inode *inode = gss_auth->dentry->d_inode;
	int err = 0;

	dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
								cred->cr_uid);
	gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	spin_lock(&inode->i_lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
	} else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
			task->tk_pid, cred->cr_uid, err);
	return err;
}

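/* Synchronous context setup, used from gss_cred_init: wait (interruptibly)
 * in process context until gssd answers the upcall or an error is set. */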
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct inode *inode = gss_auth->dentry->d_inode;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err = 0;

	dprintk("RPC:       gss_upcall for uid %u\n", cred->cr_uid);
	gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&inode->i_lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&inode->i_lock);
		if (signalled()) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx));
	else
		err = gss_msg->msg.errno;
	spin_unlock(&inode->i_lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC:       gss_create_upcall for uid %u result %d\n",
			cred->cr_uid, err);
	return err;
}

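/* Pipe read side: copy the uid (the entire upcall message) out to gssd,
 * honouring partial reads via msg->copied. */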
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
		char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	ssize_t mlen = msg->len;
	ssize_t left;

	if (mlen > buflen)
		mlen = buflen;
	left = copy_to_user(dst, data, mlen);
	if (left < 0) {
		msg->errno = left;
		return left;
	}
	mlen -= left;
	msg->copied += mlen;
	msg->errno = 0;
	return mlen;
}

#define MSG_BUF_MAXSIZE 1024

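/* Pipe write side: gssd writes back the uid followed by the new context
 * (lifetime, sequence window, wire handle and the mech-specific security
 * blob).  Match it to the queued upcall for that uid, parse it with
 * gss_fill_context() and wake up everyone waiting on the message. */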
static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct rpc_clnt *clnt;
	struct gss_upcall_msg *gss_msg;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gss_cl_ctx *ctx;
	uid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_KERNEL);
	if (!buf)
		goto out;

	clnt = RPC_I(inode)->private;
	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &uid, sizeof(uid));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&inode->i_lock);
	gss_msg = __gss_find_upcall(RPC_I(inode), uid);
	if (gss_msg == NULL) {
		spin_unlock(&inode->i_lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&inode->i_lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		gss_msg->msg.errno = (err == -EACCES) ? -EACCES : -EAGAIN;
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&inode->i_lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&inode->i_lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC:       gss_pipe_downcall returning %Zd\n", err);
	return err;
}

static void
gss_pipe_release(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct gss_upcall_msg *gss_msg;

	spin_lock(&inode->i_lock);
	while (!list_empty(&rpci->in_downcall)) {

		gss_msg = list_entry(rpci->in_downcall.next,
				struct gss_upcall_msg, list);
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&inode->i_lock);
		gss_release_msg(gss_msg);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

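/* Called when a queued upcall message is torn down.  A negative errno
 * means gssd never answered; unhash the message so waiters see the error,
 * and warn (rate-limited) if the upcall simply timed out. */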
static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
	static unsigned long ratelimit;

	if (msg->errno < 0) {
		dprintk("RPC:       gss_pipe_destroy_msg releasing msg %p\n",
				gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT) {
			unsigned long now = jiffies;
			if (time_after(now, ratelimit)) {
				printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
						    "Please check that the user daemon is running!\n");
				ratelimit = now + 15*HZ;
			}
		}
		gss_release_msg(gss_msg);
	}
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct gss_auth *gss_auth;
	struct rpc_auth * auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC:       creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	gss_auth->client = clnt;
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
				__FUNCTION__, flavor);
		goto err_free;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);

	gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
			clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(gss_auth->dentry)) {
		err = PTR_ERR(gss_auth->dentry);
		goto err_put_mech;
	}

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_unlink_pipe;

	return auth;
err_unlink_pipe:
	rpc_unlink(gss_auth->dentry);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_free:
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth;

	dprintk("RPC:       destroying GSS authenticator %p flavor %d\n",
			auth, auth->au_flavor);

	rpcauth_destroy_credcache(auth);

	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	rpc_unlink(gss_auth->dentry);
	gss_auth->dentry = NULL;
	gss_mech_put(gss_auth->mech);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

/* gss_destroy_cred (and gss_destroy_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC:       gss_free_ctx\n");

	if (ctx->gc_gss_ctx)
		gss_delete_sec_context(&ctx->gc_gss_ctx);

	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC:       gss_free_cred %p\n", gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	rcu_assign_pointer(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred	*cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC:       gss_create_cred for uid %d, flavor %d\n",
		acred->uid, auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	return &cred->gc_base;

out_err:
	dprintk("RPC:       gss_create_cred failed with error %d\n", err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

	/*
	 * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
	 * we don't really care if the credential has expired or not,
	 * since the caller should be prepared to reinitialise it.
	 */
	if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
out:
	return (rc->cr_uid == acred->uid);
}

/*
* Marshal credentials.
* Maybe we should keep a cached credential for performance reasons.
*/
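/* The credential body emitted below is the RPCSEC_GSS cred of RFC 2203:
 * version, gss procedure, sequence number, service and the wire context
 * handle.  The verifier is a MIC computed over the request header, from
 * the xid up to and including the credential. */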
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
	__be32		*cred_len;
	struct rpc_rqst *req = task->tk_rqstp;
	u32             maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec	iov;
	struct xdr_buf	verf_buf;

	dprintk("RPC: %5u gss_marshal\n", task->tk_pid);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor*/
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}

/*
* Refresh credentials. XXX - finish
*/
static int
gss_refresh(struct rpc_task *task)
{

	if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred))
		return gss_refresh_upcall(task);
	return 0;
}

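/* Check the verifier in the server's reply: it must be an RPC_AUTH_GSS
 * verifier carrying a MIC over the sequence number we sent in the call. */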
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32		seq;
	struct kvec	iov;
	struct xdr_buf	verf_buf;
	struct xdr_netobj mic;
	u32		flav,len;
	u32		maj_stat;

	dprintk("RPC: %5u gss_validate\n", task->tk_pid);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat)
		goto out_bad;
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n",
			task->tk_pid);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
	return NULL;
}

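/* rpc_gss_integ_data: emit the length and sequence number, XDR-encode the
 * arguments in place, then append a MIC computed over the sequence number
 * plus the encoded arguments. */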
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf	integ_buf;
	__be32          *integ_len = NULL;
	struct xdr_netobj mic;
	u32		offset;
	__be32		*q;
	struct kvec	*iov;
	u32             maj_stat = 0;
	int		status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i=0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

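/* Allocate a shadow page array to hold the encrypted request data; the
 * extra page gives the tail its own buffer so gss_wrap() can grow the
 * head without clobbering it. */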
static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	for (i--; i >= 0; i--) {
		__free_page(rqstp->rq_enc_pages[i]);
	}
out:
	return -EAGAIN;
}

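/* rpc_gss_priv_data: emit the sequence number and encoded arguments, then
 * encrypt them in place with gss_wrap(), using the pages set up by
 * alloc_enc_pages(), and prefix the result with its opaque length. */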
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
	u32		offset;
	u32             maj_stat;
	int		status;
	__be32		*opaque_len;
	struct page	**inpages;
	int		first;
	int		pad;
	struct kvec	*iov;
	char		*tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	status = encode(rqstp, p, obj);
	if (status)
		return status;

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/* Give the tail its own page, in case we need extra space in the
	 * head when wrapping: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* RPC_SLACK_SPACE should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
	     kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int             status = -EIO;

	dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		status = encode(rqstp, p, obj);
		goto out;
	}
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			status = encode(rqstp, p, obj);
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_wrap_req_integ(cred, ctx, encode,
								rqstp, p, obj);
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_wrap_req_priv(cred, ctx, encode,
					rqstp, p, obj);
			break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
	return status;
}

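/* Integrity reply processing: check the sequence number and verify the
 * MIC that the server computed over the response data. */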
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}


static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_msg.rpc_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32		*savedp = p;
	struct kvec	*head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int		savedlen = head->iov_len;
	int             status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
		case RPC_GSS_SVC_PRIVACY:
			status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
			if (status)
				goto out;
			break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = decode(rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
			status);
	return status;
}

static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
#ifdef RPC_DEBUG
	.au_name	= "RPCSEC_GSS",
#endif
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred
};

static const struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static struct rpc_pipe_ops gss_upcall_ops = {
	.upcall		= gss_pipe_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.release_pipe	= gss_pipe_release,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	return 0;
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
}

MODULE_LICENSE("GPL");
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)
