auth_gss.c revision e726340ac9cf6bb5b3f92a064664e10cd2b748de
1/*
2 * linux/net/sunrpc/auth_gss/auth_gss.c
3 *
4 * RPCSEC_GSS client authentication.
5 *
6 *  Copyright (c) 2000 The Regents of the University of Michigan.
7 *  All rights reserved.
8 *
9 *  Dug Song       <dugsong@monkey.org>
10 *  Andy Adamson   <andros@umich.edu>
11 *
12 *  Redistribution and use in source and binary forms, with or without
13 *  modification, are permitted provided that the following conditions
14 *  are met:
15 *
16 *  1. Redistributions of source code must retain the above copyright
17 *     notice, this list of conditions and the following disclaimer.
18 *  2. Redistributions in binary form must reproduce the above copyright
19 *     notice, this list of conditions and the following disclaimer in the
20 *     documentation and/or other materials provided with the distribution.
21 *  3. Neither the name of the University nor the names of its
22 *     contributors may be used to endorse or promote products derived
23 *     from this software without specific prior written permission.
24 *
25 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/types.h>
42#include <linux/slab.h>
43#include <linux/sched.h>
44#include <linux/pagemap.h>
45#include <linux/sunrpc/clnt.h>
46#include <linux/sunrpc/auth.h>
47#include <linux/sunrpc/auth_gss.h>
48#include <linux/sunrpc/svcauth_gss.h>
49#include <linux/sunrpc/gss_err.h>
50#include <linux/workqueue.h>
51#include <linux/sunrpc/rpc_pipe_fs.h>
52#include <linux/sunrpc/gss_api.h>
53#include <asm/uaccess.h>
54
55#include "../netns.h"
56
57static const struct rpc_authops authgss_ops;
58
59static const struct rpc_credops gss_credops;
60static const struct rpc_credops gss_nullops;
61
62#define GSS_RETRY_EXPIRED 5
63static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
64
65#ifdef RPC_DEBUG
66# define RPCDBG_FACILITY	RPCDBG_AUTH
67#endif
68
69#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
70/* length of a krb5 verifier (48), plus data added before arguments when
71 * using integrity (two 4-byte integers): */
72#define GSS_VERF_SLACK		100
73
74struct gss_auth {
75	struct kref kref;
76	struct rpc_auth rpc_auth;
77	struct gss_api_mech *mech;
78	enum rpc_gss_svc service;
79	struct rpc_clnt *client;
80	struct net *net;
81	/*
82	 * There are two upcall pipes; pipe[1], named "gssd", is used
83	 * for the new text-based upcall; pipe[0] is named after the
84	 * mechanism (for example, "krb5") and exists for
85	 * backwards-compatibility with older versions of gssd.
86	 */
87	struct rpc_pipe *pipe[2];
88	const char *target_name;
89};
90
91/* pipe_version >= 0 if and only if someone has a pipe open. */
92static DEFINE_SPINLOCK(pipe_version_lock);
93static struct rpc_wait_queue pipe_version_rpc_waitqueue;
94static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
95
96static void gss_free_ctx(struct gss_cl_ctx *);
97static const struct rpc_pipe_ops gss_upcall_ops_v0;
98static const struct rpc_pipe_ops gss_upcall_ops_v1;
99
100static inline struct gss_cl_ctx *
101gss_get_ctx(struct gss_cl_ctx *ctx)
102{
103	atomic_inc(&ctx->count);
104	return ctx;
105}
106
107static inline void
108gss_put_ctx(struct gss_cl_ctx *ctx)
109{
110	if (atomic_dec_and_test(&ctx->count))
111		gss_free_ctx(ctx);
112}
113
114/* gss_cred_set_ctx:
115 * called by gss_upcall_callback and gss_create_upcall in order
116 * to set the gss context. The actual exchange of an old context
117 * and a new one is protected by the pipe->lock.
118 */
119static void
120gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
121{
122	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
123
124	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
125		return;
126	gss_get_ctx(ctx);
127	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
128	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
129	smp_mb__before_clear_bit();
130	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
131}
132
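/*
 * Helpers for parsing the downcall buffer written by gssd.  Each returns
 * a pointer just past the bytes it consumed, or ERR_PTR(-EFAULT) if the
 * requested length would run past 'end'.
 */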
133static const void *
134simple_get_bytes(const void *p, const void *end, void *res, size_t len)
135{
136	const void *q = (const void *)((const char *)p + len);
137	if (unlikely(q > end || q < p))
138		return ERR_PTR(-EFAULT);
139	memcpy(res, p, len);
140	return q;
141}
142
143static inline const void *
144simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
145{
146	const void *q;
147	unsigned int len;
148
149	p = simple_get_bytes(p, end, &len, sizeof(len));
150	if (IS_ERR(p))
151		return p;
152	q = (const void *)((const char *)p + len);
153	if (unlikely(q > end || q < p))
154		return ERR_PTR(-EFAULT);
155	dest->data = kmemdup(p, len, GFP_NOFS);
156	if (unlikely(dest->data == NULL))
157		return ERR_PTR(-ENOMEM);
158	dest->len = len;
159	return q;
160}
161
162static struct gss_cl_ctx *
163gss_cred_get_ctx(struct rpc_cred *cred)
164{
165	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
166	struct gss_cl_ctx *ctx = NULL;
167
168	rcu_read_lock();
169	if (gss_cred->gc_ctx)
170		ctx = gss_get_ctx(gss_cred->gc_ctx);
171	rcu_read_unlock();
172	return ctx;
173}
174
175static struct gss_cl_ctx *
176gss_alloc_context(void)
177{
178	struct gss_cl_ctx *ctx;
179
180	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
181	if (ctx != NULL) {
182		ctx->gc_proc = RPC_GSS_PROC_DATA;
183		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
184		spin_lock_init(&ctx->gc_seq_lock);
185		atomic_set(&ctx->count, 1);
186	}
187	return ctx;
188}
189
190#define GSSD_MIN_TIMEOUT (60 * 60)
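/*
 * Parse the body of a gssd downcall: context lifetime in seconds,
 * sequence window size, the opaque wire context handle, and finally the
 * serialized security context, which is handed to the mechanism via
 * gss_import_sec_context().  A window size of zero means gssd is
 * reporting an error code instead of a context.
 */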
191static const void *
192gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
193{
194	const void *q;
195	unsigned int seclen;
196	unsigned int timeout;
197	unsigned long now = jiffies;
198	u32 window_size;
199	int ret;
200
201	/* First unsigned int gives the remaining lifetime in seconds of the
202	 * credential - e.g. the remaining TGT lifetime for Kerberos or
203	 * the -t value passed to GSSD.
204	 */
205	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
206	if (IS_ERR(p))
207		goto err;
208	if (timeout == 0)
209		timeout = GSSD_MIN_TIMEOUT;
210	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
211	/* Sequence number window. Determines the maximum number of
212	 * simultaneous requests
213	 */
214	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
215	if (IS_ERR(p))
216		goto err;
217	ctx->gc_win = window_size;
218	/* gssd signals an error by passing ctx->gc_win = 0: */
219	if (ctx->gc_win == 0) {
220		/*
221		 * in which case, p points to an error code. Anything other
222		 * than -EKEYEXPIRED gets converted to -EACCES.
223		 */
224		p = simple_get_bytes(p, end, &ret, sizeof(ret));
225		if (!IS_ERR(p))
226			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
227						    ERR_PTR(-EACCES);
228		goto err;
229	}
230	/* copy the opaque wire context */
231	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
232	if (IS_ERR(p))
233		goto err;
234	/* import the opaque security context */
235	p  = simple_get_bytes(p, end, &seclen, sizeof(seclen));
236	if (IS_ERR(p))
237		goto err;
238	q = (const void *)((const char *)p + seclen);
239	if (unlikely(q > end || q < p)) {
240		p = ERR_PTR(-EFAULT);
241		goto err;
242	}
243	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
244	if (ret < 0) {
245		p = ERR_PTR(ret);
246		goto err;
247	}
248	dprintk("RPC:       %s Success. gc_expiry %lu now %lu timeout %u\n",
249		__func__, ctx->gc_expiry, now, timeout);
250	return q;
251err:
252	dprintk("RPC:       %s returns error %ld\n", __func__, -PTR_ERR(p));
253	return p;
254}
255
256#define UPCALL_BUF_LEN 128
257
258struct gss_upcall_msg {
259	atomic_t count;
260	kuid_t	uid;
261	struct rpc_pipe_msg msg;
262	struct list_head list;
263	struct gss_auth *auth;
264	struct rpc_pipe *pipe;
265	struct rpc_wait_queue rpc_waitqueue;
266	wait_queue_head_t waitqueue;
267	struct gss_cl_ctx *ctx;
268	char databuf[UPCALL_BUF_LEN];
269};
270
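/*
 * Upcall version negotiation: sn->pipe_version stays at -1 until a gssd
 * opens one of the pipes; that first open fixes the version (0 = legacy
 * binary uid upcall, 1 = text-based upcall) for the whole namespace.
 * get_pipe_version() takes a reference on sn->pipe_users and returns the
 * negotiated version, or -EAGAIN if no gssd is listening yet.
 */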
271static int get_pipe_version(struct net *net)
272{
273	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
274	int ret;
275
276	spin_lock(&pipe_version_lock);
277	if (sn->pipe_version >= 0) {
278		atomic_inc(&sn->pipe_users);
279		ret = sn->pipe_version;
280	} else
281		ret = -EAGAIN;
282	spin_unlock(&pipe_version_lock);
283	return ret;
284}
285
286static void put_pipe_version(struct net *net)
287{
288	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
289
290	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
291		sn->pipe_version = -1;
292		spin_unlock(&pipe_version_lock);
293	}
294}
295
296static void
297gss_release_msg(struct gss_upcall_msg *gss_msg)
298{
299	struct net *net = gss_msg->auth->net;
300	if (!atomic_dec_and_test(&gss_msg->count))
301		return;
302	put_pipe_version(net);
303	BUG_ON(!list_empty(&gss_msg->list));
304	if (gss_msg->ctx != NULL)
305		gss_put_ctx(gss_msg->ctx);
306	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
307	kfree(gss_msg);
308}
309
310static struct gss_upcall_msg *
311__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
312{
313	struct gss_upcall_msg *pos;
314	list_for_each_entry(pos, &pipe->in_downcall, list) {
315		if (!uid_eq(pos->uid, uid))
316			continue;
317		atomic_inc(&pos->count);
318		dprintk("RPC:       %s found msg %p\n", __func__, pos);
319		return pos;
320	}
321	dprintk("RPC:       %s found nothing\n", __func__);
322	return NULL;
323}
324
325/* Try to add an upcall to the pipefs queue.
326 * If an upcall owned by our uid already exists, then we return a reference
327 * to that upcall instead of adding the new upcall.
328 */
329static inline struct gss_upcall_msg *
330gss_add_msg(struct gss_upcall_msg *gss_msg)
331{
332	struct rpc_pipe *pipe = gss_msg->pipe;
333	struct gss_upcall_msg *old;
334
335	spin_lock(&pipe->lock);
336	old = __gss_find_upcall(pipe, gss_msg->uid);
337	if (old == NULL) {
338		atomic_inc(&gss_msg->count);
339		list_add(&gss_msg->list, &pipe->in_downcall);
340	} else
341		gss_msg = old;
342	spin_unlock(&pipe->lock);
343	return gss_msg;
344}
345
346static void
347__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
348{
349	list_del_init(&gss_msg->list);
350	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
351	wake_up_all(&gss_msg->waitqueue);
352	atomic_dec(&gss_msg->count);
353}
354
355static void
356gss_unhash_msg(struct gss_upcall_msg *gss_msg)
357{
358	struct rpc_pipe *pipe = gss_msg->pipe;
359
360	if (list_empty(&gss_msg->list))
361		return;
362	spin_lock(&pipe->lock);
363	if (!list_empty(&gss_msg->list))
364		__gss_unhash_msg(gss_msg);
365	spin_unlock(&pipe->lock);
366}
367
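/*
 * Apply the result of a completed upcall to the cred.  Called with
 * pipe->lock held: on success install the new context; on -EKEYEXPIRED
 * mark the cred negative so that subsequent refreshes back off for
 * gss_expired_cred_retry_delay seconds; then wake any waiting tasks.
 */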
368static void
369gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
370{
371	switch (gss_msg->msg.errno) {
372	case 0:
373		if (gss_msg->ctx == NULL)
374			break;
375		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
376		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
377		break;
378	case -EKEYEXPIRED:
379		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
380	}
381	gss_cred->gc_upcall_timestamp = jiffies;
382	gss_cred->gc_upcall = NULL;
383	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
384}
385
386static void
387gss_upcall_callback(struct rpc_task *task)
388{
389	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
390			struct gss_cred, gc_base);
391	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
392	struct rpc_pipe *pipe = gss_msg->pipe;
393
394	spin_lock(&pipe->lock);
395	gss_handle_downcall_result(gss_cred, gss_msg);
396	spin_unlock(&pipe->lock);
397	task->tk_status = gss_msg->msg.errno;
398	gss_release_msg(gss_msg);
399}
400
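/*
 * Two upcall message formats are supported.  v0 is simply the raw uid_t;
 * v1 is a single text line consumed by newer versions of gssd, for
 * example (illustrative values):
 *
 *	mech=krb5 uid=1000 target=nfs@server service=nfs enctypes=18,17
 *
 * where the target=, service= and enctypes= fields appear only when set.
 */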
401static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
402{
403	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
404	memcpy(gss_msg->databuf, &uid, sizeof(uid));
405	gss_msg->msg.data = gss_msg->databuf;
406	gss_msg->msg.len = sizeof(uid);
407	BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
408}
409
410static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
411				const char *service_name,
412				const char *target_name)
413{
414	struct gss_api_mech *mech = gss_msg->auth->mech;
415	char *p = gss_msg->databuf;
416	int len = 0;
417
418	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
419				   mech->gm_name,
420				   from_kuid(&init_user_ns, gss_msg->uid));
421	p += gss_msg->msg.len;
422	if (target_name) {
423		len = sprintf(p, "target=%s ", target_name);
424		p += len;
425		gss_msg->msg.len += len;
426	}
427	if (service_name != NULL) {
428		len = sprintf(p, "service=%s ", service_name);
429		p += len;
430		gss_msg->msg.len += len;
431	}
432	if (mech->gm_upcall_enctypes) {
433		len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
434		p += len;
435		gss_msg->msg.len += len;
436	}
437	len = sprintf(p, "\n");
438	gss_msg->msg.len += len;
439
440	gss_msg->msg.data = gss_msg->databuf;
441	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
442}
443
444static struct gss_upcall_msg *
445gss_alloc_msg(struct gss_auth *gss_auth,
446		kuid_t uid, const char *service_name)
447{
448	struct gss_upcall_msg *gss_msg;
449	int vers;
450
451	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
452	if (gss_msg == NULL)
453		return ERR_PTR(-ENOMEM);
454	vers = get_pipe_version(gss_auth->net);
455	if (vers < 0) {
456		kfree(gss_msg);
457		return ERR_PTR(vers);
458	}
459	gss_msg->pipe = gss_auth->pipe[vers];
460	INIT_LIST_HEAD(&gss_msg->list);
461	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
462	init_waitqueue_head(&gss_msg->waitqueue);
463	atomic_set(&gss_msg->count, 1);
464	gss_msg->uid = uid;
465	gss_msg->auth = gss_auth;
466	switch (vers) {
467	case 0:
468		gss_encode_v0_msg(gss_msg);
		break;
469	default:
470		gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
471	}
472	return gss_msg;
473}
474
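/*
 * Allocate an upcall message for this cred and queue it to rpc_pipefs.
 * If an upcall for the same uid is already pending, the new message is
 * discarded and a reference to the existing one is returned instead.
 */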
475static struct gss_upcall_msg *
476gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
477{
478	struct gss_cred *gss_cred = container_of(cred,
479			struct gss_cred, gc_base);
480	struct gss_upcall_msg *gss_new, *gss_msg;
481	kuid_t uid = cred->cr_uid;
482
483	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
484	if (IS_ERR(gss_new))
485		return gss_new;
486	gss_msg = gss_add_msg(gss_new);
487	if (gss_msg == gss_new) {
488		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
489		if (res) {
490			gss_unhash_msg(gss_new);
491			gss_msg = ERR_PTR(res);
492		}
493	} else
494		gss_release_msg(gss_new);
495	return gss_msg;
496}
497
498static void warn_gssd(void)
499{
500	static unsigned long ratelimit;
501	unsigned long now = jiffies;
502
503	if (time_after(now, ratelimit)) {
504		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
505				"Please check that the user daemon is running.\n");
506		ratelimit = now + 15*HZ;
507	}
508}
509
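/*
 * Asynchronous context refresh, called from an RPC task.  Queues (or
 * joins) an upcall and puts the task to sleep on the message's wait
 * queue; gss_upcall_callback() completes the refresh once the downcall
 * arrives.
 */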
510static inline int
511gss_refresh_upcall(struct rpc_task *task)
512{
513	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
514	struct gss_auth *gss_auth = container_of(cred->cr_auth,
515			struct gss_auth, rpc_auth);
516	struct gss_cred *gss_cred = container_of(cred,
517			struct gss_cred, gc_base);
518	struct gss_upcall_msg *gss_msg;
519	struct rpc_pipe *pipe;
520	int err = 0;
521
522	dprintk("RPC: %5u %s for uid %u\n",
523		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
524	gss_msg = gss_setup_upcall(gss_auth, cred);
525	if (PTR_ERR(gss_msg) == -EAGAIN) {
526		/* XXX: warning on the first, under the assumption we
527		 * shouldn't normally hit this case on a refresh. */
528		warn_gssd();
529		task->tk_timeout = 15*HZ;
530		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
531		return -EAGAIN;
532	}
533	if (IS_ERR(gss_msg)) {
534		err = PTR_ERR(gss_msg);
535		goto out;
536	}
537	pipe = gss_msg->pipe;
538	spin_lock(&pipe->lock);
539	if (gss_cred->gc_upcall != NULL)
540		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
541	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
542		task->tk_timeout = 0;
543		gss_cred->gc_upcall = gss_msg;
544		/* gss_upcall_callback will release the reference to gss_upcall_msg */
545		atomic_inc(&gss_msg->count);
546		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
547	} else {
548		gss_handle_downcall_result(gss_cred, gss_msg);
549		err = gss_msg->msg.errno;
550	}
551	spin_unlock(&pipe->lock);
552	gss_release_msg(gss_msg);
553out:
554	dprintk("RPC: %5u %s for uid %u result %d\n",
555		task->tk_pid, __func__,
556		from_kuid(&init_user_ns, cred->cr_uid),	err);
557	return err;
558}
559
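/*
 * Synchronous context establishment, used when initializing a new cred.
 * Sleeps (killably) until gssd answers or the upcall fails.  If no gssd
 * pipe is open yet, wait for the pipe version to be settled and retry.
 */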
560static inline int
561gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
562{
563	struct net *net = gss_auth->net;
564	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
565	struct rpc_pipe *pipe;
566	struct rpc_cred *cred = &gss_cred->gc_base;
567	struct gss_upcall_msg *gss_msg;
568	unsigned long timeout;
569	DEFINE_WAIT(wait);
570	int err;
571
572	dprintk("RPC:       %s for uid %u\n",
573		__func__, from_kuid(&init_user_ns, cred->cr_uid));
574retry:
575	err = 0;
576	/* Default timeout is 15s unless we know that gssd is not running */
577	timeout = 15 * HZ;
578	if (!sn->gssd_running)
579		timeout = HZ >> 2;
580	gss_msg = gss_setup_upcall(gss_auth, cred);
581	if (PTR_ERR(gss_msg) == -EAGAIN) {
582		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
583				sn->pipe_version >= 0, timeout);
584		if (sn->pipe_version < 0) {
585			if (err == 0)
586				sn->gssd_running = 0;
587			warn_gssd();
588			err = -EACCES;
589		}
590		if (err < 0)
591			goto out;
592		goto retry;
593	}
594	if (IS_ERR(gss_msg)) {
595		err = PTR_ERR(gss_msg);
596		goto out;
597	}
598	pipe = gss_msg->pipe;
599	for (;;) {
600		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
601		spin_lock(&pipe->lock);
602		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
603			break;
604		}
605		spin_unlock(&pipe->lock);
606		if (fatal_signal_pending(current)) {
607			err = -ERESTARTSYS;
608			goto out_intr;
609		}
610		schedule();
611	}
612	if (gss_msg->ctx)
613		gss_cred_set_ctx(cred, gss_msg->ctx);
614	else
615		err = gss_msg->msg.errno;
616	spin_unlock(&pipe->lock);
617out_intr:
618	finish_wait(&gss_msg->waitqueue, &wait);
619	gss_release_msg(gss_msg);
620out:
621	dprintk("RPC:       %s for uid %u result %d\n",
622		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
623	return err;
624}
625
626#define MSG_BUF_MAXSIZE 1024
627
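/*
 * Downcall handler: gssd writes back a buffer that begins with the uid
 * of the original upcall, followed by the context in the format parsed
 * by gss_fill_context().  Match it against a pending upcall and hand the
 * new context to the waiting cred.  Returns the number of bytes consumed
 * or a negative errno.
 */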
628static ssize_t
629gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
630{
631	const void *p, *end;
632	void *buf;
633	struct gss_upcall_msg *gss_msg;
634	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
635	struct gss_cl_ctx *ctx;
636	uid_t id;
637	kuid_t uid;
638	ssize_t err = -EFBIG;
639
640	if (mlen > MSG_BUF_MAXSIZE)
641		goto out;
642	err = -ENOMEM;
643	buf = kmalloc(mlen, GFP_NOFS);
644	if (!buf)
645		goto out;
646
647	err = -EFAULT;
648	if (copy_from_user(buf, src, mlen))
649		goto err;
650
651	end = (const void *)((char *)buf + mlen);
652	p = simple_get_bytes(buf, end, &id, sizeof(id));
653	if (IS_ERR(p)) {
654		err = PTR_ERR(p);
655		goto err;
656	}
657
658	uid = make_kuid(&init_user_ns, id);
659	if (!uid_valid(uid)) {
660		err = -EINVAL;
661		goto err;
662	}
663
664	err = -ENOMEM;
665	ctx = gss_alloc_context();
666	if (ctx == NULL)
667		goto err;
668
669	err = -ENOENT;
670	/* Find a matching upcall */
671	spin_lock(&pipe->lock);
672	gss_msg = __gss_find_upcall(pipe, uid);
673	if (gss_msg == NULL) {
674		spin_unlock(&pipe->lock);
675		goto err_put_ctx;
676	}
677	list_del_init(&gss_msg->list);
678	spin_unlock(&pipe->lock);
679
680	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
681	if (IS_ERR(p)) {
682		err = PTR_ERR(p);
683		switch (err) {
684		case -EACCES:
685		case -EKEYEXPIRED:
686			gss_msg->msg.errno = err;
687			err = mlen;
688			break;
689		case -EFAULT:
690		case -ENOMEM:
691		case -EINVAL:
692		case -ENOSYS:
693			gss_msg->msg.errno = -EAGAIN;
694			break;
695		default:
696			printk(KERN_CRIT "%s: bad return from "
697				"gss_fill_context: %zd\n", __func__, err);
698			BUG();
699		}
700		goto err_release_msg;
701	}
702	gss_msg->ctx = gss_get_ctx(ctx);
703	err = mlen;
704
705err_release_msg:
706	spin_lock(&pipe->lock);
707	__gss_unhash_msg(gss_msg);
708	spin_unlock(&pipe->lock);
709	gss_release_msg(gss_msg);
710err_put_ctx:
711	gss_put_ctx(ctx);
712err:
713	kfree(buf);
714out:
715	dprintk("RPC:       %s returning %zd\n", __func__, err);
716	return err;
717}
718
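/*
 * The first gssd to open either pipe determines the upcall version used
 * in this namespace; subsequently opening the pipe of the other version
 * fails with -EBUSY.
 */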
719static int gss_pipe_open(struct inode *inode, int new_version)
720{
721	struct net *net = inode->i_sb->s_fs_info;
722	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
723	int ret = 0;
724
725	spin_lock(&pipe_version_lock);
726	if (sn->pipe_version < 0) {
727		/* First open of any gss pipe determines the version: */
728		sn->pipe_version = new_version;
729		rpc_wake_up(&pipe_version_rpc_waitqueue);
730		wake_up(&pipe_version_waitqueue);
731	} else if (sn->pipe_version != new_version) {
732		/* Trying to open a pipe of a different version */
733		ret = -EBUSY;
734		goto out;
735	}
736	atomic_inc(&sn->pipe_users);
737out:
738	spin_unlock(&pipe_version_lock);
739	return ret;
740
741}
742
743static int gss_pipe_open_v0(struct inode *inode)
744{
745	return gss_pipe_open(inode, 0);
746}
747
748static int gss_pipe_open_v1(struct inode *inode)
749{
750	return gss_pipe_open(inode, 1);
751}
752
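/*
 * Called when gssd closes the pipe: fail every pending upcall with
 * -EPIPE and drop our reference on the pipe version.
 */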
753static void
754gss_pipe_release(struct inode *inode)
755{
756	struct net *net = inode->i_sb->s_fs_info;
757	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
758	struct gss_upcall_msg *gss_msg;
759
760restart:
761	spin_lock(&pipe->lock);
762	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
763
764		if (!list_empty(&gss_msg->msg.list))
765			continue;
766		gss_msg->msg.errno = -EPIPE;
767		atomic_inc(&gss_msg->count);
768		__gss_unhash_msg(gss_msg);
769		spin_unlock(&pipe->lock);
770		gss_release_msg(gss_msg);
771		goto restart;
772	}
773	spin_unlock(&pipe->lock);
774
775	put_pipe_version(net);
776}
777
778static void
779gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
780{
781	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
782
783	if (msg->errno < 0) {
784		dprintk("RPC:       %s releasing msg %p\n",
785			__func__, gss_msg);
786		atomic_inc(&gss_msg->count);
787		gss_unhash_msg(gss_msg);
788		if (msg->errno == -ETIMEDOUT)
789			warn_gssd();
790		gss_release_msg(gss_msg);
791	}
792}
793
794static void gss_pipes_dentries_destroy(struct rpc_auth *auth)
795{
796	struct gss_auth *gss_auth;
797
798	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
799	if (gss_auth->pipe[0]->dentry)
800		rpc_unlink(gss_auth->pipe[0]->dentry);
801	if (gss_auth->pipe[1]->dentry)
802		rpc_unlink(gss_auth->pipe[1]->dentry);
803}
804
805static int gss_pipes_dentries_create(struct rpc_auth *auth)
806{
807	int err;
808	struct gss_auth *gss_auth;
809	struct rpc_clnt *clnt;
810
811	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
812	clnt = gss_auth->client;
813
814	gss_auth->pipe[1]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
815						      "gssd",
816						      clnt, gss_auth->pipe[1]);
817	if (IS_ERR(gss_auth->pipe[1]->dentry))
818		return PTR_ERR(gss_auth->pipe[1]->dentry);
819	gss_auth->pipe[0]->dentry = rpc_mkpipe_dentry(clnt->cl_dentry,
820						      gss_auth->mech->gm_name,
821						      clnt, gss_auth->pipe[0]);
822	if (IS_ERR(gss_auth->pipe[0]->dentry)) {
823		err = PTR_ERR(gss_auth->pipe[0]->dentry);
824		goto err_unlink_pipe_1;
825	}
826	return 0;
827
828err_unlink_pipe_1:
829	rpc_unlink(gss_auth->pipe[1]->dentry);
830	return err;
831}
832
833static void gss_pipes_dentries_destroy_net(struct rpc_clnt *clnt,
834					   struct rpc_auth *auth)
835{
836	struct gss_auth *gss_auth = container_of(auth, struct gss_auth,
837			rpc_auth);
838	struct net *net = gss_auth->net;
839	struct super_block *sb;
840
841	sb = rpc_get_sb_net(net);
842	if (sb) {
843		if (clnt->cl_dentry)
844			gss_pipes_dentries_destroy(auth);
845		rpc_put_sb_net(net);
846	}
847}
848
849static int gss_pipes_dentries_create_net(struct rpc_clnt *clnt,
850					 struct rpc_auth *auth)
851{
852	struct gss_auth *gss_auth = container_of(auth, struct gss_auth,
853			rpc_auth);
854	struct net *net = gss_auth->net;
855	struct super_block *sb;
856	int err = 0;
857
858	sb = rpc_get_sb_net(net);
859	if (sb) {
860		if (clnt->cl_dentry)
861			err = gss_pipes_dentries_create(auth);
862		rpc_put_sb_net(net);
863	}
864	return err;
865}
866
867/*
868 * NOTE: we have the opportunity to use different
869 * parameters based on the input flavor (which must be a pseudoflavor)
870 */
871static struct rpc_auth *
872gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
873{
874	rpc_authflavor_t flavor = args->pseudoflavor;
875	struct gss_auth *gss_auth;
876	struct rpc_auth * auth;
877	int err = -ENOMEM; /* XXX? */
878
879	dprintk("RPC:       creating GSS authenticator for client %p\n", clnt);
880
881	if (!try_module_get(THIS_MODULE))
882		return ERR_PTR(err);
883	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
884		goto out_dec;
885	gss_auth->target_name = NULL;
886	if (args->target_name) {
887		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
888		if (gss_auth->target_name == NULL)
889			goto err_free;
890	}
891	gss_auth->client = clnt;
892	gss_auth->net = get_net(rpc_net_ns(clnt));
893	err = -EINVAL;
894	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
895	if (!gss_auth->mech) {
896		dprintk("RPC:       Pseudoflavor %d not found!\n", flavor);
897		goto err_put_net;
898	}
899	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
900	if (gss_auth->service == 0)
901		goto err_put_mech;
902	auth = &gss_auth->rpc_auth;
903	auth->au_cslack = GSS_CRED_SLACK >> 2;
904	auth->au_rslack = GSS_VERF_SLACK >> 2;
905	auth->au_ops = &authgss_ops;
906	auth->au_flavor = flavor;
907	atomic_set(&auth->au_count, 1);
908	kref_init(&gss_auth->kref);
909
910	/*
911	 * Note: if we created the old pipe first, then someone who
912	 * examined the directory at the right moment might conclude
913	 * that we supported only the old pipe.  So we instead create
914	 * the new pipe first.
915	 */
916	gss_auth->pipe[1] = rpc_mkpipe_data(&gss_upcall_ops_v1,
917					    RPC_PIPE_WAIT_FOR_OPEN);
918	if (IS_ERR(gss_auth->pipe[1])) {
919		err = PTR_ERR(gss_auth->pipe[1]);
920		goto err_put_mech;
921	}
922
923	gss_auth->pipe[0] = rpc_mkpipe_data(&gss_upcall_ops_v0,
924					    RPC_PIPE_WAIT_FOR_OPEN);
925	if (IS_ERR(gss_auth->pipe[0])) {
926		err = PTR_ERR(gss_auth->pipe[0]);
927		goto err_destroy_pipe_1;
928	}
929	err = gss_pipes_dentries_create_net(clnt, auth);
930	if (err)
931		goto err_destroy_pipe_0;
932	err = rpcauth_init_credcache(auth);
933	if (err)
934		goto err_unlink_pipes;
935
936	return auth;
937err_unlink_pipes:
938	gss_pipes_dentries_destroy_net(clnt, auth);
939err_destroy_pipe_0:
940	rpc_destroy_pipe_data(gss_auth->pipe[0]);
941err_destroy_pipe_1:
942	rpc_destroy_pipe_data(gss_auth->pipe[1]);
943err_put_mech:
944	gss_mech_put(gss_auth->mech);
945err_put_net:
946	put_net(gss_auth->net);
947err_free:
948	kfree(gss_auth->target_name);
949	kfree(gss_auth);
950out_dec:
951	module_put(THIS_MODULE);
952	return ERR_PTR(err);
953}
954
955static void
956gss_free(struct gss_auth *gss_auth)
957{
958	gss_pipes_dentries_destroy_net(gss_auth->client, &gss_auth->rpc_auth);
959	rpc_destroy_pipe_data(gss_auth->pipe[0]);
960	rpc_destroy_pipe_data(gss_auth->pipe[1]);
961	gss_mech_put(gss_auth->mech);
962	put_net(gss_auth->net);
963	kfree(gss_auth->target_name);
964
965	kfree(gss_auth);
966	module_put(THIS_MODULE);
967}
968
969static void
970gss_free_callback(struct kref *kref)
971{
972	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
973
974	gss_free(gss_auth);
975}
976
977static void
978gss_destroy(struct rpc_auth *auth)
979{
980	struct gss_auth *gss_auth;
981
982	dprintk("RPC:       destroying GSS authenticator %p flavor %d\n",
983			auth, auth->au_flavor);
984
985	rpcauth_destroy_credcache(auth);
986
987	gss_auth = container_of(auth, struct gss_auth, rpc_auth);
988	kref_put(&gss_auth->kref, gss_free_callback);
989}
990
991/*
992 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
993 * to the server with the GSS control procedure field set to
994 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
995 * all RPCSEC_GSS state associated with that context.
996 */
997static int
998gss_destroying_context(struct rpc_cred *cred)
999{
1000	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1001	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1002	struct rpc_task *task;
1003
1004	if (gss_cred->gc_ctx == NULL ||
1005	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
1006		return 0;
1007
1008	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1009	cred->cr_ops = &gss_nullops;
1010
1011	/* Take a reference to ensure the cred will be destroyed either
1012	 * by the RPC call or by the put_rpccred() below */
1013	get_rpccred(cred);
1014
1015	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
1016	if (!IS_ERR(task))
1017		rpc_put_task(task);
1018
1019	put_rpccred(cred);
1020	return 1;
1021}
1022
1023/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
1024 * to create a new cred or context, so they check that things have been
1025 * allocated before freeing them. */
1026static void
1027gss_do_free_ctx(struct gss_cl_ctx *ctx)
1028{
1029	dprintk("RPC:       %s\n", __func__);
1030
1031	gss_delete_sec_context(&ctx->gc_gss_ctx);
1032	kfree(ctx->gc_wire_ctx.data);
1033	kfree(ctx);
1034}
1035
1036static void
1037gss_free_ctx_callback(struct rcu_head *head)
1038{
1039	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1040	gss_do_free_ctx(ctx);
1041}
1042
1043static void
1044gss_free_ctx(struct gss_cl_ctx *ctx)
1045{
1046	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
1047}
1048
1049static void
1050gss_free_cred(struct gss_cred *gss_cred)
1051{
1052	dprintk("RPC:       %s cred=%p\n", __func__, gss_cred);
1053	kfree(gss_cred);
1054}
1055
1056static void
1057gss_free_cred_callback(struct rcu_head *head)
1058{
1059	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1060	gss_free_cred(gss_cred);
1061}
1062
1063static void
1064gss_destroy_nullcred(struct rpc_cred *cred)
1065{
1066	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1067	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1068	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
1069
1070	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
1071	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1072	if (ctx)
1073		gss_put_ctx(ctx);
1074	kref_put(&gss_auth->kref, gss_free_callback);
1075}
1076
1077static void
1078gss_destroy_cred(struct rpc_cred *cred)
1079{
1080
1081	if (gss_destroying_context(cred))
1082		return;
1083	gss_destroy_nullcred(cred);
1084}
1085
1086/*
1087 * Lookup RPCSEC_GSS cred for the current process
1088 */
1089static struct rpc_cred *
1090gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1091{
1092	return rpcauth_lookup_credcache(auth, acred, flags);
1093}
1094
1095static struct rpc_cred *
1096gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1097{
1098	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1099	struct gss_cred	*cred = NULL;
1100	int err = -ENOMEM;
1101
1102	dprintk("RPC:       %s for uid %d, flavor %d\n",
1103		__func__, from_kuid(&init_user_ns, acred->uid),
1104		auth->au_flavor);
1105
1106	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
1107		goto out_err;
1108
1109	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
1110	/*
1111	 * Note: in order to force a call to call_refresh(), we deliberately
1112	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
1113	 */
1114	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
1115	cred->gc_service = gss_auth->service;
1116	cred->gc_principal = NULL;
1117	if (acred->machine_cred)
1118		cred->gc_principal = acred->principal;
1119	kref_get(&gss_auth->kref);
1120	return &cred->gc_base;
1121
1122out_err:
1123	dprintk("RPC:       %s failed with error %d\n", __func__, err);
1124	return ERR_PTR(err);
1125}
1126
1127static int
1128gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
1129{
1130	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1131	struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
1132	int err;
1133
1134	do {
1135		err = gss_create_upcall(gss_auth, gss_cred);
1136	} while (err == -EAGAIN);
1137	return err;
1138}
1139
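/*
 * A cached cred matches the request if it is still usable (new, or
 * up to date and unexpired) and either both sides name the same
 * machine-cred principal or, failing that, the uids are equal.
 */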
1140static int
1141gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
1142{
1143	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1144
1145	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
1146		goto out;
1147	/* Don't match with creds that have expired. */
1148	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
1149		return 0;
1150	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
1151		return 0;
1152out:
1153	if (acred->principal != NULL) {
1154		if (gss_cred->gc_principal == NULL)
1155			return 0;
1156		return strcmp(acred->principal, gss_cred->gc_principal) == 0;
1157	}
1158	if (gss_cred->gc_principal != NULL)
1159		return 0;
1160	return uid_eq(rc->cr_uid, acred->uid);
1161}
1162
1163/*
1164 * Marshal credentials.
1165 * Maybe we should keep a cached credential for performance reasons.
1166 */
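/*
 * The RPCSEC_GSS credential built here follows RFC 2203:
 *
 *	AUTH_GSS | length | version | gss_proc | seq_num | service | handle
 *
 * and the verifier is a MIC computed over the request header, from the
 * xid up to and including the credential just written.
 */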
1167static __be32 *
1168gss_marshal(struct rpc_task *task, __be32 *p)
1169{
1170	struct rpc_rqst *req = task->tk_rqstp;
1171	struct rpc_cred *cred = req->rq_cred;
1172	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1173						 gc_base);
1174	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
1175	__be32		*cred_len;
1176	u32             maj_stat = 0;
1177	struct xdr_netobj mic;
1178	struct kvec	iov;
1179	struct xdr_buf	verf_buf;
1180
1181	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
1182
1183	*p++ = htonl(RPC_AUTH_GSS);
1184	cred_len = p++;
1185
1186	spin_lock(&ctx->gc_seq_lock);
1187	req->rq_seqno = ctx->gc_seq++;
1188	spin_unlock(&ctx->gc_seq_lock);
1189
1190	*p++ = htonl((u32) RPC_GSS_VERSION);
1191	*p++ = htonl((u32) ctx->gc_proc);
1192	*p++ = htonl((u32) req->rq_seqno);
1193	*p++ = htonl((u32) gss_cred->gc_service);
1194	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
1195	*cred_len = htonl((p - (cred_len + 1)) << 2);
1196
1197	/* We compute the checksum for the verifier over the xdr-encoded bytes
1198	 * starting with the xid and ending at the end of the credential: */
1199	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
1200					req->rq_snd_buf.head[0].iov_base);
1201	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1202	xdr_buf_from_iov(&iov, &verf_buf);
1203
1204	/* set verifier flavor*/
1205	*p++ = htonl(RPC_AUTH_GSS);
1206
1207	mic.data = (u8 *)(p + 1);
1208	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1209	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
1210		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1211	} else if (maj_stat != 0) {
1212		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
1213		goto out_put_ctx;
1214	}
1215	p = xdr_encode_opaque(p, NULL, mic.len);
1216	gss_put_ctx(ctx);
1217	return p;
1218out_put_ctx:
1219	gss_put_ctx(ctx);
1220	return NULL;
1221}
1222
1223static int gss_renew_cred(struct rpc_task *task)
1224{
1225	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
1226	struct gss_cred *gss_cred = container_of(oldcred,
1227						 struct gss_cred,
1228						 gc_base);
1229	struct rpc_auth *auth = oldcred->cr_auth;
1230	struct auth_cred acred = {
1231		.uid = oldcred->cr_uid,
1232		.principal = gss_cred->gc_principal,
1233		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
1234	};
1235	struct rpc_cred *new;
1236
1237	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
1238	if (IS_ERR(new))
1239		return PTR_ERR(new);
1240	task->tk_rqstp->rq_cred = new;
1241	put_rpccred(oldcred);
1242	return 0;
1243}
1244
1245static int gss_cred_is_negative_entry(struct rpc_cred *cred)
1246{
1247	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
1248		unsigned long now = jiffies;
1249		unsigned long begin, expire;
1250		struct gss_cred *gss_cred;
1251
1252		gss_cred = container_of(cred, struct gss_cred, gc_base);
1253		begin = gss_cred->gc_upcall_timestamp;
1254		expire = begin + gss_expired_cred_retry_delay * HZ;
1255
1256		if (time_in_range_open(now, begin, expire))
1257			return 1;
1258	}
1259	return 0;
1260}
1261
1262/*
1263 * Refresh credentials. XXX - finish
1264 */
1265static int
1266gss_refresh(struct rpc_task *task)
1267{
1268	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1269	int ret = 0;
1270
1271	if (gss_cred_is_negative_entry(cred))
1272		return -EKEYEXPIRED;
1273
1274	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
1275			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
1276		ret = gss_renew_cred(task);
1277		if (ret < 0)
1278			goto out;
1279		cred = task->tk_rqstp->rq_cred;
1280	}
1281
1282	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
1283		ret = gss_refresh_upcall(task);
1284out:
1285	return ret;
1286}
1287
1288/* Dummy refresh routine: used only when destroying the context */
1289static int
1290gss_refresh_null(struct rpc_task *task)
1291{
1292	return -EACCES;
1293}
1294
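/*
 * Check the verifier in the server's reply: it must be an RPC_AUTH_GSS
 * verifier whose body is a MIC over the (XDR-encoded) sequence number
 * that was sent in the matching request.
 */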
1295static __be32 *
1296gss_validate(struct rpc_task *task, __be32 *p)
1297{
1298	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1299	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1300	__be32		seq;
1301	struct kvec	iov;
1302	struct xdr_buf	verf_buf;
1303	struct xdr_netobj mic;
1304	u32		flav, len;
1305	u32		maj_stat;
1306
1307	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
1308
1309	flav = ntohl(*p++);
1310	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
1311		goto out_bad;
1312	if (flav != RPC_AUTH_GSS)
1313		goto out_bad;
1314	seq = htonl(task->tk_rqstp->rq_seqno);
1315	iov.iov_base = &seq;
1316	iov.iov_len = sizeof(seq);
1317	xdr_buf_from_iov(&iov, &verf_buf);
1318	mic.data = (u8 *)p;
1319	mic.len = len;
1320
1321	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1322	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1323		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1324	if (maj_stat) {
1325		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
1326			task->tk_pid, __func__, maj_stat);
1327		goto out_bad;
1328	}
1329	/* We leave it to unwrap to calculate au_rslack. For now we just
1330	 * calculate the length of the verifier: */
1331	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
1332	gss_put_ctx(ctx);
1333	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
1334			task->tk_pid, __func__);
1335	return p + XDR_QUADLEN(len);
1336out_bad:
1337	gss_put_ctx(ctx);
1338	dprintk("RPC: %5u %s failed.\n", task->tk_pid, __func__);
1339	return NULL;
1340}
1341
1342static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
1343				__be32 *p, void *obj)
1344{
1345	struct xdr_stream xdr;
1346
1347	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
1348	encode(rqstp, &xdr, obj);
1349}
1350
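/*
 * rpc_gss_svc_integrity: the request body becomes
 *
 *	length | seq_num | arguments | MIC(seq_num + arguments)
 *
 * with the MIC appended as an XDR opaque in the head or tail buffer.
 */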
1351static inline int
1352gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1353		   kxdreproc_t encode, struct rpc_rqst *rqstp,
1354		   __be32 *p, void *obj)
1355{
1356	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
1357	struct xdr_buf	integ_buf;
1358	__be32          *integ_len = NULL;
1359	struct xdr_netobj mic;
1360	u32		offset;
1361	__be32		*q;
1362	struct kvec	*iov;
1363	u32             maj_stat = 0;
1364	int		status = -EIO;
1365
1366	integ_len = p++;
1367	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1368	*p++ = htonl(rqstp->rq_seqno);
1369
1370	gss_wrap_req_encode(encode, rqstp, p, obj);
1371
1372	if (xdr_buf_subsegment(snd_buf, &integ_buf,
1373				offset, snd_buf->len - offset))
1374		return status;
1375	*integ_len = htonl(integ_buf.len);
1376
1377	/* guess whether we're in the head or the tail: */
1378	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1379		iov = snd_buf->tail;
1380	else
1381		iov = snd_buf->head;
1382	p = iov->iov_base + iov->iov_len;
1383	mic.data = (u8 *)(p + 1);
1384
1385	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1386	status = -EIO; /* XXX? */
1387	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1388		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1389	else if (maj_stat)
1390		return status;
1391	q = xdr_encode_opaque(p, NULL, mic.len);
1392
1393	offset = (u8 *)q - (u8 *)p;
1394	iov->iov_len += offset;
1395	snd_buf->len += offset;
1396	return 0;
1397}
1398
1399static void
1400priv_release_snd_buf(struct rpc_rqst *rqstp)
1401{
1402	int i;
1403
1404	for (i=0; i < rqstp->rq_enc_pages_num; i++)
1405		__free_page(rqstp->rq_enc_pages[i]);
1406	kfree(rqstp->rq_enc_pages);
1407}
1408
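/*
 * gss_wrap() encrypts the page data in place and may need room to grow
 * it, so give the send buffer a private set of freshly allocated pages
 * (plus one spare for the relocated tail) instead of the caller's pages.
 */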
1409static int
1410alloc_enc_pages(struct rpc_rqst *rqstp)
1411{
1412	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1413	int first, last, i;
1414
1415	if (snd_buf->page_len == 0) {
1416		rqstp->rq_enc_pages_num = 0;
1417		return 0;
1418	}
1419
1420	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
1421	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
1422	rqstp->rq_enc_pages_num = last - first + 1 + 1;
1423	rqstp->rq_enc_pages
1424		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
1425				GFP_NOFS);
1426	if (!rqstp->rq_enc_pages)
1427		goto out;
1428	for (i=0; i < rqstp->rq_enc_pages_num; i++) {
1429		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
1430		if (rqstp->rq_enc_pages[i] == NULL)
1431			goto out_free;
1432	}
1433	rqstp->rq_release_snd_buf = priv_release_snd_buf;
1434	return 0;
1435out_free:
1436	rqstp->rq_enc_pages_num = i;
1437	priv_release_snd_buf(rqstp);
1438out:
1439	return -EAGAIN;
1440}
1441
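/*
 * rpc_gss_svc_privacy: encode the arguments, then encrypt the sequence
 * number and arguments in place with gss_wrap(), emitting
 *
 *	length | wrap_token(seq_num + arguments) | pad to 4-byte boundary
 */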
1442static inline int
1443gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1444		  kxdreproc_t encode, struct rpc_rqst *rqstp,
1445		  __be32 *p, void *obj)
1446{
1447	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
1448	u32		offset;
1449	u32             maj_stat;
1450	int		status;
1451	__be32		*opaque_len;
1452	struct page	**inpages;
1453	int		first;
1454	int		pad;
1455	struct kvec	*iov;
1456	char		*tmp;
1457
1458	opaque_len = p++;
1459	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1460	*p++ = htonl(rqstp->rq_seqno);
1461
1462	gss_wrap_req_encode(encode, rqstp, p, obj);
1463
1464	status = alloc_enc_pages(rqstp);
1465	if (status)
1466		return status;
1467	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
1468	inpages = snd_buf->pages + first;
1469	snd_buf->pages = rqstp->rq_enc_pages;
1470	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
1471	/*
1472	 * Give the tail its own page, in case we need extra space in the
1473	 * head when wrapping:
1474	 *
1475	 * call_allocate() allocates twice the slack space required
1476	 * by the authentication flavor to rq_callsize.
1477	 * For GSS, slack is GSS_CRED_SLACK.
1478	 */
1479	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
1480		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
1481		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
1482		snd_buf->tail[0].iov_base = tmp;
1483	}
1484	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1485	/* slack space should prevent this ever happening: */
1486	BUG_ON(snd_buf->len > snd_buf->buflen);
1487	status = -EIO;
1488	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1489	 * done anyway, so it's safe to put the request on the wire: */
1490	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1491		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1492	else if (maj_stat)
1493		return status;
1494
1495	*opaque_len = htonl(snd_buf->len - offset);
1496	/* guess whether we're in the head or the tail: */
1497	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1498		iov = snd_buf->tail;
1499	else
1500		iov = snd_buf->head;
1501	p = iov->iov_base + iov->iov_len;
1502	pad = 3 - ((snd_buf->len - offset - 1) & 3);
1503	memset(p, 0, pad);
1504	iov->iov_len += pad;
1505	snd_buf->len += pad;
1506
1507	return 0;
1508}
1509
1510static int
1511gss_wrap_req(struct rpc_task *task,
1512	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
1513{
1514	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1515	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1516			gc_base);
1517	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1518	int             status = -EIO;
1519
1520	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
1521	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1522		/* The spec seems a little ambiguous here, but I think that not
1523		 * wrapping context destruction requests makes the most sense.
1524		 */
1525		gss_wrap_req_encode(encode, rqstp, p, obj);
1526		status = 0;
1527		goto out;
1528	}
1529	switch (gss_cred->gc_service) {
1530	case RPC_GSS_SVC_NONE:
1531		gss_wrap_req_encode(encode, rqstp, p, obj);
1532		status = 0;
1533		break;
1534	case RPC_GSS_SVC_INTEGRITY:
1535		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
1536		break;
1537	case RPC_GSS_SVC_PRIVACY:
1538		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
1539		break;
1540	}
1541out:
1542	gss_put_ctx(ctx);
1543	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
1544	return status;
1545}
1546
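/*
 * Reverse of gss_wrap_req_integ(): locate the integrity-protected body,
 * verify its MIC and sequence number, and leave *p pointing at the start
 * of the reply data proper.
 */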
1547static inline int
1548gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1549		struct rpc_rqst *rqstp, __be32 **p)
1550{
1551	struct xdr_buf	*rcv_buf = &rqstp->rq_rcv_buf;
1552	struct xdr_buf integ_buf;
1553	struct xdr_netobj mic;
1554	u32 data_offset, mic_offset;
1555	u32 integ_len;
1556	u32 maj_stat;
1557	int status = -EIO;
1558
1559	integ_len = ntohl(*(*p)++);
1560	if (integ_len & 3)
1561		return status;
1562	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
1563	mic_offset = integ_len + data_offset;
1564	if (mic_offset > rcv_buf->len)
1565		return status;
1566	if (ntohl(*(*p)++) != rqstp->rq_seqno)
1567		return status;
1568
1569	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
1570				mic_offset - data_offset))
1571		return status;
1572
1573	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
1574		return status;
1575
1576	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1577	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1578		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1579	if (maj_stat != GSS_S_COMPLETE)
1580		return status;
1581	return 0;
1582}
1583
1584static inline int
1585gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1586		struct rpc_rqst *rqstp, __be32 **p)
1587{
1588	struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
1589	u32 offset;
1590	u32 opaque_len;
1591	u32 maj_stat;
1592	int status = -EIO;
1593
1594	opaque_len = ntohl(*(*p)++);
1595	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
1596	if (offset + opaque_len > rcv_buf->len)
1597		return status;
1598	/* remove padding: */
1599	rcv_buf->len = offset + opaque_len;
1600
1601	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
1602	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1603		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1604	if (maj_stat != GSS_S_COMPLETE)
1605		return status;
1606	if (ntohl(*(*p)++) != rqstp->rq_seqno)
1607		return status;
1608
1609	return 0;
1610}
1611
1612static int
1613gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
1614		      __be32 *p, void *obj)
1615{
1616	struct xdr_stream xdr;
1617
1618	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
1619	return decode(rqstp, &xdr, obj);
1620}
1621
1622static int
1623gss_unwrap_resp(struct rpc_task *task,
1624		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
1625{
1626	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1627	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
1628			gc_base);
1629	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1630	__be32		*savedp = p;
1631	struct kvec	*head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
1632	int		savedlen = head->iov_len;
1633	int             status = -EIO;
1634
1635	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
1636		goto out_decode;
1637	switch (gss_cred->gc_service) {
1638	case RPC_GSS_SVC_NONE:
1639		break;
1640	case RPC_GSS_SVC_INTEGRITY:
1641		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
1642		if (status)
1643			goto out;
1644		break;
1645	case RPC_GSS_SVC_PRIVACY:
1646		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
1647		if (status)
1648			goto out;
1649		break;
1650	}
1651	/* take into account extra slack for integrity and privacy cases: */
1652	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
1653						+ (savedlen - head->iov_len);
1654out_decode:
1655	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
1656out:
1657	gss_put_ctx(ctx);
1658	dprintk("RPC: %5u %s returning %d\n",
1659		task->tk_pid, __func__, status);
1660	return status;
1661}
1662
1663static const struct rpc_authops authgss_ops = {
1664	.owner		= THIS_MODULE,
1665	.au_flavor	= RPC_AUTH_GSS,
1666	.au_name	= "RPCSEC_GSS",
1667	.create		= gss_create,
1668	.destroy	= gss_destroy,
1669	.lookup_cred	= gss_lookup_cred,
1670	.crcreate	= gss_create_cred,
1671	.pipes_create	= gss_pipes_dentries_create,
1672	.pipes_destroy	= gss_pipes_dentries_destroy,
1673	.list_pseudoflavors = gss_mech_list_pseudoflavors,
1674	.info2flavor	= gss_mech_info2flavor,
1675	.flavor2info	= gss_mech_flavor2info,
1676};
1677
1678static const struct rpc_credops gss_credops = {
1679	.cr_name	= "AUTH_GSS",
1680	.crdestroy	= gss_destroy_cred,
1681	.cr_init	= gss_cred_init,
1682	.crbind		= rpcauth_generic_bind_cred,
1683	.crmatch	= gss_match,
1684	.crmarshal	= gss_marshal,
1685	.crrefresh	= gss_refresh,
1686	.crvalidate	= gss_validate,
1687	.crwrap_req	= gss_wrap_req,
1688	.crunwrap_resp	= gss_unwrap_resp,
1689};
1690
1691static const struct rpc_credops gss_nullops = {
1692	.cr_name	= "AUTH_GSS",
1693	.crdestroy	= gss_destroy_nullcred,
1694	.crbind		= rpcauth_generic_bind_cred,
1695	.crmatch	= gss_match,
1696	.crmarshal	= gss_marshal,
1697	.crrefresh	= gss_refresh_null,
1698	.crvalidate	= gss_validate,
1699	.crwrap_req	= gss_wrap_req,
1700	.crunwrap_resp	= gss_unwrap_resp,
1701};
1702
1703static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
1704	.upcall		= rpc_pipe_generic_upcall,
1705	.downcall	= gss_pipe_downcall,
1706	.destroy_msg	= gss_pipe_destroy_msg,
1707	.open_pipe	= gss_pipe_open_v0,
1708	.release_pipe	= gss_pipe_release,
1709};
1710
1711static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
1712	.upcall		= rpc_pipe_generic_upcall,
1713	.downcall	= gss_pipe_downcall,
1714	.destroy_msg	= gss_pipe_destroy_msg,
1715	.open_pipe	= gss_pipe_open_v1,
1716	.release_pipe	= gss_pipe_release,
1717};
1718
1719static __net_init int rpcsec_gss_init_net(struct net *net)
1720{
1721	return gss_svc_init_net(net);
1722}
1723
1724static __net_exit void rpcsec_gss_exit_net(struct net *net)
1725{
1726	gss_svc_shutdown_net(net);
1727}
1728
1729static struct pernet_operations rpcsec_gss_net_ops = {
1730	.init = rpcsec_gss_init_net,
1731	.exit = rpcsec_gss_exit_net,
1732};
1733
1734/*
1735 * Initialize RPCSEC_GSS module
1736 */
1737static int __init init_rpcsec_gss(void)
1738{
1739	int err = 0;
1740
1741	err = rpcauth_register(&authgss_ops);
1742	if (err)
1743		goto out;
1744	err = gss_svc_init();
1745	if (err)
1746		goto out_unregister;
1747	err = register_pernet_subsys(&rpcsec_gss_net_ops);
1748	if (err)
1749		goto out_svc_exit;
1750	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
1751	return 0;
1752out_svc_exit:
1753	gss_svc_shutdown();
1754out_unregister:
1755	rpcauth_unregister(&authgss_ops);
1756out:
1757	return err;
1758}
1759
1760static void __exit exit_rpcsec_gss(void)
1761{
1762	unregister_pernet_subsys(&rpcsec_gss_net_ops);
1763	gss_svc_shutdown();
1764	rpcauth_unregister(&authgss_ops);
1765	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1766}
1767
1768MODULE_ALIAS("rpc-auth-6");
1769MODULE_LICENSE("GPL");
1770module_param_named(expired_cred_retry_delay,
1771		   gss_expired_cred_retry_delay,
1772		   uint, 0644);
1773MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
1774		"the RPC engine retries an expired credential");
1775
1776module_init(init_rpcsec_gss)
1777module_exit(exit_rpcsec_gss)
1778