cache.c revision a490c681cbcf65d548138c377bb691c85824d323
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2.  See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>

#define RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	return new;
}
EXPORT_SYMBOL(sunrpc_cache_lookup);
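
/*
 * Illustrative sketch only (not part of this file): a minimal cache
 * type layered on sunrpc_cache_lookup(), mirroring what real users
 * such as the ip_map cache in svcauth_unix.c do: embed a cache_head
 * in a private structure and supply alloc/match/init callbacks
 * through cache_detail.  Every "demo_*" name is invented, and the
 * block is guarded by #if 0 so it is never compiled.
 */
#if 0
#define DEMO_HASH_SIZE 16

struct demo_entry {
	struct cache_head	h;
	int			key;	/* lookup key */
	int			value;	/* cached content */
};

static struct cache_head *demo_alloc(void)
{
	struct demo_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);
	return e ? &e->h : NULL;
}

static int demo_match(struct cache_head *a, struct cache_head *b)
{
	return container_of(a, struct demo_entry, h)->key ==
	       container_of(b, struct demo_entry, h)->key;
}

static void demo_init(struct cache_head *cnew, struct cache_head *ckey)
{
	/* copy only the key; the content arrives later via update() */
	container_of(cnew, struct demo_entry, h)->key =
		container_of(ckey, struct demo_entry, h)->key;
}

static struct demo_entry *demo_lookup(struct cache_detail *cd, int key)
{
	struct demo_entry tmp = { .key = key };
	struct cache_head *ch;

	ch = sunrpc_cache_lookup(cd, &tmp.h, key & (DEMO_HASH_SIZE - 1));
	return ch ? container_of(ch, struct demo_entry, h) : NULL;
}
#endif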


static void queue_loose(struct cache_detail *detail, struct cache_head *ch);

static int cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	return !test_and_set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
			struct cache_detail *detail, int new)
{
	if (new)
		cache_revisit_request(head);
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		queue_loose(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head **head;
	struct cache_head *tmp;
	int is_new;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			is_new = cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail, is_new);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	is_new = cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail, is_new);
	cache_fresh_unlocked(old, detail, 0);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL(sunrpc_cache_update);
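
/*
 * Illustrative sketch only: the usual update pattern, as seen in
 * cache_parse implementations such as ip_map_parse().  A fully
 * initialised temporary entry carries the new content (or the
 * CACHE_NEGATIVE flag) into sunrpc_cache_update(), which consumes the
 * caller's reference on the old entry and returns a referenced result.
 * The "demo_*" names continue the hypothetical example above.
 */
#if 0
static int demo_update(struct cache_detail *cd, struct demo_entry *old,
		       int value, time_t expiry)
{
	struct demo_entry tmp;
	struct cache_head *ch;

	tmp.key = old->key;
	tmp.value = value;
	tmp.h.flags = 0;		/* or set_bit(CACHE_NEGATIVE, ...) */
	tmp.h.expiry_time = expiry;

	ch = sunrpc_cache_update(cd, &tmp.h, &old->h,
				 old->key & (DEMO_HASH_SIZE - 1));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);		/* drop the returned reference */
	return 0;
}
#endif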

static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h);
/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used; otherwise it drops the
 * reference with cache_put() and returns:
 *  -EAGAIN if an upcall is pending and the request was deferred,
 *  -ETIMEDOUT if the entry is not valid and the request could not be deferred,
 *  -ENOENT if the cache entry was negative.
 */
int cache_check(struct cache_detail *detail,
		    struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		rv = -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		rv = -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			rv = -ENOENT;
		else
			rv = 0;
	}

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
				refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_unlocked(h, detail,
					     cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN)
		if (cache_defer_req(rqstp, h) != 0)
			rv = -ETIMEDOUT;

	if (rv)
		cache_put(h, detail);
	return rv;
}
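
/*
 * Illustrative sketch only: how a caller typically consumes the return
 * value of cache_check().  Real examples live in svcauth_unix.c and
 * nfsd's export handling; the "demo_*" names are hypothetical.
 */
#if 0
static int demo_use(struct cache_detail *cd, struct demo_entry *e,
		    struct cache_req *req)
{
	switch (cache_check(cd, &e->h, req)) {
	case 0:			/* valid entry; our reference survives */
		printk(KERN_DEBUG "demo: key=%d value=%d\n", e->key, e->value);
		cache_put(&e->h, cd);
		return 0;
	case -EAGAIN:		/* upcall pending; request was deferred */
		return -EAGAIN;	/* drop the rpc; it will be revisited */
	case -ENOENT:		/* negative entry */
	case -ETIMEDOUT:	/* could not defer; give up */
	default:		/* cache_check already put our reference */
		return -ESRCH;
	}
}
#endif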

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static const struct file_operations cache_file_operations;
static const struct file_operations content_file_operations;
static const struct file_operations cache_flush_operations;

static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);

void cache_register(struct cache_detail *cd)
{
	cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->proc_ent) {
		struct proc_dir_entry *p;
		cd->proc_ent->owner = cd->owner;
		cd->channel_ent = cd->content_ent = NULL;

		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
				      cd->proc_ent);
		cd->flush_ent = p;
		if (p) {
			p->proc_fops = &cache_flush_operations;
			p->owner = cd->owner;
			p->data = cd;
		}

		if (cd->cache_request || cd->cache_parse) {
			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->channel_ent = p;
			if (p) {
				p->proc_fops = &cache_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
		if (cd->cache_show) {
			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
					      cd->proc_ent);
			cd->content_ent = p;
			if (p) {
				p->proc_fops = &content_file_operations;
				p->owner = cd->owner;
				p->data = cd;
			}
		}
	}
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}

int cache_unregister(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		return -EBUSY;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (cd->proc_ent) {
		if (cd->flush_ent)
			remove_proc_entry("flush", cd->proc_ent);
		if (cd->channel_ent)
			remove_proc_entry("channel", cd->proc_ent);
		if (cd->content_ent)
			remove_proc_entry("content", cd->proc_ent);

		cd->proc_ent = NULL;
		remove_proc_entry(cd->name, proc_net_rpc);
	}
	if (list_empty(&cache_list)) {
		/* the module must be being unloaded, so it's safe to stop the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return 0;
}

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds() &&
			    ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				queue_loose(current_detail, ch);

			if (atomic_read(&ch->ref.refcount) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			cache_put(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = 30*HZ;

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available.
 */
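
/*
 * Illustrative sketch only: the shape of what a request provider hands
 * to this machinery.  The svc layer's svc_defer()/svc_revisit() pair is
 * the real example; the "demo_*" types below are invented.
 */
#if 0
struct demo_deferred {
	struct cache_deferred_req	handle;
	/* ...enough saved request state to retry later... */
};

static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct demo_deferred *dd =
		container_of(dreq, struct demo_deferred, handle);

	if (too_many) {
		kfree(dd);	/* dropped; the client will retransmit */
		return;
	}
	/* ...requeue the saved request for another pass... */
}

static struct cache_deferred_req *demo_defer(struct cache_req *req)
{
	struct demo_deferred *dd = kmalloc(sizeof(*dd), GFP_ATOMIC);

	if (!dd)
		return NULL;	/* the caller then sees -ETIMEDOUT */
	dd->handle.owner = dd;	/* whatever cache_clean_deferred() keys on */
	dd->handle.revisit = demo_revisit;
	return &dd->handle;
}
#endif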

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	int hash = DFR_HASH(item);

	if (cache_defer_cnt >= DFR_MAX) {
		/* too many deferred requests: randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ETIMEDOUT;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return -ETIMEDOUT;

	dreq->item = item;
	dreq->recv_time = get_seconds();

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	dreq = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		dreq = list_entry(cache_defer_list.prev,
				  struct cache_deferred_req, recent);
		list_del(&dreq->recent);
		list_del(&dreq->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (dreq) {
		/* there was one too many */
		dreq->revisit(dreq, 1);
	}
	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
	}
	return 0;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by a linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_PENDING clear, we free it from the list.
 *
 */
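
/*
 * Illustrative sketch only: the user-space half of the channel, written
 * as schematic C of the kind a daemon such as rpc.mountd runs.  The
 * path, record layout and reply fields are cache-specific; "demo" is a
 * hypothetical cache name.
 */
#if 0
static void demo_channel_loop(void)
{
	char request[8192], reply[8192];
	int fd = open("/proc/net/rpc/demo/channel", O_RDWR);
	ssize_t n;
	int rlen;

	while ((n = read(fd, request, sizeof(request))) > 0) {
		/* each read() returns one full newline-terminated request */
		/* ...parse it and look up the answer... */
		rlen = snprintf(reply, sizeof(reply),
				"key expiry content\n");	/* cache-specific */
		write(fd, reply, rlen);	/* one full reply per write() */
	}
	close(fd);
}
#endif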

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t
cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&queue_io_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}

static char write_buf[8192]; /* protected by queue_io_mutex */

static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
	    loff_t *ppos)
{
	int err;
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	if (count == 0)
		return 0;
	if (count >= sizeof(write_buf))
		return -EINVAL;

	mutex_lock(&queue_io_mutex);

	if (copy_from_user(write_buf, buf, count)) {
		mutex_unlock(&queue_io_mutex);
		return -EFAULT;
	}
	write_buf[count] = '\0';
	if (cd->cache_parse)
		err = cd->cache_parse(cd, write_buf, count);
	else
		err = -EINVAL;

	mutex_unlock(&queue_io_mutex);
	return err ? err : count;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int
cache_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int
cache_ioctl(struct inode *ino, struct file *filp,
	    unsigned int cmd, unsigned long arg)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;
	struct cache_detail *cd = PDE(ino)->data;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int
cache_open(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = NULL;

	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		struct cache_detail *cd = PDE(inode)->data;

		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int
cache_release(struct inode *inode, struct file *filp)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_detail *cd = PDE(inode)->data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	return 0;
}

static const struct file_operations cache_file_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read,
	.write		= cache_write,
	.poll		= cache_poll,
	.ioctl		= cache_ioctl, /* for FIONREAD */
	.open		= cache_open,
	.release	= cache_release,
};

static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and backslash
 * with backslash-octal escapes, or hexified with a leading \x.
 * Each record is terminated with a newline.
 */
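
/*
 * Illustrative sketch only: a cache_request callback built from the
 * helpers below.  It would emit "demo <key-as-text> <value-as-hex>\n";
 * the "demo_*" names continue the hypothetical example above, and the
 * trailing-newline trick matches what real callbacks such as
 * ip_map_request() do.
 */
#if 0
static void demo_request(struct cache_detail *cd, struct cache_head *h,
			 char **bpp, int *blen)
{
	struct demo_entry *e = container_of(h, struct demo_entry, h);
	char keybuf[16];

	snprintf(keybuf, sizeof(keybuf), "%d", e->key);
	qword_add(bpp, blen, "demo");
	qword_add(bpp, blen, keybuf);
	qword_addhex(bpp, blen, (char *)&e->value, sizeof(e->value));
	(*bpp)[-1] = '\n';	/* replace the final field separator */
}
#endif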

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0)
		return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0)
		return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1)
		len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail);
	}
}

/*
 * register an upcall request to user-space.
 * Each request is at most one page long.
 */
static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (detail->cache_request == NULL)
		return -EINVAL;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	detail->cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
/*
 * parse a message from user-space and pass it
 * to the appropriate cache.
 * Messages are, like requests, separated into fields by
 * spaces, and dequoted from \xHEXSTRING or embedded \nnn octal.
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by the cache.
 */
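
/*
 * Illustrative sketch only: a cache_parse callback consuming one such
 * message with qword_get() below.  The field order (key, expiry,
 * content) and the "demo_*" names are hypothetical; get_expiry() is
 * the qword_get-based helper from <linux/sunrpc/cache.h>.
 */
#if 0
static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	char tok[128];
	int key, value;
	time_t expiry;

	if (mesg[mlen - 1] != '\n')
		return -EINVAL;	/* records must be newline-terminated */

	if (qword_get(&mesg, tok, sizeof(tok)) <= 0)
		return -EINVAL;
	key = simple_strtol(tok, NULL, 0);

	expiry = get_expiry(&mesg);
	if (!expiry)
		return -EINVAL;

	if (qword_get(&mesg, tok, sizeof(tok)) <= 0)
		return -EINVAL;
	value = simple_strtol(tok, NULL, 0);

	/* ...demo_lookup() the entry, then demo_update() it... */
	return 0;
}
#endif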

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache.
 */

struct handle {
	struct cache_detail *cd;
};
static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	/* *pos == 0 yields the header; otherwise the cursor n = *pos - 1
	 * encodes the hash chain in its top 32 bits and the entry index
	 * within that chain in its low 32 bits.
	 */
	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle*)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file)
{
	struct handle *han;
	struct cache_detail *cd = PDE(inode)->data;

	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL)
		return -ENOMEM;

	han->cd = cd;
	return 0;
}

static const struct file_operations content_file_operations = {
	.open		= content_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void *)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
	char tbuf[20];
	char *ep;
	long flushtime;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}

static const struct file_operations cache_flush_operations = {
	.open		= nonseekable_open,
	.read		= read_flush,
	.write		= write_flush,
};