1/*
2 * Implementation of the kernel access vector cache (AVC).
3 *
4 * Authors:  Stephen Smalley, <sds@epoch.ncsc.mil>
5 *	     James Morris <jmorris@redhat.com>
6 *
7 * Update:   KaiGai, Kohei <kaigai@ak.jp.nec.com>
8 *	Replaced the avc_lock spinlock by RCU.
9 *
10 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
11 *
12 *	This program is free software; you can redistribute it and/or modify
13 *	it under the terms of the GNU General Public License version 2,
14 *	as published by the Free Software Foundation.
15 */
16#include <linux/types.h>
17#include <linux/stddef.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/fs.h>
21#include <linux/dcache.h>
22#include <linux/init.h>
23#include <linux/skbuff.h>
24#include <linux/percpu.h>
25#include <linux/list.h>
26#include <net/sock.h>
27#include <linux/un.h>
28#include <net/af_unix.h>
29#include <linux/ip.h>
30#include <linux/audit.h>
31#include <linux/ipv6.h>
32#include <net/ipv6.h>
33#include "avc.h"
34#include "avc_ss.h"
35#include "classmap.h"
36
/* Number of hash buckets; must remain a power of two because avc_hash()
 * masks with (AVC_CACHE_SLOTS - 1). */
#define AVC_CACHE_SLOTS			512
/* Default node count above which avc_alloc_node() triggers reclaim;
 * tunable at runtime via avc_cache_threshold. */
#define AVC_DEF_CACHE_THRESHOLD		512
/* Maximum number of nodes evicted per avc_reclaim_node() invocation. */
#define AVC_CACHE_RECLAIM		16

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
/* Bump a per-cpu statistics counter; compiles away when stats are off. */
#define avc_cache_stats_incr(field)	this_cpu_inc(avc_cache_stats.field)
#else
#define avc_cache_stats_incr(field)	do {} while (0)
#endif
46
/*
 * One cached security-server decision: the (ssid, tsid, tclass) key,
 * the resulting access vector decision, and optional per-ioctl
 * operation data.
 */
struct avc_entry {
	u32			ssid;		/* source SID */
	u32			tsid;		/* target SID */
	u16			tclass;		/* target security class */
	struct av_decision	avd;		/* cached decision */
	struct avc_operation_node *ops_node;	/* ioctl operations; may be NULL */
};
54
/* A cache entry plus its hash-chain linkage and RCU reclaim head. */
struct avc_node {
	struct avc_entry	ae;
	struct hlist_node	list; /* anchored in avc_cache->slots[i] */
	struct rcu_head		rhead; /* for deferred free via call_rcu() */
};
60
/* The cache: an RCU-read hash table with per-bucket write-side locks. */
struct avc_cache {
	struct hlist_head	slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
	spinlock_t		slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
	atomic_t		lru_hint;	/* LRU hint for reclaim scan */
	atomic_t		active_nodes;	/* current number of cached nodes */
	u32			latest_notif;	/* latest revocation notification */
};
68
/* Pairs one operation_decision with the linkage used to list it. */
struct avc_operation_decision_node {
	struct operation_decision od;
	struct list_head od_list; /* linked into avc_operation_node->od_head */
};
73
/* Per-entry ioctl operation data: type bitmap plus decision list. */
struct avc_operation_node {
	struct operation ops;
	struct list_head od_head; /* list of operation_decision_node */
};
78
/* One registered policy-event callback (singly-linked list). */
struct avc_callback_node {
	int (*callback) (u32 event);	/* invoked for matching events */
	u32 events;			/* AVC_CALLBACK_* event mask */
	struct avc_callback_node *next;
};
84
/* Exported via selinuxfs */
unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
/* Per-cpu lookup/hit/miss/alloc counters. */
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif

/* The single global cache instance. */
static struct avc_cache avc_cache;
/* Registered callbacks; only appended to at __init time (no locking). */
static struct avc_callback_node *avc_callbacks;
/* Slab caches backing the node and operation allocations below. */
static struct kmem_cache *avc_node_cachep;
static struct kmem_cache *avc_operation_decision_node_cachep;
static struct kmem_cache *avc_operation_node_cachep;
static struct kmem_cache *avc_operation_perm_cachep;
98
99static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
100{
101	return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
102}
103
104/**
105 * avc_dump_av - Display an access vector in human-readable form.
106 * @tclass: target security class
107 * @av: access vector
108 */
109static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
110{
111	const char **perms;
112	int i, perm;
113
114	if (av == 0) {
115		audit_log_format(ab, " null");
116		return;
117	}
118
119	perms = secclass_map[tclass-1].perms;
120
121	audit_log_format(ab, " {");
122	i = 0;
123	perm = 1;
124	while (i < (sizeof(av) * 8)) {
125		if ((perm & av) && perms[i]) {
126			audit_log_format(ab, " %s", perms[i]);
127			av &= ~perm;
128		}
129		i++;
130		perm <<= 1;
131	}
132
133	if (av)
134		audit_log_format(ab, " 0x%x", av);
135
136	audit_log_format(ab, " }");
137}
138
139/**
140 * avc_dump_query - Display a SID pair and a class in human-readable form.
141 * @ssid: source security identifier
142 * @tsid: target security identifier
143 * @tclass: target security class
144 */
145static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
146{
147	int rc;
148	char *scontext;
149	u32 scontext_len;
150
151	rc = security_sid_to_context(ssid, &scontext, &scontext_len);
152	if (rc)
153		audit_log_format(ab, "ssid=%d", ssid);
154	else {
155		audit_log_format(ab, "scontext=%s", scontext);
156		kfree(scontext);
157	}
158
159	rc = security_sid_to_context(tsid, &scontext, &scontext_len);
160	if (rc)
161		audit_log_format(ab, " tsid=%d", tsid);
162	else {
163		audit_log_format(ab, " tcontext=%s", scontext);
164		kfree(scontext);
165	}
166
167	BUG_ON(tclass >= ARRAY_SIZE(secclass_map));
168	audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
169}
170
171/**
172 * avc_init - Initialize the AVC.
173 *
174 * Initialize the access vector cache.
175 */
176void __init avc_init(void)
177{
178	int i;
179
180	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
181		INIT_HLIST_HEAD(&avc_cache.slots[i]);
182		spin_lock_init(&avc_cache.slots_lock[i]);
183	}
184	atomic_set(&avc_cache.active_nodes, 0);
185	atomic_set(&avc_cache.lru_hint, 0);
186
187	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
188					     0, SLAB_PANIC, NULL);
189	avc_operation_node_cachep = kmem_cache_create("avc_operation_node",
190				sizeof(struct avc_operation_node),
191				0, SLAB_PANIC, NULL);
192	avc_operation_decision_node_cachep = kmem_cache_create(
193				"avc_operation_decision_node",
194				sizeof(struct avc_operation_decision_node),
195				0, SLAB_PANIC, NULL);
196	avc_operation_perm_cachep = kmem_cache_create("avc_operation_perm",
197				sizeof(struct operation_perm),
198				0, SLAB_PANIC, NULL);
199
200	audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
201}
202
/*
 * Report cache occupancy (entry count, buckets used, longest chain) into
 * @page for selinuxfs.  The table is walked under RCU only, so the
 * figures are an approximate snapshot, not an exact census.
 */
int avc_get_hash_stats(char *page)
{
	int i, chain_len, max_chain_len, slots_used;
	struct avc_node *node;
	struct hlist_head *head;

	rcu_read_lock();

	slots_used = 0;
	max_chain_len = 0;
	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		head = &avc_cache.slots[i];
		if (!hlist_empty(head)) {
			slots_used++;
			chain_len = 0;
			hlist_for_each_entry_rcu(node, head, list)
				chain_len++;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
		}
	}

	rcu_read_unlock();

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n",
			 atomic_read(&avc_cache.active_nodes),
			 slots_used, AVC_CACHE_SLOTS, max_chain_len);
}
232
233/*
234 * using a linked list for operation_decision lookup because the list is
235 * always small. i.e. less than 5, typically 1
236 */
237static struct operation_decision *avc_operation_lookup(u8 type,
238					struct avc_operation_node *ops_node)
239{
240	struct avc_operation_decision_node *od_node;
241	struct operation_decision *od = NULL;
242
243	list_for_each_entry(od_node, &ops_node->od_head, od_list) {
244		if (od_node->od.type != type)
245			continue;
246		od = &od_node->od;
247		break;
248	}
249	return od;
250}
251
252static inline unsigned int avc_operation_has_perm(struct operation_decision *od,
253						u16 cmd, u8 specified)
254{
255	unsigned int rc = 0;
256	u8 num = cmd & 0xff;
257
258	if ((specified == OPERATION_ALLOWED) &&
259			(od->specified & OPERATION_ALLOWED))
260		rc = security_operation_test(od->allowed->perms, num);
261	else if ((specified == OPERATION_AUDITALLOW) &&
262			(od->specified & OPERATION_AUDITALLOW))
263		rc = security_operation_test(od->auditallow->perms, num);
264	else if ((specified == OPERATION_DONTAUDIT) &&
265			(od->specified & OPERATION_DONTAUDIT))
266		rc = security_operation_test(od->dontaudit->perms, num);
267	return rc;
268}
269
/*
 * Grant @cmd in @node's operation data: mark the command's type byte as
 * filtered and, if a decision entry for that type exists with an allowed
 * vector, set the command's permission bit in it.
 */
static void avc_operation_allow_perm(struct avc_operation_node *node, u16 cmd)
{
	struct operation_decision *od;
	u8 type;
	u8 num;

	type = cmd >> 8;	/* high byte: ioctl type (driver) */
	num = cmd & 0xff;	/* low byte: function number */
	security_operation_set(node->ops.type, type);
	od = avc_operation_lookup(type, node);
	if (od && od->allowed)
		security_operation_set(od->allowed->perms, num);
}
283
284static void avc_operation_decision_free(
285				struct avc_operation_decision_node *od_node)
286{
287	struct operation_decision *od;
288
289	od = &od_node->od;
290	if (od->allowed)
291		kmem_cache_free(avc_operation_perm_cachep, od->allowed);
292	if (od->auditallow)
293		kmem_cache_free(avc_operation_perm_cachep, od->auditallow);
294	if (od->dontaudit)
295		kmem_cache_free(avc_operation_perm_cachep, od->dontaudit);
296	kmem_cache_free(avc_operation_decision_node_cachep, od_node);
297}
298
299static void avc_operation_free(struct avc_operation_node *ops_node)
300{
301	struct avc_operation_decision_node *od_node, *tmp;
302
303	if (!ops_node)
304		return;
305
306	list_for_each_entry_safe(od_node, tmp, &ops_node->od_head, od_list) {
307		list_del(&od_node->od_list);
308		avc_operation_decision_free(od_node);
309	}
310	kmem_cache_free(avc_operation_node_cachep, ops_node);
311}
312
/*
 * Deep-copy @src into @dest.  @dest's perm bitmaps must already be
 * allocated for every vector flagged in @src->specified.
 */
static void avc_copy_operation_decision(struct operation_decision *dest,
					struct operation_decision *src)
{
	dest->type = src->type;
	dest->specified = src->specified;
	if (dest->specified & OPERATION_ALLOWED)
		memcpy(dest->allowed->perms, src->allowed->perms,
				sizeof(src->allowed->perms));
	if (dest->specified & OPERATION_AUDITALLOW)
		memcpy(dest->auditallow->perms, src->auditallow->perms,
				sizeof(src->auditallow->perms));
	if (dest->specified & OPERATION_DONTAUDIT)
		memcpy(dest->dontaudit->perms, src->dontaudit->perms,
				sizeof(src->dontaudit->perms));
}
328
329/*
330 * similar to avc_copy_operation_decision, but only copy decision
331 * information relevant to this command
332 */
333static inline void avc_quick_copy_operation_decision(u16 cmd,
334			struct operation_decision *dest,
335			struct operation_decision *src)
336{
337	/*
338	 * compute index of the u32 of the 256 bits (8 u32s) that contain this
339	 * command permission
340	 */
341	u8 i = (0xff & cmd) >> 5;
342
343	dest->specified = src->specified;
344	if (dest->specified & OPERATION_ALLOWED)
345		dest->allowed->perms[i] = src->allowed->perms[i];
346	if (dest->specified & OPERATION_AUDITALLOW)
347		dest->auditallow->perms[i] = src->auditallow->perms[i];
348	if (dest->specified & OPERATION_DONTAUDIT)
349		dest->dontaudit->perms[i] = src->dontaudit->perms[i];
350}
351
/*
 * Allocate a zeroed decision node plus a perm bitmap for each vector
 * flagged in @specified.  Returns NULL on allocation failure, releasing
 * any partial allocations via avc_operation_decision_free().
 */
static struct avc_operation_decision_node
		*avc_operation_decision_alloc(u8 specified)
{
	struct avc_operation_decision_node *node;
	struct operation_decision *od;

	/* GFP_ATOMIC: callers hold RCU/spinlocks; no emergency reserves */
	node = kmem_cache_zalloc(avc_operation_decision_node_cachep,
				GFP_ATOMIC | __GFP_NOMEMALLOC);
	if (!node)
		return NULL;

	od = &node->od;
	if (specified & OPERATION_ALLOWED) {
		od->allowed = kmem_cache_zalloc(avc_operation_perm_cachep,
						GFP_ATOMIC | __GFP_NOMEMALLOC);
		if (!od->allowed)
			goto error;
	}
	if (specified & OPERATION_AUDITALLOW) {
		od->auditallow = kmem_cache_zalloc(avc_operation_perm_cachep,
						GFP_ATOMIC | __GFP_NOMEMALLOC);
		if (!od->auditallow)
			goto error;
	}
	if (specified & OPERATION_DONTAUDIT) {
		od->dontaudit = kmem_cache_zalloc(avc_operation_perm_cachep,
						GFP_ATOMIC | __GFP_NOMEMALLOC);
		if (!od->dontaudit)
			goto error;
	}
	return node;
error:
	avc_operation_decision_free(node);
	return NULL;
}
387
388static int avc_add_operation(struct avc_node *node,
389			struct operation_decision *od)
390{
391	struct avc_operation_decision_node *dest_od;
392
393	node->ae.ops_node->ops.len++;
394	dest_od = avc_operation_decision_alloc(od->specified);
395	if (!dest_od)
396		return -ENOMEM;
397	avc_copy_operation_decision(&dest_od->od, od);
398	list_add(&dest_od->od_list, &node->ae.ops_node->od_head);
399	return 0;
400}
401
402static struct avc_operation_node *avc_operation_alloc(void)
403{
404	struct avc_operation_node *ops;
405
406	ops = kmem_cache_zalloc(avc_operation_node_cachep,
407				GFP_ATOMIC|__GFP_NOMEMALLOC);
408	if (!ops)
409		return ops;
410	INIT_LIST_HEAD(&ops->od_head);
411	return ops;
412}
413
/*
 * Deep-copy @src's operation data into a freshly allocated ops node and
 * attach it to @node.  Returns 0 on success (including when @src carries
 * no operations at all) or -ENOMEM; on failure @node is left untouched
 * and all partial allocations are released.
 */
static int avc_operation_populate(struct avc_node *node,
				struct avc_operation_node *src)
{
	struct avc_operation_node *dest;
	struct avc_operation_decision_node *dest_od;
	struct avc_operation_decision_node *src_od;

	if (src->ops.len == 0)
		return 0;
	dest = avc_operation_alloc();
	if (!dest)
		return -ENOMEM;

	memcpy(dest->ops.type, &src->ops.type, sizeof(dest->ops.type));
	dest->ops.len = src->ops.len;

	/* for each source od allocate a destination od and copy */
	list_for_each_entry(src_od, &src->od_head, od_list) {
		dest_od = avc_operation_decision_alloc(src_od->od.specified);
		if (!dest_od)
			goto error;
		avc_copy_operation_decision(&dest_od->od, &src_od->od);
		list_add(&dest_od->od_list, &dest->od_head);
	}
	/* publish onto @node only once the whole copy has succeeded */
	node->ae.ops_node = dest;
	return 0;
error:
	avc_operation_free(dest);
	return -ENOMEM;

}
445
/*
 * Decide which permission bits of an operation check need auditing.
 * Mirrors the avd auditdeny/auditallow logic, refined by the per-command
 * operation decision @od when one is available.  The denied mask is
 * returned through @deniedp; the audited mask is the return value.
 */
static inline u32 avc_operation_audit_required(u32 requested,
					struct av_decision *avd,
					struct operation_decision *od,
					u16 cmd,
					int result,
					u32 *deniedp)
{
	u32 denied, audited;

	denied = requested & ~avd->allowed;
	if (unlikely(denied)) {
		audited = denied & avd->auditdeny;
		/* a dontaudit bit for this command suppresses the record */
		if (audited && od) {
			if (avc_operation_has_perm(od, cmd,
						OPERATION_DONTAUDIT))
				audited &= ~requested;
		}
	} else if (result) {
		/* access failed for another reason: audit everything asked */
		audited = denied = requested;
	} else {
		audited = requested & avd->auditallow;
		/* only audit a grant if the command's auditallow bit is set */
		if (audited && od) {
			if (!avc_operation_has_perm(od, cmd,
						OPERATION_AUDITALLOW))
				audited &= ~requested;
		}
	}

	*deniedp = denied;
	return audited;
}
477
478static inline int avc_operation_audit(u32 ssid, u32 tsid, u16 tclass,
479				u32 requested, struct av_decision *avd,
480				struct operation_decision *od,
481				u16 cmd, int result,
482				struct common_audit_data *ad)
483{
484	u32 audited, denied;
485
486	audited = avc_operation_audit_required(
487			requested, avd, od, cmd, result, &denied);
488	if (likely(!audited))
489		return 0;
490	return slow_avc_audit(ssid, tsid, tclass, requested,
491			audited, denied, result, ad, 0);
492}
493
/* RCU callback: free a node once all readers have dropped it. */
static void avc_node_free(struct rcu_head *rhead)
{
	struct avc_node *node = container_of(rhead, struct avc_node, rhead);
	avc_operation_free(node->ae.ops_node);
	kmem_cache_free(avc_node_cachep, node);
	avc_cache_stats_incr(frees);
}
501
/*
 * Unlink @node from its hash chain (caller holds the bucket lock) and
 * schedule its free for after the current RCU grace period.
 */
static void avc_node_delete(struct avc_node *node)
{
	hlist_del_rcu(&node->list);
	call_rcu(&node->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}
508
/*
 * Immediately free a node that was never published to the cache, undoing
 * the active_nodes increment made by avc_alloc_node().  No RCU deferral
 * is needed because no reader can hold a reference.
 */
static void avc_node_kill(struct avc_node *node)
{
	avc_operation_free(node->ae.ops_node);
	kmem_cache_free(avc_node_cachep, node);
	avc_cache_stats_incr(frees);
	atomic_dec(&avc_cache.active_nodes);
}
516
/*
 * Atomically swap @new for @old in the hash chain; concurrent readers see
 * exactly one of the two.  @old is freed after a grace period.  The net
 * count drops by one because @new was already counted at allocation.
 */
static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
	hlist_replace_rcu(&old->list, &new->list);
	call_rcu(&old->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}
523
/*
 * Evict up to AVC_CACHE_RECLAIM nodes, scanning buckets round-robin
 * starting from lru_hint.  Buckets whose lock is contended are skipped
 * (spin_trylock) so reclaim never stalls a hot path.  Returns the number
 * of nodes actually reclaimed.
 */
static inline int avc_reclaim_node(void)
{
	struct avc_node *node;
	int hvalue, try, ecx;
	unsigned long flags;
	struct hlist_head *head;
	spinlock_t *lock;

	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
		/* advance the shared hint so successive calls spread out */
		hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
		head = &avc_cache.slots[hvalue];
		lock = &avc_cache.slots_lock[hvalue];

		if (!spin_trylock_irqsave(lock, flags))
			continue;

		rcu_read_lock();
		hlist_for_each_entry(node, head, list) {
			avc_node_delete(node);
			avc_cache_stats_incr(reclaims);
			ecx++;
			if (ecx >= AVC_CACHE_RECLAIM) {
				rcu_read_unlock();
				spin_unlock_irqrestore(lock, flags);
				goto out;
			}
		}
		rcu_read_unlock();
		spin_unlock_irqrestore(lock, flags);
	}
out:
	return ecx;
}
557
558static struct avc_node *avc_alloc_node(void)
559{
560	struct avc_node *node;
561
562	node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
563	if (!node)
564		goto out;
565
566	INIT_HLIST_NODE(&node->list);
567	avc_cache_stats_incr(allocations);
568
569	if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
570		avc_reclaim_node();
571
572out:
573	return node;
574}
575
576static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
577{
578	node->ae.ssid = ssid;
579	node->ae.tsid = tsid;
580	node->ae.tclass = tclass;
581	memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
582}
583
584static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
585{
586	struct avc_node *node, *ret = NULL;
587	int hvalue;
588	struct hlist_head *head;
589
590	hvalue = avc_hash(ssid, tsid, tclass);
591	head = &avc_cache.slots[hvalue];
592	hlist_for_each_entry_rcu(node, head, list) {
593		if (ssid == node->ae.ssid &&
594		    tclass == node->ae.tclass &&
595		    tsid == node->ae.tsid) {
596			ret = node;
597			break;
598		}
599	}
600
601	return ret;
602}
603
604/**
605 * avc_lookup - Look up an AVC entry.
606 * @ssid: source security identifier
607 * @tsid: target security identifier
608 * @tclass: target security class
609 *
610 * Look up an AVC entry that is valid for the
611 * (@ssid, @tsid), interpreting the permissions
612 * based on @tclass.  If a valid AVC entry exists,
613 * then this function returns the avc_node.
614 * Otherwise, this function returns NULL.
615 */
616static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
617{
618	struct avc_node *node;
619
620	avc_cache_stats_incr(lookups);
621	node = avc_search_node(ssid, tsid, tclass);
622
623	if (node)
624		return node;
625
626	avc_cache_stats_incr(misses);
627	return NULL;
628}
629
/*
 * Track the latest policy revocation sequence number.  On insert
 * (is_insert != 0) a @seqno older than the latest notification is
 * rejected with -EAGAIN so stale decisions are never cached; otherwise
 * the stored latest_notif is advanced if @seqno is newer.
 */
static int avc_latest_notif_update(int seqno, int is_insert)
{
	int ret = 0;
	static DEFINE_SPINLOCK(notif_lock);
	unsigned long flag;

	spin_lock_irqsave(&notif_lock, flag);
	if (is_insert) {
		if (seqno < avc_cache.latest_notif) {
			printk(KERN_WARNING "SELinux: avc:  seqno %d < latest_notif %d\n",
			       seqno, avc_cache.latest_notif);
			ret = -EAGAIN;
		}
	} else {
		if (seqno > avc_cache.latest_notif)
			avc_cache.latest_notif = seqno;
	}
	spin_unlock_irqrestore(&notif_lock, flag);

	return ret;
}
651
652/**
653 * avc_insert - Insert an AVC entry.
654 * @ssid: source security identifier
655 * @tsid: target security identifier
656 * @tclass: target security class
657 * @avd: resulting av decision
658 * @ops: resulting operation decisions
659 *
660 * Insert an AVC entry for the SID pair
661 * (@ssid, @tsid) and class @tclass.
662 * The access vectors and the sequence number are
663 * normally provided by the security server in
664 * response to a security_compute_av() call.  If the
665 * sequence number @avd->seqno is not less than the latest
666 * revocation notification, then the function copies
667 * the access vectors into a cache entry, returns
668 * avc_node inserted. Otherwise, this function returns NULL.
669 */
670static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
671				struct av_decision *avd,
672				struct avc_operation_node *ops_node)
673{
674	struct avc_node *pos, *node = NULL;
675	int hvalue;
676	unsigned long flag;
677
678	if (avc_latest_notif_update(avd->seqno, 1))
679		goto out;
680
681	node = avc_alloc_node();
682	if (node) {
683		struct hlist_head *head;
684		spinlock_t *lock;
685		int rc = 0;
686
687		hvalue = avc_hash(ssid, tsid, tclass);
688		avc_node_populate(node, ssid, tsid, tclass, avd);
689		rc = avc_operation_populate(node, ops_node);
690		if (rc) {
691			kmem_cache_free(avc_node_cachep, node);
692			return NULL;
693		}
694		head = &avc_cache.slots[hvalue];
695		lock = &avc_cache.slots_lock[hvalue];
696
697		spin_lock_irqsave(lock, flag);
698		hlist_for_each_entry(pos, head, list) {
699			if (pos->ae.ssid == ssid &&
700			    pos->ae.tsid == tsid &&
701			    pos->ae.tclass == tclass) {
702				avc_node_replace(node, pos);
703				goto found;
704			}
705		}
706		hlist_add_head_rcu(&node->list, head);
707found:
708		spin_unlock_irqrestore(lock, flag);
709	}
710out:
711	return node;
712}
713
714/**
715 * avc_audit_pre_callback - SELinux specific information
716 * will be called by generic audit code
717 * @ab: the audit buffer
718 * @a: audit_data
719 */
720static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
721{
722	struct common_audit_data *ad = a;
723	audit_log_format(ab, "avc:  %s ",
724			 ad->selinux_audit_data->denied ? "denied" : "granted");
725	avc_dump_av(ab, ad->selinux_audit_data->tclass,
726			ad->selinux_audit_data->audited);
727	audit_log_format(ab, " for ");
728}
729
730/**
731 * avc_audit_post_callback - SELinux specific information
732 * will be called by generic audit code
733 * @ab: the audit buffer
734 * @a: audit_data
735 */
736static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
737{
738	struct common_audit_data *ad = a;
739	audit_log_format(ab, " ");
740	avc_dump_query(ab, ad->selinux_audit_data->ssid,
741			   ad->selinux_audit_data->tsid,
742			   ad->selinux_audit_data->tclass);
743	if (ad->selinux_audit_data->denied) {
744		audit_log_format(ab, " permissive=%u",
745				 ad->selinux_audit_data->result ? 0 : 1);
746	}
747}
748
/*
 * This is the slow part of avc audit with big stack footprint.
 * Builds the selinux_audit_data on the stack and hands the record to the
 * generic LSM audit machinery.  Returns 0, or -ECHILD when the caller is
 * in an RCU walk and must retry (see comment below).
 */
noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
		u32 requested, u32 audited, u32 denied, int result,
		struct common_audit_data *a,
		unsigned flags)
{
	struct common_audit_data stack_data;
	struct selinux_audit_data sad;

	/* no caller-supplied audit data: log a bare record */
	if (!a) {
		a = &stack_data;
		a->type = LSM_AUDIT_DATA_NONE;
	}

	/*
	 * When in a RCU walk do the audit on the RCU retry.  This is because
	 * the collection of the dname in an inode audit message is not RCU
	 * safe.  Note this may drop some audits when the situation changes
	 * during retry. However this is logically just as if the operation
	 * happened a little later.
	 */
	if ((a->type == LSM_AUDIT_DATA_INODE) &&
	    (flags & MAY_NOT_BLOCK))
		return -ECHILD;

	sad.tclass = tclass;
	sad.requested = requested;
	sad.ssid = ssid;
	sad.tsid = tsid;
	sad.audited = audited;
	sad.denied = denied;
	sad.result = result;

	a->selinux_audit_data = &sad;

	common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
	return 0;
}
787
788/**
789 * avc_add_callback - Register a callback for security events.
790 * @callback: callback function
791 * @events: security events
792 *
793 * Register a callback function for events in the set @events.
794 * Returns %0 on success or -%ENOMEM if insufficient memory
795 * exists to add the callback.
796 */
797int __init avc_add_callback(int (*callback)(u32 event), u32 events)
798{
799	struct avc_callback_node *c;
800	int rc = 0;
801
802	c = kmalloc(sizeof(*c), GFP_KERNEL);
803	if (!c) {
804		rc = -ENOMEM;
805		goto out;
806	}
807
808	c->callback = callback;
809	c->events = events;
810	c->next = avc_callbacks;
811	avc_callbacks = c;
812out:
813	return rc;
814}
815
/* SID match helper: true when equal or either side is the wildcard SID. */
static inline int avc_sidcmp(u32 x, u32 y)
{
	return (x == y || x == SECSID_WILD || y == SECSID_WILD);
}
820
821/**
822 * avc_update_node Update an AVC entry
823 * @event : Updating event
824 * @perms : Permission mask bits
825 * @ssid,@tsid,@tclass : identifier of an AVC entry
826 * @seqno : sequence number when decision was made
827 * @od: operation_decision to be added to the node
828 *
829 * if a valid AVC entry doesn't exist,this function returns -ENOENT.
830 * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
831 * otherwise, this function updates the AVC entry. The original AVC-entry object
832 * will release later by RCU.
833 */
834static int avc_update_node(u32 event, u32 perms, u16 cmd, u32 ssid, u32 tsid,
835			u16 tclass, u32 seqno,
836			struct operation_decision *od,
837			u32 flags)
838{
839	int hvalue, rc = 0;
840	unsigned long flag;
841	struct avc_node *pos, *node, *orig = NULL;
842	struct hlist_head *head;
843	spinlock_t *lock;
844
845	node = avc_alloc_node();
846	if (!node) {
847		rc = -ENOMEM;
848		goto out;
849	}
850
851	/* Lock the target slot */
852	hvalue = avc_hash(ssid, tsid, tclass);
853
854	head = &avc_cache.slots[hvalue];
855	lock = &avc_cache.slots_lock[hvalue];
856
857	spin_lock_irqsave(lock, flag);
858
859	hlist_for_each_entry(pos, head, list) {
860		if (ssid == pos->ae.ssid &&
861		    tsid == pos->ae.tsid &&
862		    tclass == pos->ae.tclass &&
863		    seqno == pos->ae.avd.seqno){
864			orig = pos;
865			break;
866		}
867	}
868
869	if (!orig) {
870		rc = -ENOENT;
871		avc_node_kill(node);
872		goto out_unlock;
873	}
874
875	/*
876	 * Copy and replace original node.
877	 */
878
879	avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
880
881	if (orig->ae.ops_node) {
882		rc = avc_operation_populate(node, orig->ae.ops_node);
883		if (rc) {
884			kmem_cache_free(avc_node_cachep, node);
885			goto out_unlock;
886		}
887	}
888
889	switch (event) {
890	case AVC_CALLBACK_GRANT:
891		node->ae.avd.allowed |= perms;
892		if (node->ae.ops_node && (flags & AVC_OPERATION_CMD))
893			avc_operation_allow_perm(node->ae.ops_node, cmd);
894		break;
895	case AVC_CALLBACK_TRY_REVOKE:
896	case AVC_CALLBACK_REVOKE:
897		node->ae.avd.allowed &= ~perms;
898		break;
899	case AVC_CALLBACK_AUDITALLOW_ENABLE:
900		node->ae.avd.auditallow |= perms;
901		break;
902	case AVC_CALLBACK_AUDITALLOW_DISABLE:
903		node->ae.avd.auditallow &= ~perms;
904		break;
905	case AVC_CALLBACK_AUDITDENY_ENABLE:
906		node->ae.avd.auditdeny |= perms;
907		break;
908	case AVC_CALLBACK_AUDITDENY_DISABLE:
909		node->ae.avd.auditdeny &= ~perms;
910		break;
911	case AVC_CALLBACK_ADD_OPERATION:
912		avc_add_operation(node, od);
913		break;
914	}
915	avc_node_replace(node, orig);
916out_unlock:
917	spin_unlock_irqrestore(lock, flag);
918out:
919	return rc;
920}
921
922/**
923 * avc_flush - Flush the cache
924 */
925static void avc_flush(void)
926{
927	struct hlist_head *head;
928	struct avc_node *node;
929	spinlock_t *lock;
930	unsigned long flag;
931	int i;
932
933	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
934		head = &avc_cache.slots[i];
935		lock = &avc_cache.slots_lock[i];
936
937		spin_lock_irqsave(lock, flag);
938		/*
939		 * With preemptable RCU, the outer spinlock does not
940		 * prevent RCU grace periods from ending.
941		 */
942		rcu_read_lock();
943		hlist_for_each_entry(node, head, list)
944			avc_node_delete(node);
945		rcu_read_unlock();
946		spin_unlock_irqrestore(lock, flag);
947	}
948}
949
950/**
951 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
952 * @seqno: policy sequence number
953 */
954int avc_ss_reset(u32 seqno)
955{
956	struct avc_callback_node *c;
957	int rc = 0, tmprc;
958
959	avc_flush();
960
961	for (c = avc_callbacks; c; c = c->next) {
962		if (c->events & AVC_CALLBACK_RESET) {
963			tmprc = c->callback(AVC_CALLBACK_RESET);
964			/* save the first error encountered for the return
965			   value and continue processing the callbacks */
966			if (!rc)
967				rc = tmprc;
968		}
969	}
970
971	avc_latest_notif_update(seqno, 0);
972	return rc;
973}
974
975/*
976 * Slow-path helper function for avc_has_perm_noaudit,
977 * when the avc_node lookup fails. We get called with
978 * the RCU read lock held, and need to return with it
979 * still held, but drop if for the security compute.
980 *
981 * Don't inline this, since it's the slow-path and just
982 * results in a bigger stack frame.
983 */
984static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
985			 u16 tclass, struct av_decision *avd,
986			 struct avc_operation_node *ops_node)
987{
988	rcu_read_unlock();
989	INIT_LIST_HEAD(&ops_node->od_head);
990	security_compute_av(ssid, tsid, tclass, avd, &ops_node->ops);
991	rcu_read_lock();
992	return avc_insert(ssid, tsid, tclass, avd, ops_node);
993}
994
/*
 * Handle a denial: fail with -EACCES under AVC_STRICT or in enforcing
 * mode (unless the domain is flagged permissive); otherwise grant the
 * requested bits in the cached entry so the denial is not recomputed.
 */
static noinline int avc_denied(u32 ssid, u32 tsid,
				u16 tclass, u32 requested,
				u16 cmd, unsigned flags,
				struct av_decision *avd)
{
	if (flags & AVC_STRICT)
		return -EACCES;

	if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
		return -EACCES;

	avc_update_node(AVC_CALLBACK_GRANT, requested, cmd, ssid,
				tsid, tclass, avd->seqno, NULL, flags);
	return 0;
}
1010
1011/*
1012 * ioctl commands are comprised of four fields, direction, size, type, and
1013 * number. The avc operation logic filters based on two of them:
1014 *
1015 * type: or code, typically unique to each driver
1016 * number: or function
1017 *
1018 * For example, 0x89 is a socket type, and number 0x27 is the get hardware
1019 * address function.
1020 */
1021int avc_has_operation(u32 ssid, u32 tsid, u16 tclass, u32 requested,
1022			u16 cmd, struct common_audit_data *ad)
1023{
1024	struct avc_node *node;
1025	struct av_decision avd;
1026	u32 denied;
1027	struct operation_decision *od = NULL;
1028	struct operation_decision od_local;
1029	struct operation_perm allowed;
1030	struct operation_perm auditallow;
1031	struct operation_perm dontaudit;
1032	struct avc_operation_node local_ops_node;
1033	struct avc_operation_node *ops_node;
1034	u8 type = cmd >> 8;
1035	int rc = 0, rc2;
1036
1037	ops_node = &local_ops_node;
1038	BUG_ON(!requested);
1039
1040	rcu_read_lock();
1041
1042	node = avc_lookup(ssid, tsid, tclass);
1043	if (unlikely(!node)) {
1044		node = avc_compute_av(ssid, tsid, tclass, &avd, ops_node);
1045	} else {
1046		memcpy(&avd, &node->ae.avd, sizeof(avd));
1047		ops_node = node->ae.ops_node;
1048	}
1049	/* if operations are not defined, only consider av_decision */
1050	if (!ops_node || !ops_node->ops.len)
1051		goto decision;
1052
1053	od_local.allowed = &allowed;
1054	od_local.auditallow = &auditallow;
1055	od_local.dontaudit = &dontaudit;
1056
1057	/* lookup operation decision */
1058	od = avc_operation_lookup(type, ops_node);
1059	if (unlikely(!od)) {
1060		/* Compute operation decision if type is flagged */
1061		if (!security_operation_test(ops_node->ops.type, type)) {
1062			avd.allowed &= ~requested;
1063			goto decision;
1064		}
1065		rcu_read_unlock();
1066		security_compute_operation(ssid, tsid, tclass, type, &od_local);
1067		rcu_read_lock();
1068		avc_update_node(AVC_CALLBACK_ADD_OPERATION, requested, cmd,
1069				ssid, tsid, tclass, avd.seqno, &od_local, 0);
1070	} else {
1071		avc_quick_copy_operation_decision(cmd, &od_local, od);
1072	}
1073	od = &od_local;
1074
1075	if (!avc_operation_has_perm(od, cmd, OPERATION_ALLOWED))
1076		avd.allowed &= ~requested;
1077
1078decision:
1079	denied = requested & ~(avd.allowed);
1080	if (unlikely(denied))
1081		rc = avc_denied(ssid, tsid, tclass, requested, cmd,
1082				AVC_OPERATION_CMD, &avd);
1083
1084	rcu_read_unlock();
1085
1086	rc2 = avc_operation_audit(ssid, tsid, tclass, requested,
1087			&avd, od, cmd, rc, ad);
1088	if (rc2)
1089		return rc2;
1090	return rc;
1091}
1092
1093/**
1094 * avc_has_perm_noaudit - Check permissions but perform no auditing.
1095 * @ssid: source security identifier
1096 * @tsid: target security identifier
1097 * @tclass: target security class
1098 * @requested: requested permissions, interpreted based on @tclass
1099 * @flags:  AVC_STRICT or 0
1100 * @avd: access vector decisions
1101 *
1102 * Check the AVC to determine whether the @requested permissions are granted
1103 * for the SID pair (@ssid, @tsid), interpreting the permissions
1104 * based on @tclass, and call the security server on a cache miss to obtain
1105 * a new decision and add it to the cache.  Return a copy of the decisions
1106 * in @avd.  Return %0 if all @requested permissions are granted,
1107 * -%EACCES if any permissions are denied, or another -errno upon
1108 * other errors.  This function is typically called by avc_has_perm(),
1109 * but may also be called directly to separate permission checking from
1110 * auditing, e.g. in cases where a lock must be held for the check but
1111 * should be released for the auditing.
1112 */
1113inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
1114			 u16 tclass, u32 requested,
1115			 unsigned flags,
1116			 struct av_decision *avd)
1117{
1118	struct avc_node *node;
1119	struct avc_operation_node ops_node;
1120	int rc = 0;
1121	u32 denied;
1122
1123	BUG_ON(!requested);
1124
1125	rcu_read_lock();
1126
1127	node = avc_lookup(ssid, tsid, tclass);
1128	if (unlikely(!node))
1129		node = avc_compute_av(ssid, tsid, tclass, avd, &ops_node);
1130	else
1131		memcpy(avd, &node->ae.avd, sizeof(*avd));
1132
1133	denied = requested & ~(avd->allowed);
1134	if (unlikely(denied))
1135		rc = avc_denied(ssid, tsid, tclass, requested, 0, flags, avd);
1136
1137	rcu_read_unlock();
1138	return rc;
1139}
1140
1141/**
1142 * avc_has_perm - Check permissions and perform any appropriate auditing.
1143 * @ssid: source security identifier
1144 * @tsid: target security identifier
1145 * @tclass: target security class
1146 * @requested: requested permissions, interpreted based on @tclass
1147 * @auditdata: auxiliary audit data
1148 *
1149 * Check the AVC to determine whether the @requested permissions are granted
1150 * for the SID pair (@ssid, @tsid), interpreting the permissions
1151 * based on @tclass, and call the security server on a cache miss to obtain
1152 * a new decision and add it to the cache.  Audit the granting or denial of
1153 * permissions in accordance with the policy.  Return %0 if all @requested
1154 * permissions are granted, -%EACCES if any permissions are denied, or
1155 * another -errno upon other errors.
1156 */
1157int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
1158		 u32 requested, struct common_audit_data *auditdata)
1159{
1160	struct av_decision avd;
1161	int rc, rc2;
1162
1163	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
1164
1165	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
1166	if (rc2)
1167		return rc2;
1168	return rc;
1169}
1170
/* Return the sequence number of the latest policy change notification. */
u32 avc_policy_seqno(void)
{
	return avc_cache.latest_notif;
}
1175
/* Flush the cache when SELinux is disabled at runtime; the slab cache
 * itself is deliberately not destroyed (see comment below). */
void avc_disable(void)
{
	/*
	 * If you are looking at this because you have realized that we are
	 * not destroying the avc_node_cachep it might be easy to fix, but
	 * I don't know the memory barrier semantics well enough to know.  It's
	 * possible that some other task dereferenced security_ops when
	 * it still pointed to selinux operations.  If that is the case it's
	 * possible that it is about to use the avc and is about to need the
	 * avc_node_cachep.  I know I could wrap the security.c security_ops call
	 * in an rcu_lock, but seriously, it's not worth it.  Instead I just flush
	 * the cache and get that memory back.
	 */
	if (avc_node_cachep) {
		avc_flush();
		/* kmem_cache_destroy(avc_node_cachep); */
	}
}
1194