/*
 * INET        An implementation of the TCP/IP protocol suite for the LINUX
 *             operating system.  INET is implemented using the  BSD Socket
 *             interface as the means of communication with the user level.
 *
 *             Support for INET6 connection oriented protocols.
 *
 * Authors:    See the TCPv6 sources
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

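/*
 * Check whether binding @sk would conflict with the sockets already bound
 * to the same port (the owners of bind bucket @tb), taking SO_REUSEADDR,
 * SO_REUSEPORT and bound devices into account.  When @relax is false an
 * additional conflict check is applied to addresses shared via
 * SO_REUSEADDR.  Returns non-zero if a conflicting socket is found.
 */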
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb, bool relax)
{
	const struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/* We must walk the whole port owner list in this case. -DaveM */
	/*
	 * See comment in inet_csk_bind_conflict about sock lookup
	 * vs net namespaces issues.
	 */
	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			     sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			      !uid_eq(uid,
				      sock_i_uid((struct sock *)sk2))))) {
				if (ipv6_rcv_saddr_equal(sk, sk2))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN &&
			    ipv6_rcv_saddr_equal(sk, sk2))
				break;
		}
	}

	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

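/*
 * Build a flow description in @fl6 from the connection request @req and
 * look up the route used to answer it.  Returns the dst entry, or NULL if
 * the lookup failed.
 */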
struct dst_entry *inet6_csk_route_req(struct sock *sk,
				      struct flowi6 *fl6,
				      const struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = IPPROTO_TCP;
	fl6->daddr = ireq->ir_v6_rmt_addr;
	final_p = fl6_update_dst(fl6, np->opt, &final);
	fl6->saddr = ireq->ir_v6_loc_addr;
	fl6->flowi6_oif = ireq->ir_iif;
	fl6->flowi6_mark = ireq->ir_mark;
	fl6->fl6_dport = ireq->ir_rmt_port;
	fl6->fl6_sport = htons(ireq->ir_num);
	fl6->flowi6_uid = sock_i_uid(sk);
	security_req_classify_flow(req, flowi6_to_flowi(fl6));

	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	if (IS_ERR(dst))
		return NULL;

	return dst;
}

/*
 * request_sock (formerly open request) hash tables.
 */
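/*
 * Hash a remote address/port pair into one of the @synq_hsize buckets of a
 * listener's SYN table.  @synq_hsize must be a power of two.
 */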
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u32 synq_hsize)
{
	u32 c;

	c = jhash_3words((__force u32)raddr->s6_addr32[0],
			 (__force u32)raddr->s6_addr32[1],
			 (__force u32)raddr->s6_addr32[2],
			 rnd);

	c = jhash_2words((__force u32)raddr->s6_addr32[3],
			 (__force u32)rport,
			 c);

	return c & (synq_hsize - 1);
}

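/*
 * Look up a pending connection request on listener @sk matching the remote
 * port/address, local address and (if set) inbound interface.  On success
 * *prevp is set to the slot pointing at the request so the caller can
 * unlink it from the hash chain.
 */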
struct request_sock *inet6_csk_search_req(const struct sock *sk,
					  struct request_sock ***prevp,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
						     lopt->hash_rnd,
						     lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
			WARN_ON(req->sk != NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_search_req);

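/*
 * Insert @req into the listener's SYN table with expiry @timeout and
 * update the accept-queue accounting for the new pending request.
 */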
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
				      inet_rsk(req)->ir_rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

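/*
 * Fill in a sockaddr_in6 with the peer (remote) address and port of the
 * connected socket @sk.
 */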
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = sk->sk_v6_daddr;
	sin6->sin6_port = inet_sk(sk)->inet_dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
						  sk->sk_bound_dev_if);
}
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

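/*
 * Helpers around the generic socket dst cache: store a route together with
 * its validation cookie, and check a cached route against that cookie.
 */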
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   const struct in6_addr *daddr,
			   const struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);
}

static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	return __sk_dst_check(sk, cookie);
}

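/*
 * Return a route for the fully established socket @sk, describing the flow
 * in @fl6.  A cached route is reused while still valid; otherwise a fresh
 * lookup is performed and the result is cached on the socket.
 */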
static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
						struct flowi6 *fl6)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = sk->sk_protocol;
	fl6->daddr = sk->sk_v6_daddr;
	fl6->saddr = np->saddr;
	fl6->flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
	fl6->flowi6_oif = sk->sk_bound_dev_if;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_sport = inet->inet_sport;
	fl6->fl6_dport = inet->inet_dport;
	fl6->flowi6_uid = sock_i_uid(sk);
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));

	final_p = fl6_update_dst(fl6, np->opt, &final);

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
	if (!dst) {
		dst = ip6_dst_lookup_flow(sk, fl6, final_p);

		if (!IS_ERR(dst))
			__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}
	return dst;
}

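/*
 * Transmit @skb on the connection-oriented socket @sk: route the packet
 * (dropping it and recording a soft error if routing fails), then hand it
 * to ip6_xmit() with the socket's IPv6 options and traffic class.
 */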
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;
	struct dst_entry *dst;
	int res;

	dst = inet6_csk_route_socket(sk, &fl6);
	if (IS_ERR(dst)) {
		sk->sk_err_soft = -PTR_ERR(dst);
		sk->sk_route_caps = 0;
		kfree_skb(skb);
		return PTR_ERR(dst);
	}

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);

	/* Restore final destination back after routing done */
	fl6.daddr = sk->sk_v6_daddr;

	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);

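/*
 * Propagate a new path MTU @mtu to the route used by @sk and return the
 * (possibly rebuilt) route, or NULL if no route could be found.
 */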
struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct flowi6 fl6;
	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);

	if (IS_ERR(dst))
		return NULL;
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = inet6_csk_route_socket(sk, &fl6);
	return IS_ERR(dst) ? NULL : dst;
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);