tcp_metrics.c revision 00ca9c5b2b11d44eaf20a4b647efc999734323ec
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

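/* Accessor helpers.  tcpm_lock is a bitmask mirroring the dst's RTAX_*
 * locks: a set bit means that metric was administratively locked on the
 * route and must not be overwritten with learned values.  RTT and
 * RTTVAR are stored in tcpm_vals[] in milliseconds, still carrying the
 * <<3 scaling of tp->srtt (tcp_init_metrics() reads the cached RTT back
 * as "scaled by 8"), so the *_jiffies/*_msecs helpers convert at the
 * storage boundary.
 */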
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

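/* Per-netns hash table: 2^tcp_metrics_hash_log buckets, each holding an
 * RCU-protected chain of blocks keyed by (source, destination) address
 * pair.  Lookups walk the chains locklessly under rcu_read_lock();
 * writers serialize on the single global tcp_metrics_lock below.
 */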
struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

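/* (Re)load a metrics block from the dst entry: copy the RTAX_* lock
 * bits and raw metric values, refresh tcpm_stamp, and clear the cached
 * timestamps.  Fast Open state is wiped only when fastopen_clear is
 * set, i.e. for newly created or recycled blocks, not on a periodic
 * re-sync of an existing one.
 */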
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

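/* A block that has not been stamped for an hour is considered stale and
 * is re-primed from the current dst before use.
 */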
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

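/* Create a metrics block for (saddr, daddr), or recycle one.  When a
 * lookup walks more than TCP_METRICS_RECLAIM_DEPTH entries without a
 * match it returns the TCP_METRICS_RECLAIM_PTR sentinel instead of
 * NULL; in that case the least recently stamped block in the bucket is
 * reused rather than letting the chain grow without bound.
 */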
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

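/* Encode the result of a chain walk: the matching entry if one was
 * found, the reclaim sentinel if the walk went deeper than the reclaim
 * depth without a match, and NULL otherwise.
 */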
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

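/* The three lookup variants below differ only in where the address pair
 * comes from: a request sock, a timewait sock, or a full sock.  For
 * IPv4 the hash seed is the raw destination address; for IPv6 it is
 * ipv6_addr_hash() of the destination.  Either way the seed is folded
 * into a bucket index with hash_32(seed, tcp_metrics_hash_log).
 */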
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
		daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
		hash = (__force unsigned int) daddr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)saddr.addr.a6 = inet_rsk(req)->ir_v6_loc_addr;
		*(struct in6_addr *)daddr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = tw->tw_family;
	daddr.family = tw->tw_family;
	switch (daddr.family) {
	case AF_INET:
		saddr.addr.a4 = tw->tw_rcv_saddr;
		daddr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)saddr.addr.a6 = tw->tw_v6_rcv_saddr;
		*(struct in6_addr *)daddr.addr.a6 = tw->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		break;
#endif
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr))
			break;
	}
	return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = sk->sk_family;
	daddr.family = sk->sk_family;
	switch (daddr.family) {
	case AF_INET:
		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) daddr.addr.a4;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		*(struct in6_addr *)saddr.addr.a6 = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)daddr.addr.a6 = sk->sk_v6_daddr;
		hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one.  Otherwise, use EWMA.  Remember, rtt
	 * overestimation is always better than underestimation.
	 */
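	/* For example (both values carry the <<3 scaling of tp->srtt):
	 * a cached rtt of 940 and a new tp->srtt of 780 give m = 160, so
	 * the cached value only decays by m/8 to 920; a new tp->srtt of
	 * 1000 gives m = -60 <= 0 and 1000 is stored outright.
	 */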
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is not reliable,
		 * and ssthresh may be invalid as well.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes between sending a
	 * "normal" sized packet and receiving its ACK. In normal
	 * circumstances even small packets force the peer to delay ACKs,
	 * so the calculation stays correct. The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT. BUT! If
	 * the peer pulls clever tricks, acking quickly for long enough to
	 * drive the RTT estimate down and then abruptly switching to
	 * delayed ACKs, expect trouble.
	 */
	if (crtt > tp->srtt) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt >>= 3;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious ones. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298's more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

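/* Decide whether a peer is "proven".  With paws_check the request is
 * rejected only if we hold a timestamp for the peer that is both recent
 * (younger than TCP_PAWS_MSL) and ahead of the request's ts_recent by
 * more than TCP_PAWS_WINDOW; without paws_check we simply report
 * whether both an RTT sample and a timestamp are on file.
 */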
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

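/* Seed a new socket's ts_recent from the timestamp cached for the
 * destination, provided the cached entry is younger than TCP_PAWS_MSL.
 */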
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save last timestamp seen from this destination and hold
 * it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts	   = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

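/* Fast Open state is read under RCU but written outside
 * tcp_metrics_lock, so a dedicated seqlock lets readers retry until
 * they see a consistent (mss, cookie, syn_loss) snapshot.
 */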
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

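/* Generic netlink interface for inspecting and flushing the cache
 * (consumed by e.g. iproute2's "ip tcp_metrics").  CMD_GET fetches a
 * single entry or dumps the whole table; CMD_DEL removes one entry or,
 * with no address attribute, flushes everything.
 */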
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* Following attributes are not received for GET/DEL,
	 * we keep them for reference
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				tm->tcpm_daddr.addr.a4) < 0)
			goto nla_put_failure;
		if (nla_put_be32(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				tm->tcpm_saddr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_daddr.addr.a6) < 0)
			goto nla_put_failure;
		if (nla_put(msg, TCP_METRICS_ATTR_SADDR_IPV6, 16,
			    tm->tcpm_saddr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

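/* Dump callback: cb->args[0] and cb->args[1] record the (row, column)
 * at which the previous message filled up, so the dump resumes there on
 * the next invocation.
 */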
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

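/* Parse an address from the request attributes.  Returns 0 on success,
 * 1 if the attribute is absent and optional (CMD_DEL uses this to mean
 * "flush all"), and a negative errno otherwise.
 */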
static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		if (hash)
			*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		if (hash)
			*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

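/* deref_locked_genl() is for chain walks done with both the genl mutex
 * and tcp_metrics_lock held; deref_genl() suffices once a chain has
 * been unlinked from its bucket and is only reachable by the flushing
 * thread.
 */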
#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr))) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

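/* "tcpmhash_entries=N" on the kernel command line overrides the
 * RAM-based sizing of the hash table done in tcp_net_metrics_init().
 */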
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

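/* Size the per-netns table: 16K slots on machines with at least 128K
 * pages of RAM (512MB with 4KB pages), 8K otherwise, unless overridden
 * at boot.  kzalloc() is tried first with __GFP_NOWARN and vzalloc() is
 * the fallback, since the table may be too large for the page
 * allocator.
 */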
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!net->ipv4.tcp_metrics_hash)
		net->ipv4.tcp_metrics_hash = vzalloc(size);

	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

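/* On netns teardown no readers remain, so the chains are walked with
 * rcu_dereference_protected(..., 1) and freed with plain kfree(); no
 * RCU grace period is needed.
 */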
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
		vfree(net->ipv4.tcp_metrics_hash);
	else
		kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops);
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}
