tcp_metrics.c revision 2c42a3fb30845867bfcaf0747ff50c1375884ff2
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

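/* Per-destination TCP Fast Open state cached between connections: the
 * negotiated MSS, a counter of recurring SYN(+data) losses and when the
 * last one happened, and the Fast Open cookie itself.
 */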
struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10;		/* Recurring Fast Open SYN losses */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

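/* One cache entry: metrics remembered for a single peer address, kept
 * on an RCU-protected hash chain.  tcpm_lock marks metrics that are
 * locked in the route and must not be overwritten; tcpm_vals is indexed
 * by enum tcp_metric_index.
 */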
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	struct inetpeer_addr		tcpm_addr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

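/* Small accessors for the per-metric values.  Time-valued metrics
 * (RTT, RTTVAR) are stored in milliseconds; the *_jiffies/*_msecs
 * helpers convert to and from jiffies at the boundary.
 */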
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
				  enum tcp_metric_index idx)
{
	return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
				 enum tcp_metric_index idx,
				 u32 val)
{
	tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	const struct in6_addr *a6, *b6;

	if (a->family != b->family)
		return false;
	if (a->family == AF_INET)
		return a->addr.a4 == b->addr.a4;

	a6 = (const struct in6_addr *) &a->addr.a6[0];
	b6 = (const struct in6_addr *) &b->addr.a6[0];

	return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);

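/* Seed (or re-seed) a cache entry from the route: copy the RTAX_*
 * metrics out of the dst, record which of them are locked there, and
 * clear the timestamp and Fast Open state.
 */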
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	tm->tcpm_fastopen.mss = 0;
	tm->tcpm_fastopen.syn_loss = 0;
	tm->tcpm_fastopen.cookie.len = 0;
}

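/* Insert a new entry for @addr into bucket @hash, initialised from
 * @dst.  When the chain is already too deep (reclaim), the oldest
 * entry in the bucket is recycled in place instead of allocating.
 */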
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

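/* A lookup that walks more than TCP_METRICS_RECLAIM_DEPTH entries
 * without a match returns the TCP_METRICS_RECLAIM_PTR sentinel, telling
 * the caller to recycle the oldest entry instead of growing the chain.
 */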
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, addr))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

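/* Look up the cache entry for the peer of a request socket (passive
 * open), refreshing it from the route if it has gone stale.
 */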
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = req->rsk_ops->family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_rsk(req)->rmt_addr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
		hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

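/* Look up the cache entry for the peer of a timewait socket. */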
static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct inet6_timewait_sock *tw6;
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;

	addr.family = tw->tw_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = tw->tw_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		tw6 = inet6_twsk((struct sock *)tw);
		*(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
		hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
		break;
	default:
		return NULL;
	}

	net = twsk_net(tw);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr))
			break;
	}
	return tm;
}

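/* Find the cache entry for the peer of a full socket, optionally
 * creating (or reclaiming) one if none exists yet.
 */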
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net;
	bool reclaim;

	addr.family = sk->sk_family;
	switch (addr.family) {
	case AF_INET:
		addr.addr.a4 = inet_sk(sk)->inet_daddr;
		hash = (__force unsigned int) addr.addr.a4;
		break;
	case AF_INET6:
		*(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
		hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
		break;
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&addr, net, hash);
	reclaim = false;
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (!tm && create)
		tm = tcpm_new(dst, &addr, hash, reclaim);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one. Otherwise, use EWMA. Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt;
		else
			rtt -= (m >> 3);
		tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev)
			m = tp->mdev;

		var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is meaningless and
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val;

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during the
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	val = tcp_metric_get(tm, TCP_METRIC_RTT);
	if (val == 0 || tp->srtt == 0) {
		rcu_read_unlock();
		goto reset;
	}
	/* The initial rtt is determined from the SYN, SYN-ACK exchange.
	 * Those segments are small and the rtt may appear much
	 * lower than the real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs, so the
	 * calculation is still correct. The algorithm is adaptive and,
	 * provided we follow the specs, it NEVER underestimates RTT.
	 * BUT! If the peer plays clever tricks such as "quick acks" for
	 * long enough to drive RTT down to a low value, and then abruptly
	 * stops doing so and starts delaying ACKs, expect trouble.
	 */
	val = msecs_to_jiffies(val);
	if (val > tp->srtt) {
		tp->srtt = val;
		tp->rtt_seq = tp->snd_nxt;
	}
	val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
	if (val > tp->mdev) {
		tp->mdev = val;
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	rcu_read_unlock();

	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * the 3WHS. This is most likely due to retransmission,
		 * including spurious ones. Reset the RTO back to 3 secs
		 * from the more aggressive 1 sec to avoid more spurious
		 * retransmissions.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298's more aggressive 1 sec
	 * initRTO, we only reset cwnd when more than one SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

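/* Check whether the peer of @req has a trustworthy metrics entry.  With
 * paws_check the cached timestamp is validated against req->ts_recent;
 * otherwise we simply require that a cached RTT and timestamp exist.
 */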
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

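/* Seed a new connection's PAWS state from the timestamp we remembered
 * for this destination, provided it is still within TCP_PAWS_MSL.
 */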
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

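/* Same as tcp_remember_stamp() but for a timewait socket, which keeps
 * its own copy of the received timestamp state.
 */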
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts	   = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

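/* Read the cached Fast Open state (MSS, cookie, SYN loss history) for
 * this destination; the seqlock gives a consistent snapshot without
 * blocking writers.
 */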
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

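/* Update the cached Fast Open state after a connection attempt:
 * remember the MSS and cookie, and track recurring SYN losses.
 */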
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		tfom->mss = mss;
		if (cookie->len > 0)
			tfom->cookie = *cookie;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

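/* Generic netlink interface for inspecting and flushing the cache.
 * Userspace reaches it via the family name TCP_METRICS_GENL_NAME (for
 * example, iproute2's "ip tcp_metrics" command talks to this family).
 */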
static struct genl_family tcp_metrics_nl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes; the caller cancels its header on failure. */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_addr.family) {
	case AF_INET:
		if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				tm->tcpm_addr.addr.a4) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
			    tm->tcpm_addr.addr.a6) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
			if (!tm->tcpm_vals[i])
				continue;
			if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

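/* Dump the whole cache; cb->args[] carries the bucket row and chain
 * position so the dump can resume across multiple netlink messages.
 */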
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

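/* Parse the destination address attribute (IPv4 or IPv6) from a
 * netlink request and derive the hash seed.  Returns 1 if no address
 * was given and @optional is set, 0 on success, negative on error.
 */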
static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	struct nlattr *a;

	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
	if (a) {
		addr->family = AF_INET;
		addr->addr.a4 = nla_get_be32(a);
		*hash = (__force unsigned int) addr->addr.a4;
		return 0;
	}
	a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
	if (a) {
		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		addr->family = AF_INET6;
		memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
		*hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 0);
	if (ret < 0)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

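/* Dereference helpers for paths that hold the genl mutex (and, for
 * writers, tcp_metrics_lock) instead of rcu_read_lock().
 */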
#define deref_locked_genl(p)	\
	rcu_dereference_protected(p, lockdep_genl_is_held() && \
				     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)	rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		spin_lock_bh(&tcp_metrics_lock);
		tm = deref_locked_genl(hb->chain);
		if (tm)
			hb->chain = NULL;
		spin_unlock_bh(&tcp_metrics_lock);
		while (tm) {
			struct tcp_metrics_block *next;

			next = deref_genl(tm->tcpm_next);
			kfree_rcu(tm, rcu_head);
			tm = next;
		}
	}
	return 0;
}

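/* Delete the entry for a given address, or flush the whole cache when
 * no address attribute is supplied.
 */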
static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr addr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;

	ret = parse_nl_addr(info, &addr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return tcp_metrics_flush_all(net);

	hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
	hb = net->ipv4.tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked_genl(*pp); tm;
	     pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
		if (addr_same(&tm->tcpm_addr, &addr)) {
			*pp = tm->tcpm_next;
			break;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!tm)
		return -ESRCH;
	kfree_rcu(tm, rcu_head);
	return 0;
}

static struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

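/* Number of hash slots; can be overridden with the "tcpmhash_entries="
 * kernel boot parameter, otherwise sized from available memory in
 * tcp_net_metrics_init().
 */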
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

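/* Per-netns setup: size the hash table (from the boot parameter or
 * total memory) and allocate the bucket array.
 */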
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

	net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
	if (!net->ipv4.tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	unsigned int i;

	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
		struct tcp_metrics_block *tm, *next;

		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
		while (tm) {
			next = rcu_dereference_protected(tm->tcpm_next, 1);
			kfree(tm);
			tm = next;
		}
	}
	kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

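/* Boot-time registration of the per-netns hash table and the generic
 * netlink family; genl registration failure unwinds the pernet subsys.
 */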
void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		goto cleanup;
	ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
					    tcp_metrics_nl_ops,
					    ARRAY_SIZE(tcp_metrics_nl_ops));
	if (ret < 0)
		goto cleanup_subsys;
	return;

cleanup_subsys:
	unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
	return;
}