1/* 2 * TCP over IPv6 3 * Linux INET6 implementation 4 * 5 * Authors: 6 * Pedro Roque <roque@di.fc.ul.pt> 7 * 8 * Based on: 9 * linux/net/ipv4/tcp.c 10 * linux/net/ipv4/tcp_input.c 11 * linux/net/ipv4/tcp_output.c 12 * 13 * Fixes: 14 * Hideaki YOSHIFUJI : sin6_scope_id support 15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which 16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind 17 * a single port at the same time. 18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file. 19 * 20 * This program is free software; you can redistribute it and/or 21 * modify it under the terms of the GNU General Public License 22 * as published by the Free Software Foundation; either version 23 * 2 of the License, or (at your option) any later version. 24 */ 25 26#include <linux/bottom_half.h> 27#include <linux/module.h> 28#include <linux/errno.h> 29#include <linux/types.h> 30#include <linux/socket.h> 31#include <linux/sockios.h> 32#include <linux/net.h> 33#include <linux/jiffies.h> 34#include <linux/in.h> 35#include <linux/in6.h> 36#include <linux/netdevice.h> 37#include <linux/init.h> 38#include <linux/jhash.h> 39#include <linux/ipsec.h> 40#include <linux/times.h> 41#include <linux/slab.h> 42 43#include <linux/ipv6.h> 44#include <linux/icmpv6.h> 45#include <linux/random.h> 46 47#include <net/tcp.h> 48#include <net/ndisc.h> 49#include <net/inet6_hashtables.h> 50#include <net/inet6_connection_sock.h> 51#include <net/ipv6.h> 52#include <net/transp_v6.h> 53#include <net/addrconf.h> 54#include <net/ip6_route.h> 55#include <net/ip6_checksum.h> 56#include <net/inet_ecn.h> 57#include <net/protocol.h> 58#include <net/xfrm.h> 59#include <net/snmp.h> 60#include <net/dsfield.h> 61#include <net/timewait_sock.h> 62#include <net/netdma.h> 63#include <net/inet_common.h> 64#include <net/secure_seq.h> 65#include <net/tcp_memcontrol.h> 66 67#include <asm/uaccess.h> 68 69#include <linux/proc_fs.h> 70#include <linux/seq_file.h> 71 72#include <linux/crypto.h> 73#include <linux/scatterlist.h> 74 75static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); 76static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 77 struct request_sock *req); 78 79static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 80 81static const struct inet_connection_sock_af_ops ipv6_mapped; 82static const struct inet_connection_sock_af_ops ipv6_specific; 83#ifdef CONFIG_TCP_MD5SIG 84static const struct tcp_sock_af_ops tcp_sock_ipv6_specific; 85static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 86#else 87static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 88 const struct in6_addr *addr) 89{ 90 return NULL; 91} 92#endif 93 94static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) 95{ 96 struct dst_entry *dst = skb_dst(skb); 97 const struct rt6_info *rt = (const struct rt6_info *)dst; 98 99 dst_hold(dst); 100 sk->sk_rx_dst = dst; 101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; 102 if (rt->rt6i_node) 103 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; 104} 105 106static void tcp_v6_hash(struct sock *sk) 107{ 108 if (sk->sk_state != TCP_CLOSE) { 109 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) { 110 tcp_prot.hash(sk); 111 return; 112 } 113 local_bh_disable(); 114 __inet6_hash(sk, NULL); 115 local_bh_enable(); 116 } 117} 118 119static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) 120{ 121 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, 122 ipv6_hdr(skb)->saddr.s6_addr32, 123 tcp_hdr(skb)->dest, 124 tcp_hdr(skb)->source); 125} 126 127static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 128 int addr_len) 129{ 130 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 131 struct inet_sock *inet = inet_sk(sk); 132 struct inet_connection_sock *icsk = inet_csk(sk); 133 struct ipv6_pinfo *np = inet6_sk(sk); 134 struct tcp_sock *tp = tcp_sk(sk); 135 struct in6_addr *saddr = NULL, *final_p, final; 136 struct rt6_info *rt; 137 struct flowi6 fl6; 138 struct dst_entry *dst; 139 int addr_type; 140 int err; 141 142 if (addr_len < SIN6_LEN_RFC2133) 143 return -EINVAL; 144 145 if (usin->sin6_family != AF_INET6) 146 return -EAFNOSUPPORT; 147 148 memset(&fl6, 0, sizeof(fl6)); 149 150 if (np->sndflow) { 151 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; 152 IP6_ECN_flow_init(fl6.flowlabel); 153 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { 154 struct ip6_flowlabel *flowlabel; 155 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); 156 if (flowlabel == NULL) 157 return -EINVAL; 158 usin->sin6_addr = flowlabel->dst; 159 fl6_sock_release(flowlabel); 160 } 161 } 162 163 /* 164 * connect() to INADDR_ANY means loopback (BSD'ism). 165 */ 166 167 if(ipv6_addr_any(&usin->sin6_addr)) 168 usin->sin6_addr.s6_addr[15] = 0x1; 169 170 addr_type = ipv6_addr_type(&usin->sin6_addr); 171 172 if(addr_type & IPV6_ADDR_MULTICAST) 173 return -ENETUNREACH; 174 175 if (addr_type&IPV6_ADDR_LINKLOCAL) { 176 if (addr_len >= sizeof(struct sockaddr_in6) && 177 usin->sin6_scope_id) { 178 /* If interface is set while binding, indices 179 * must coincide. 180 */ 181 if (sk->sk_bound_dev_if && 182 sk->sk_bound_dev_if != usin->sin6_scope_id) 183 return -EINVAL; 184 185 sk->sk_bound_dev_if = usin->sin6_scope_id; 186 } 187 188 /* Connect to link-local address requires an interface */ 189 if (!sk->sk_bound_dev_if) 190 return -EINVAL; 191 } 192 193 if (tp->rx_opt.ts_recent_stamp && 194 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) { 195 tp->rx_opt.ts_recent = 0; 196 tp->rx_opt.ts_recent_stamp = 0; 197 tp->write_seq = 0; 198 } 199 200 np->daddr = usin->sin6_addr; 201 np->flow_label = fl6.flowlabel; 202 203 /* 204 * TCP over IPv4 205 */ 206 207 if (addr_type == IPV6_ADDR_MAPPED) { 208 u32 exthdrlen = icsk->icsk_ext_hdr_len; 209 struct sockaddr_in sin; 210 211 SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); 212 213 if (__ipv6_only_sock(sk)) 214 return -ENETUNREACH; 215 216 sin.sin_family = AF_INET; 217 sin.sin_port = usin->sin6_port; 218 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; 219 220 icsk->icsk_af_ops = &ipv6_mapped; 221 sk->sk_backlog_rcv = tcp_v4_do_rcv; 222#ifdef CONFIG_TCP_MD5SIG 223 tp->af_specific = &tcp_sock_ipv6_mapped_specific; 224#endif 225 226 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); 227 228 if (err) { 229 icsk->icsk_ext_hdr_len = exthdrlen; 230 icsk->icsk_af_ops = &ipv6_specific; 231 sk->sk_backlog_rcv = tcp_v6_do_rcv; 232#ifdef CONFIG_TCP_MD5SIG 233 tp->af_specific = &tcp_sock_ipv6_specific; 234#endif 235 goto failure; 236 } else { 237 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); 238 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, 239 &np->rcv_saddr); 240 } 241 242 return err; 243 } 244 245 if (!ipv6_addr_any(&np->rcv_saddr)) 246 saddr = &np->rcv_saddr; 247 248 fl6.flowi6_proto = IPPROTO_TCP; 249 fl6.daddr = np->daddr; 250 fl6.saddr = saddr ? *saddr : np->saddr; 251 fl6.flowi6_oif = sk->sk_bound_dev_if; 252 fl6.flowi6_mark = sk->sk_mark; 253 fl6.fl6_dport = usin->sin6_port; 254 fl6.fl6_sport = inet->inet_sport; 255 fl6.flowi6_uid = sock_i_uid(sk); 256 257 final_p = fl6_update_dst(&fl6, np->opt, &final); 258 259 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 260 261 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); 262 if (IS_ERR(dst)) { 263 err = PTR_ERR(dst); 264 goto failure; 265 } 266 267 if (saddr == NULL) { 268 saddr = &fl6.saddr; 269 np->rcv_saddr = *saddr; 270 } 271 272 /* set the source address */ 273 np->saddr = *saddr; 274 inet->inet_rcv_saddr = LOOPBACK4_IPV6; 275 276 sk->sk_gso_type = SKB_GSO_TCPV6; 277 __ip6_dst_store(sk, dst, NULL, NULL); 278 279 rt = (struct rt6_info *) dst; 280 if (tcp_death_row.sysctl_tw_recycle && 281 !tp->rx_opt.ts_recent_stamp && 282 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) 283 tcp_fetch_timewait_stamp(sk, dst); 284 285 icsk->icsk_ext_hdr_len = 0; 286 if (np->opt) 287 icsk->icsk_ext_hdr_len = (np->opt->opt_flen + 288 np->opt->opt_nflen); 289 290 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 291 292 inet->inet_dport = usin->sin6_port; 293 294 tcp_set_state(sk, TCP_SYN_SENT); 295 err = inet6_hash_connect(&tcp_death_row, sk); 296 if (err) 297 goto late_failure; 298 299 if (!tp->write_seq && likely(!tp->repair)) 300 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, 301 np->daddr.s6_addr32, 302 inet->inet_sport, 303 inet->inet_dport); 304 305 err = tcp_connect(sk); 306 if (err) 307 goto late_failure; 308 309 return 0; 310 311late_failure: 312 tcp_set_state(sk, TCP_CLOSE); 313 __sk_dst_reset(sk); 314failure: 315 inet->inet_dport = 0; 316 sk->sk_route_caps = 0; 317 return err; 318} 319 320static void tcp_v6_mtu_reduced(struct sock *sk) 321{ 322 struct dst_entry *dst; 323 324 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) 325 return; 326 327 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info); 328 if (!dst) 329 return; 330 331 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { 332 tcp_sync_mss(sk, dst_mtu(dst)); 333 tcp_simple_retransmit(sk); 334 } 335} 336 337static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 338 u8 type, u8 code, int offset, __be32 info) 339{ 340 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data; 341 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); 342 struct ipv6_pinfo *np; 343 struct sock *sk; 344 int err; 345 struct tcp_sock *tp; 346 __u32 seq; 347 struct net *net = dev_net(skb->dev); 348 349 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr, 350 th->dest, &hdr->saddr, th->source, skb->dev->ifindex); 351 352 if (sk == NULL) { 353 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), 354 ICMP6_MIB_INERRORS); 355 return; 356 } 357 358 if (sk->sk_state == TCP_TIME_WAIT) { 359 inet_twsk_put(inet_twsk(sk)); 360 return; 361 } 362 363 bh_lock_sock(sk); 364 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) 365 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); 366 367 if (sk->sk_state == TCP_CLOSE) 368 goto out; 369 370 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { 371 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); 372 goto out; 373 } 374 375 tp = tcp_sk(sk); 376 seq = ntohl(th->seq); 377 if (sk->sk_state != TCP_LISTEN && 378 !between(seq, tp->snd_una, tp->snd_nxt)) { 379 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 380 goto out; 381 } 382 383 np = inet6_sk(sk); 384 385 if (type == NDISC_REDIRECT) { 386 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); 387 388 if (dst) 389 dst->ops->redirect(dst, sk, skb); 390 goto out; 391 } 392 393 if (type == ICMPV6_PKT_TOOBIG) { 394 /* We are not interested in TCP_LISTEN and open_requests 395 * (SYN-ACKs send out by Linux are always <576bytes so 396 * they should go through unfragmented). 397 */ 398 if (sk->sk_state == TCP_LISTEN) 399 goto out; 400 401 tp->mtu_info = ntohl(info); 402 if (!sock_owned_by_user(sk)) 403 tcp_v6_mtu_reduced(sk); 404 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, 405 &tp->tsq_flags)) 406 sock_hold(sk); 407 goto out; 408 } 409 410 icmpv6_err_convert(type, code, &err); 411 412 /* Might be for an request_sock */ 413 switch (sk->sk_state) { 414 struct request_sock *req, **prev; 415 case TCP_LISTEN: 416 if (sock_owned_by_user(sk)) 417 goto out; 418 419 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr, 420 &hdr->saddr, inet6_iif(skb)); 421 if (!req) 422 goto out; 423 424 /* ICMPs are not backlogged, hence we cannot get 425 * an established socket here. 426 */ 427 WARN_ON(req->sk != NULL); 428 429 if (seq != tcp_rsk(req)->snt_isn) { 430 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 431 goto out; 432 } 433 434 inet_csk_reqsk_queue_drop(sk, req, prev); 435 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 436 goto out; 437 438 case TCP_SYN_SENT: 439 case TCP_SYN_RECV: /* Cannot happen. 440 It can, it SYNs are crossed. --ANK */ 441 if (!sock_owned_by_user(sk)) { 442 sk->sk_err = err; 443 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 444 445 tcp_done(sk); 446 } else 447 sk->sk_err_soft = err; 448 goto out; 449 } 450 451 if (!sock_owned_by_user(sk) && np->recverr) { 452 sk->sk_err = err; 453 sk->sk_error_report(sk); 454 } else 455 sk->sk_err_soft = err; 456 457out: 458 bh_unlock_sock(sk); 459 sock_put(sk); 460} 461 462 463static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, 464 struct flowi6 *fl6, 465 struct request_sock *req, 466 u16 queue_mapping) 467{ 468 struct inet6_request_sock *treq = inet6_rsk(req); 469 struct ipv6_pinfo *np = inet6_sk(sk); 470 struct sk_buff * skb; 471 int err = -ENOMEM; 472 473 /* First, grab a route. */ 474 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) 475 goto done; 476 477 skb = tcp_make_synack(sk, dst, req, NULL); 478 479 if (skb) { 480 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 481 482 fl6->daddr = treq->rmt_addr; 483 skb_set_queue_mapping(skb, queue_mapping); 484 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); 485 err = net_xmit_eval(err); 486 } 487 488done: 489 return err; 490} 491 492static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req) 493{ 494 struct flowi6 fl6; 495 int res; 496 497 res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0); 498 if (!res) 499 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 500 return res; 501} 502 503static void tcp_v6_reqsk_destructor(struct request_sock *req) 504{ 505 kfree_skb(inet6_rsk(req)->pktopts); 506} 507 508#ifdef CONFIG_TCP_MD5SIG 509static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 510 const struct in6_addr *addr) 511{ 512 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6); 513} 514 515static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk, 516 struct sock *addr_sk) 517{ 518 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr); 519} 520 521static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk, 522 struct request_sock *req) 523{ 524 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 525} 526 527static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, 528 int optlen) 529{ 530 struct tcp_md5sig cmd; 531 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; 532 533 if (optlen < sizeof(cmd)) 534 return -EINVAL; 535 536 if (copy_from_user(&cmd, optval, sizeof(cmd))) 537 return -EFAULT; 538 539 if (sin6->sin6_family != AF_INET6) 540 return -EINVAL; 541 542 if (!cmd.tcpm_keylen) { 543 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) 544 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], 545 AF_INET); 546 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr, 547 AF_INET6); 548 } 549 550 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) 551 return -EINVAL; 552 553 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) 554 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], 555 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); 556 557 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr, 558 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); 559} 560 561static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 562 const struct in6_addr *daddr, 563 const struct in6_addr *saddr, int nbytes) 564{ 565 struct tcp6_pseudohdr *bp; 566 struct scatterlist sg; 567 568 bp = &hp->md5_blk.ip6; 569 /* 1. TCP pseudo-header (RFC2460) */ 570 bp->saddr = *saddr; 571 bp->daddr = *daddr; 572 bp->protocol = cpu_to_be32(IPPROTO_TCP); 573 bp->len = cpu_to_be32(nbytes); 574 575 sg_init_one(&sg, bp, sizeof(*bp)); 576 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); 577} 578 579static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 580 const struct in6_addr *daddr, struct in6_addr *saddr, 581 const struct tcphdr *th) 582{ 583 struct tcp_md5sig_pool *hp; 584 struct hash_desc *desc; 585 586 hp = tcp_get_md5sig_pool(); 587 if (!hp) 588 goto clear_hash_noput; 589 desc = &hp->md5_desc; 590 591 if (crypto_hash_init(desc)) 592 goto clear_hash; 593 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) 594 goto clear_hash; 595 if (tcp_md5_hash_header(hp, th)) 596 goto clear_hash; 597 if (tcp_md5_hash_key(hp, key)) 598 goto clear_hash; 599 if (crypto_hash_final(desc, md5_hash)) 600 goto clear_hash; 601 602 tcp_put_md5sig_pool(); 603 return 0; 604 605clear_hash: 606 tcp_put_md5sig_pool(); 607clear_hash_noput: 608 memset(md5_hash, 0, 16); 609 return 1; 610} 611 612static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key, 613 const struct sock *sk, 614 const struct request_sock *req, 615 const struct sk_buff *skb) 616{ 617 const struct in6_addr *saddr, *daddr; 618 struct tcp_md5sig_pool *hp; 619 struct hash_desc *desc; 620 const struct tcphdr *th = tcp_hdr(skb); 621 622 if (sk) { 623 saddr = &inet6_sk(sk)->saddr; 624 daddr = &inet6_sk(sk)->daddr; 625 } else if (req) { 626 saddr = &inet6_rsk(req)->loc_addr; 627 daddr = &inet6_rsk(req)->rmt_addr; 628 } else { 629 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 630 saddr = &ip6h->saddr; 631 daddr = &ip6h->daddr; 632 } 633 634 hp = tcp_get_md5sig_pool(); 635 if (!hp) 636 goto clear_hash_noput; 637 desc = &hp->md5_desc; 638 639 if (crypto_hash_init(desc)) 640 goto clear_hash; 641 642 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) 643 goto clear_hash; 644 if (tcp_md5_hash_header(hp, th)) 645 goto clear_hash; 646 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) 647 goto clear_hash; 648 if (tcp_md5_hash_key(hp, key)) 649 goto clear_hash; 650 if (crypto_hash_final(desc, md5_hash)) 651 goto clear_hash; 652 653 tcp_put_md5sig_pool(); 654 return 0; 655 656clear_hash: 657 tcp_put_md5sig_pool(); 658clear_hash_noput: 659 memset(md5_hash, 0, 16); 660 return 1; 661} 662 663static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) 664{ 665 const __u8 *hash_location = NULL; 666 struct tcp_md5sig_key *hash_expected; 667 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 668 const struct tcphdr *th = tcp_hdr(skb); 669 int genhash; 670 u8 newhash[16]; 671 672 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); 673 hash_location = tcp_parse_md5sig_option(th); 674 675 /* We've parsed the options - do we have a hash? */ 676 if (!hash_expected && !hash_location) 677 return 0; 678 679 if (hash_expected && !hash_location) { 680 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 681 return 1; 682 } 683 684 if (!hash_expected && hash_location) { 685 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 686 return 1; 687 } 688 689 /* check the signature */ 690 genhash = tcp_v6_md5_hash_skb(newhash, 691 hash_expected, 692 NULL, NULL, skb); 693 694 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 695 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n", 696 genhash ? "failed" : "mismatch", 697 &ip6h->saddr, ntohs(th->source), 698 &ip6h->daddr, ntohs(th->dest)); 699 return 1; 700 } 701 return 0; 702} 703#endif 704 705struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 706 .family = AF_INET6, 707 .obj_size = sizeof(struct tcp6_request_sock), 708 .rtx_syn_ack = tcp_v6_rtx_synack, 709 .send_ack = tcp_v6_reqsk_send_ack, 710 .destructor = tcp_v6_reqsk_destructor, 711 .send_reset = tcp_v6_send_reset, 712 .syn_ack_timeout = tcp_syn_ack_timeout, 713}; 714 715#ifdef CONFIG_TCP_MD5SIG 716static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { 717 .md5_lookup = tcp_v6_reqsk_md5_lookup, 718 .calc_md5_hash = tcp_v6_md5_hash_skb, 719}; 720#endif 721 722static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 723 u32 tsval, u32 tsecr, 724 struct tcp_md5sig_key *key, int rst, u8 tclass) 725{ 726 const struct tcphdr *th = tcp_hdr(skb); 727 struct tcphdr *t1; 728 struct sk_buff *buff; 729 struct flowi6 fl6; 730 struct net *net = dev_net(skb_dst(skb)->dev); 731 struct sock *ctl_sk = net->ipv6.tcp_sk; 732 unsigned int tot_len = sizeof(struct tcphdr); 733 struct dst_entry *dst; 734 __be32 *topt; 735 736 if (tsecr) 737 tot_len += TCPOLEN_TSTAMP_ALIGNED; 738#ifdef CONFIG_TCP_MD5SIG 739 if (key) 740 tot_len += TCPOLEN_MD5SIG_ALIGNED; 741#endif 742 743 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 744 GFP_ATOMIC); 745 if (buff == NULL) 746 return; 747 748 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 749 750 t1 = (struct tcphdr *) skb_push(buff, tot_len); 751 skb_reset_transport_header(buff); 752 753 /* Swap the send and the receive. */ 754 memset(t1, 0, sizeof(*t1)); 755 t1->dest = th->source; 756 t1->source = th->dest; 757 t1->doff = tot_len / 4; 758 t1->seq = htonl(seq); 759 t1->ack_seq = htonl(ack); 760 t1->ack = !rst || !th->ack; 761 t1->rst = rst; 762 t1->window = htons(win); 763 764 topt = (__be32 *)(t1 + 1); 765 766 if (tsecr) { 767 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 768 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 769 *topt++ = htonl(tsval); 770 *topt++ = htonl(tsecr); 771 } 772 773#ifdef CONFIG_TCP_MD5SIG 774 if (key) { 775 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 776 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); 777 tcp_v6_md5_hash_hdr((__u8 *)topt, key, 778 &ipv6_hdr(skb)->saddr, 779 &ipv6_hdr(skb)->daddr, t1); 780 } 781#endif 782 783 memset(&fl6, 0, sizeof(fl6)); 784 fl6.daddr = ipv6_hdr(skb)->saddr; 785 fl6.saddr = ipv6_hdr(skb)->daddr; 786 787 buff->ip_summed = CHECKSUM_PARTIAL; 788 buff->csum = 0; 789 790 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); 791 792 fl6.flowi6_proto = IPPROTO_TCP; 793 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 794 fl6.flowi6_oif = inet6_iif(skb); 795 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); 796 fl6.fl6_dport = t1->dest; 797 fl6.fl6_sport = t1->source; 798 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 799 800 /* Pass a socket to ip6_dst_lookup either it is for RST 801 * Underlying function will use this to retrieve the network 802 * namespace 803 */ 804 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); 805 if (!IS_ERR(dst)) { 806 skb_dst_set(buff, dst); 807 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); 808 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 809 if (rst) 810 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 811 return; 812 } 813 814 kfree_skb(buff); 815} 816 817static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) 818{ 819 const struct tcphdr *th = tcp_hdr(skb); 820 u32 seq = 0, ack_seq = 0; 821 struct tcp_md5sig_key *key = NULL; 822#ifdef CONFIG_TCP_MD5SIG 823 const __u8 *hash_location = NULL; 824 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 825 unsigned char newhash[16]; 826 int genhash; 827 struct sock *sk1 = NULL; 828#endif 829 830 if (th->rst) 831 return; 832 833 if (!ipv6_unicast_destination(skb)) 834 return; 835 836#ifdef CONFIG_TCP_MD5SIG 837 hash_location = tcp_parse_md5sig_option(th); 838 if (!sk && hash_location) { 839 /* 840 * active side is lost. Try to find listening socket through 841 * source port, and then find md5 key through listening socket. 842 * we are not loose security here: 843 * Incoming packet is checked with md5 hash with finding key, 844 * no RST generated if md5 hash doesn't match. 845 */ 846 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), 847 &tcp_hashinfo, &ipv6h->saddr, 848 th->source, &ipv6h->daddr, 849 ntohs(th->source), inet6_iif(skb)); 850 if (!sk1) 851 return; 852 853 rcu_read_lock(); 854 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); 855 if (!key) 856 goto release_sk1; 857 858 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb); 859 if (genhash || memcmp(hash_location, newhash, 16) != 0) 860 goto release_sk1; 861 } else { 862 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL; 863 } 864#endif 865 866 if (th->ack) 867 seq = ntohl(th->ack_seq); 868 else 869 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - 870 (th->doff << 2); 871 872 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0); 873 874#ifdef CONFIG_TCP_MD5SIG 875release_sk1: 876 if (sk1) { 877 rcu_read_unlock(); 878 sock_put(sk1); 879 } 880#endif 881} 882 883static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, 884 u32 win, u32 tsval, u32 tsecr, 885 struct tcp_md5sig_key *key, u8 tclass) 886{ 887 tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass); 888} 889 890static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) 891{ 892 struct inet_timewait_sock *tw = inet_twsk(sk); 893 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 894 895 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 896 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 897 tcp_time_stamp + tcptw->tw_ts_offset, 898 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw), 899 tw->tw_tclass); 900 901 inet_twsk_put(tw); 902} 903 904static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, 905 struct request_sock *req) 906{ 907 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, 908 req->rcv_wnd, tcp_time_stamp, req->ts_recent, 909 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0); 910} 911 912 913static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) 914{ 915 struct request_sock *req, **prev; 916 const struct tcphdr *th = tcp_hdr(skb); 917 struct sock *nsk; 918 919 /* Find possible connection requests. */ 920 req = inet6_csk_search_req(sk, &prev, th->source, 921 &ipv6_hdr(skb)->saddr, 922 &ipv6_hdr(skb)->daddr, inet6_iif(skb)); 923 if (req) 924 return tcp_check_req(sk, skb, req, prev, false); 925 926 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, 927 &ipv6_hdr(skb)->saddr, th->source, 928 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb)); 929 930 if (nsk) { 931 if (nsk->sk_state != TCP_TIME_WAIT) { 932 bh_lock_sock(nsk); 933 return nsk; 934 } 935 inet_twsk_put(inet_twsk(nsk)); 936 return NULL; 937 } 938 939#ifdef CONFIG_SYN_COOKIES 940 if (!th->syn) 941 sk = cookie_v6_check(sk, skb); 942#endif 943 return sk; 944} 945 946/* FIXME: this is substantially similar to the ipv4 code. 947 * Can some kind of merge be done? -- erics 948 */ 949static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) 950{ 951 struct tcp_options_received tmp_opt; 952 struct request_sock *req; 953 struct inet6_request_sock *treq; 954 struct ipv6_pinfo *np = inet6_sk(sk); 955 struct tcp_sock *tp = tcp_sk(sk); 956 __u32 isn = TCP_SKB_CB(skb)->when; 957 struct dst_entry *dst = NULL; 958 struct flowi6 fl6; 959 bool want_cookie = false; 960 961 if (skb->protocol == htons(ETH_P_IP)) 962 return tcp_v4_conn_request(sk, skb); 963 964 if (!ipv6_unicast_destination(skb)) 965 goto drop; 966 967 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 968 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); 969 if (!want_cookie) 970 goto drop; 971 } 972 973 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { 974 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 975 goto drop; 976 } 977 978 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 979 if (req == NULL) 980 goto drop; 981 982#ifdef CONFIG_TCP_MD5SIG 983 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops; 984#endif 985 986 tcp_clear_options(&tmp_opt); 987 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 988 tmp_opt.user_mss = tp->rx_opt.user_mss; 989 tcp_parse_options(skb, &tmp_opt, 0, NULL); 990 991 if (want_cookie && !tmp_opt.saw_tstamp) 992 tcp_clear_options(&tmp_opt); 993 994 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 995 tcp_openreq_init(req, &tmp_opt, skb); 996 997 treq = inet6_rsk(req); 998 treq->rmt_addr = ipv6_hdr(skb)->saddr; 999 treq->loc_addr = ipv6_hdr(skb)->daddr; 1000 if (!want_cookie || tmp_opt.tstamp_ok) 1001 TCP_ECN_create_request(req, skb, sock_net(sk)); 1002 1003 treq->iif = sk->sk_bound_dev_if; 1004 inet_rsk(req)->ir_mark = inet_request_mark(sk, skb); 1005 1006 /* So that link locals have meaning */ 1007 if (!sk->sk_bound_dev_if && 1008 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1009 treq->iif = inet6_iif(skb); 1010 1011 if (!isn) { 1012 if (ipv6_opt_accepted(sk, skb) || 1013 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1014 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1015 atomic_inc(&skb->users); 1016 treq->pktopts = skb; 1017 } 1018 1019 if (want_cookie) { 1020 isn = cookie_v6_init_sequence(sk, skb, &req->mss); 1021 req->cookie_ts = tmp_opt.tstamp_ok; 1022 goto have_isn; 1023 } 1024 1025 /* VJ's idea. We save last timestamp seen 1026 * from the destination in peer table, when entering 1027 * state TIME-WAIT, and check against it before 1028 * accepting new connection request. 1029 * 1030 * If "isn" is not zero, this request hit alive 1031 * timewait bucket, so that all the necessary checks 1032 * are made in the function processing timewait state. 1033 */ 1034 if (tmp_opt.saw_tstamp && 1035 tcp_death_row.sysctl_tw_recycle && 1036 (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) { 1037 if (!tcp_peer_is_proven(req, dst, true)) { 1038 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); 1039 goto drop_and_release; 1040 } 1041 } 1042 /* Kill the following clause, if you dislike this way. */ 1043 else if (!sysctl_tcp_syncookies && 1044 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 1045 (sysctl_max_syn_backlog >> 2)) && 1046 !tcp_peer_is_proven(req, dst, false)) { 1047 /* Without syncookies last quarter of 1048 * backlog is filled with destinations, 1049 * proven to be alive. 1050 * It means that we continue to communicate 1051 * to destinations, already remembered 1052 * to the moment of synflood. 1053 */ 1054 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n", 1055 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source)); 1056 goto drop_and_release; 1057 } 1058 1059 isn = tcp_v6_init_sequence(skb); 1060 } 1061have_isn: 1062 tcp_rsk(req)->snt_isn = isn; 1063 1064 if (security_inet_conn_request(sk, skb, req)) 1065 goto drop_and_release; 1066 1067 if (tcp_v6_send_synack(sk, dst, &fl6, req, 1068 skb_get_queue_mapping(skb)) || 1069 want_cookie) 1070 goto drop_and_free; 1071 1072 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1073 tcp_rsk(req)->listener = NULL; 1074 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); 1075 return 0; 1076 1077drop_and_release: 1078 dst_release(dst); 1079drop_and_free: 1080 reqsk_free(req); 1081drop: 1082 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1083 return 0; /* don't send reset */ 1084} 1085 1086static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, 1087 struct request_sock *req, 1088 struct dst_entry *dst) 1089{ 1090 struct inet6_request_sock *treq; 1091 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 1092 struct tcp6_sock *newtcp6sk; 1093 struct inet_sock *newinet; 1094 struct tcp_sock *newtp; 1095 struct sock *newsk; 1096#ifdef CONFIG_TCP_MD5SIG 1097 struct tcp_md5sig_key *key; 1098#endif 1099 struct flowi6 fl6; 1100 1101 if (skb->protocol == htons(ETH_P_IP)) { 1102 /* 1103 * v6 mapped 1104 */ 1105 1106 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); 1107 1108 if (newsk == NULL) 1109 return NULL; 1110 1111 newtcp6sk = (struct tcp6_sock *)newsk; 1112 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; 1113 1114 newinet = inet_sk(newsk); 1115 newnp = inet6_sk(newsk); 1116 newtp = tcp_sk(newsk); 1117 1118 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1119 1120 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); 1121 1122 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); 1123 1124 newnp->rcv_saddr = newnp->saddr; 1125 1126 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; 1127 newsk->sk_backlog_rcv = tcp_v4_do_rcv; 1128#ifdef CONFIG_TCP_MD5SIG 1129 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1130#endif 1131 1132 newnp->ipv6_ac_list = NULL; 1133 newnp->ipv6_fl_list = NULL; 1134 newnp->pktoptions = NULL; 1135 newnp->opt = NULL; 1136 newnp->mcast_oif = inet6_iif(skb); 1137 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1138 newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb)); 1139 1140 /* 1141 * No need to charge this sock to the relevant IPv6 refcnt debug socks count 1142 * here, tcp_create_openreq_child now does this for us, see the comment in 1143 * that function for the gory details. -acme 1144 */ 1145 1146 /* It is tricky place. Until this moment IPv4 tcp 1147 worked with IPv6 icsk.icsk_af_ops. 1148 Sync it now. 1149 */ 1150 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); 1151 1152 return newsk; 1153 } 1154 1155 treq = inet6_rsk(req); 1156 1157 if (sk_acceptq_is_full(sk)) 1158 goto out_overflow; 1159 1160 if (!dst) { 1161 dst = inet6_csk_route_req(sk, &fl6, req); 1162 if (!dst) 1163 goto out; 1164 } 1165 1166 newsk = tcp_create_openreq_child(sk, req, skb); 1167 if (newsk == NULL) 1168 goto out_nonewsk; 1169 1170 /* 1171 * No need to charge this sock to the relevant IPv6 refcnt debug socks 1172 * count here, tcp_create_openreq_child now does this for us, see the 1173 * comment in that function for the gory details. -acme 1174 */ 1175 1176 newsk->sk_gso_type = SKB_GSO_TCPV6; 1177 __ip6_dst_store(newsk, dst, NULL, NULL); 1178 inet6_sk_rx_dst_set(newsk, skb); 1179 1180 newtcp6sk = (struct tcp6_sock *)newsk; 1181 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; 1182 1183 newtp = tcp_sk(newsk); 1184 newinet = inet_sk(newsk); 1185 newnp = inet6_sk(newsk); 1186 1187 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1188 1189 newnp->daddr = treq->rmt_addr; 1190 newnp->saddr = treq->loc_addr; 1191 newnp->rcv_saddr = treq->loc_addr; 1192 newsk->sk_bound_dev_if = treq->iif; 1193 1194 /* Now IPv6 options... 1195 1196 First: no IPv4 options. 1197 */ 1198 newinet->inet_opt = NULL; 1199 newnp->ipv6_ac_list = NULL; 1200 newnp->ipv6_fl_list = NULL; 1201 1202 /* Clone RX bits */ 1203 newnp->rxopt.all = np->rxopt.all; 1204 1205 /* Clone pktoptions received with SYN */ 1206 newnp->pktoptions = NULL; 1207 if (treq->pktopts != NULL) { 1208 newnp->pktoptions = skb_clone(treq->pktopts, 1209 sk_gfp_atomic(sk, GFP_ATOMIC)); 1210 consume_skb(treq->pktopts); 1211 treq->pktopts = NULL; 1212 if (newnp->pktoptions) 1213 skb_set_owner_r(newnp->pktoptions, newsk); 1214 } 1215 newnp->opt = NULL; 1216 newnp->mcast_oif = inet6_iif(skb); 1217 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1218 newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb)); 1219 1220 /* Clone native IPv6 options from listening socket (if any) 1221 1222 Yes, keeping reference count would be much more clever, 1223 but we make one more one thing there: reattach optmem 1224 to newsk. 1225 */ 1226 if (np->opt) 1227 newnp->opt = ipv6_dup_options(newsk, np->opt); 1228 1229 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1230 if (newnp->opt) 1231 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + 1232 newnp->opt->opt_flen); 1233 1234 tcp_mtup_init(newsk); 1235 tcp_sync_mss(newsk, dst_mtu(dst)); 1236 newtp->advmss = dst_metric_advmss(dst); 1237 if (tcp_sk(sk)->rx_opt.user_mss && 1238 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) 1239 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; 1240 1241 tcp_initialize_rcv_mss(newsk); 1242 tcp_synack_rtt_meas(newsk, req); 1243 newtp->total_retrans = req->num_retrans; 1244 1245 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; 1246 newinet->inet_rcv_saddr = LOOPBACK4_IPV6; 1247 1248#ifdef CONFIG_TCP_MD5SIG 1249 /* Copy over the MD5 key from the original socket */ 1250 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) { 1251 /* We're using one, so create a matching key 1252 * on the newsk structure. If we fail to get 1253 * memory, then we end up not copying the key 1254 * across. Shucks. 1255 */ 1256 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr, 1257 AF_INET6, key->key, key->keylen, 1258 sk_gfp_atomic(sk, GFP_ATOMIC)); 1259 } 1260#endif 1261 1262 if (__inet_inherit_port(sk, newsk) < 0) { 1263 inet_csk_prepare_forced_close(newsk); 1264 tcp_done(newsk); 1265 goto out; 1266 } 1267 __inet6_hash(newsk, NULL); 1268 1269 return newsk; 1270 1271out_overflow: 1272 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 1273out_nonewsk: 1274 dst_release(dst); 1275out: 1276 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1277 return NULL; 1278} 1279 1280static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) 1281{ 1282 if (skb->ip_summed == CHECKSUM_COMPLETE) { 1283 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr, 1284 &ipv6_hdr(skb)->daddr, skb->csum)) { 1285 skb->ip_summed = CHECKSUM_UNNECESSARY; 1286 return 0; 1287 } 1288 } 1289 1290 skb->csum = ~csum_unfold(tcp_v6_check(skb->len, 1291 &ipv6_hdr(skb)->saddr, 1292 &ipv6_hdr(skb)->daddr, 0)); 1293 1294 if (skb->len <= 76) { 1295 return __skb_checksum_complete(skb); 1296 } 1297 return 0; 1298} 1299 1300/* The socket must have it's spinlock held when we get 1301 * here. 1302 * 1303 * We have a potential double-lock case here, so even when 1304 * doing backlog processing we use the BH locking scheme. 1305 * This is because we cannot sleep with the original spinlock 1306 * held. 1307 */ 1308static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) 1309{ 1310 struct ipv6_pinfo *np = inet6_sk(sk); 1311 struct tcp_sock *tp; 1312 struct sk_buff *opt_skb = NULL; 1313 1314 /* Imagine: socket is IPv6. IPv4 packet arrives, 1315 goes to IPv4 receive handler and backlogged. 1316 From backlog it always goes here. Kerboom... 1317 Fortunately, tcp_rcv_established and rcv_established 1318 handle them correctly, but it is not case with 1319 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK 1320 */ 1321 1322 if (skb->protocol == htons(ETH_P_IP)) 1323 return tcp_v4_do_rcv(sk, skb); 1324 1325#ifdef CONFIG_TCP_MD5SIG 1326 if (tcp_v6_inbound_md5_hash (sk, skb)) 1327 goto discard; 1328#endif 1329 1330 if (sk_filter(sk, skb)) 1331 goto discard; 1332 1333 /* 1334 * socket locking is here for SMP purposes as backlog rcv 1335 * is currently called with bh processing disabled. 1336 */ 1337 1338 /* Do Stevens' IPV6_PKTOPTIONS. 1339 1340 Yes, guys, it is the only place in our code, where we 1341 may make it not affecting IPv4. 1342 The rest of code is protocol independent, 1343 and I do not like idea to uglify IPv4. 1344 1345 Actually, all the idea behind IPV6_PKTOPTIONS 1346 looks not very well thought. For now we latch 1347 options, received in the last packet, enqueued 1348 by tcp. Feel free to propose better solution. 1349 --ANK (980728) 1350 */ 1351 if (np->rxopt.all) 1352 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); 1353 1354 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1355 struct dst_entry *dst = sk->sk_rx_dst; 1356 1357 sock_rps_save_rxhash(sk, skb); 1358 if (dst) { 1359 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || 1360 dst->ops->check(dst, np->rx_dst_cookie) == NULL) { 1361 dst_release(dst); 1362 sk->sk_rx_dst = NULL; 1363 } 1364 } 1365 1366 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) 1367 goto reset; 1368 if (opt_skb) 1369 goto ipv6_pktoptions; 1370 return 0; 1371 } 1372 1373 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) 1374 goto csum_err; 1375 1376 if (sk->sk_state == TCP_LISTEN) { 1377 struct sock *nsk = tcp_v6_hnd_req(sk, skb); 1378 if (!nsk) 1379 goto discard; 1380 1381 /* 1382 * Queue it on the new socket if the new socket is active, 1383 * otherwise we just shortcircuit this and continue with 1384 * the new socket.. 1385 */ 1386 if(nsk != sk) { 1387 sock_rps_save_rxhash(nsk, skb); 1388 if (tcp_child_process(sk, nsk, skb)) 1389 goto reset; 1390 if (opt_skb) 1391 __kfree_skb(opt_skb); 1392 return 0; 1393 } 1394 } else 1395 sock_rps_save_rxhash(sk, skb); 1396 1397 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) 1398 goto reset; 1399 if (opt_skb) 1400 goto ipv6_pktoptions; 1401 return 0; 1402 1403reset: 1404 tcp_v6_send_reset(sk, skb); 1405discard: 1406 if (opt_skb) 1407 __kfree_skb(opt_skb); 1408 kfree_skb(skb); 1409 return 0; 1410csum_err: 1411 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); 1412 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 1413 goto discard; 1414 1415 1416ipv6_pktoptions: 1417 /* Do you ask, what is it? 1418 1419 1. skb was enqueued by tcp. 1420 2. skb is added to tail of read queue, rather than out of order. 1421 3. socket is not in passive state. 1422 4. Finally, it really contains options, which user wants to receive. 1423 */ 1424 tp = tcp_sk(sk); 1425 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && 1426 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 1427 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) 1428 np->mcast_oif = inet6_iif(opt_skb); 1429 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) 1430 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; 1431 if (np->rxopt.bits.rxtclass) 1432 np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb)); 1433 if (ipv6_opt_accepted(sk, opt_skb)) { 1434 skb_set_owner_r(opt_skb, sk); 1435 opt_skb = xchg(&np->pktoptions, opt_skb); 1436 } else { 1437 __kfree_skb(opt_skb); 1438 opt_skb = xchg(&np->pktoptions, NULL); 1439 } 1440 } 1441 1442 kfree_skb(opt_skb); 1443 return 0; 1444} 1445 1446static int tcp_v6_rcv(struct sk_buff *skb) 1447{ 1448 const struct tcphdr *th; 1449 const struct ipv6hdr *hdr; 1450 struct sock *sk; 1451 int ret; 1452 struct net *net = dev_net(skb->dev); 1453 1454 if (skb->pkt_type != PACKET_HOST) 1455 goto discard_it; 1456 1457 /* 1458 * Count it even if it's bad. 1459 */ 1460 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS); 1461 1462 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 1463 goto discard_it; 1464 1465 th = tcp_hdr(skb); 1466 1467 if (th->doff < sizeof(struct tcphdr)/4) 1468 goto bad_packet; 1469 if (!pskb_may_pull(skb, th->doff*4)) 1470 goto discard_it; 1471 1472 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb)) 1473 goto csum_error; 1474 1475 th = tcp_hdr(skb); 1476 hdr = ipv6_hdr(skb); 1477 TCP_SKB_CB(skb)->seq = ntohl(th->seq); 1478 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + 1479 skb->len - th->doff*4); 1480 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1481 TCP_SKB_CB(skb)->when = 0; 1482 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); 1483 TCP_SKB_CB(skb)->sacked = 0; 1484 1485 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); 1486 if (!sk) 1487 goto no_tcp_socket; 1488 1489process: 1490 if (sk->sk_state == TCP_TIME_WAIT) 1491 goto do_time_wait; 1492 1493 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { 1494 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); 1495 goto discard_and_relse; 1496 } 1497 1498 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1499 goto discard_and_relse; 1500 1501 if (sk_filter(sk, skb)) 1502 goto discard_and_relse; 1503 1504 skb->dev = NULL; 1505 1506 bh_lock_sock_nested(sk); 1507 ret = 0; 1508 if (!sock_owned_by_user(sk)) { 1509#ifdef CONFIG_NET_DMA 1510 struct tcp_sock *tp = tcp_sk(sk); 1511 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1512 tp->ucopy.dma_chan = net_dma_find_channel(); 1513 if (tp->ucopy.dma_chan) 1514 ret = tcp_v6_do_rcv(sk, skb); 1515 else 1516#endif 1517 { 1518 if (!tcp_prequeue(sk, skb)) 1519 ret = tcp_v6_do_rcv(sk, skb); 1520 } 1521 } else if (unlikely(sk_add_backlog(sk, skb, 1522 sk->sk_rcvbuf + sk->sk_sndbuf))) { 1523 bh_unlock_sock(sk); 1524 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); 1525 goto discard_and_relse; 1526 } 1527 bh_unlock_sock(sk); 1528 1529 sock_put(sk); 1530 return ret ? -1 : 0; 1531 1532no_tcp_socket: 1533 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 1534 goto discard_it; 1535 1536 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1537csum_error: 1538 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); 1539bad_packet: 1540 TCP_INC_STATS_BH(net, TCP_MIB_INERRS); 1541 } else { 1542 tcp_v6_send_reset(NULL, skb); 1543 } 1544 1545discard_it: 1546 kfree_skb(skb); 1547 return 0; 1548 1549discard_and_relse: 1550 sock_put(sk); 1551 goto discard_it; 1552 1553do_time_wait: 1554 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1555 inet_twsk_put(inet_twsk(sk)); 1556 goto discard_it; 1557 } 1558 1559 if (skb->len < (th->doff<<2)) { 1560 inet_twsk_put(inet_twsk(sk)); 1561 goto bad_packet; 1562 } 1563 if (tcp_checksum_complete(skb)) { 1564 inet_twsk_put(inet_twsk(sk)); 1565 goto csum_error; 1566 } 1567 1568 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1569 case TCP_TW_SYN: 1570 { 1571 struct sock *sk2; 1572 1573 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, 1574 &ipv6_hdr(skb)->saddr, th->source, 1575 &ipv6_hdr(skb)->daddr, 1576 ntohs(th->dest), inet6_iif(skb)); 1577 if (sk2 != NULL) { 1578 struct inet_timewait_sock *tw = inet_twsk(sk); 1579 inet_twsk_deschedule(tw, &tcp_death_row); 1580 inet_twsk_put(tw); 1581 sk = sk2; 1582 goto process; 1583 } 1584 /* Fall through to ACK */ 1585 } 1586 case TCP_TW_ACK: 1587 tcp_v6_timewait_ack(sk, skb); 1588 break; 1589 case TCP_TW_RST: 1590 goto no_tcp_socket; 1591 case TCP_TW_SUCCESS:; 1592 } 1593 goto discard_it; 1594} 1595 1596static void tcp_v6_early_demux(struct sk_buff *skb) 1597{ 1598 const struct ipv6hdr *hdr; 1599 const struct tcphdr *th; 1600 struct sock *sk; 1601 1602 if (skb->pkt_type != PACKET_HOST) 1603 return; 1604 1605 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) 1606 return; 1607 1608 hdr = ipv6_hdr(skb); 1609 th = tcp_hdr(skb); 1610 1611 if (th->doff < sizeof(struct tcphdr) / 4) 1612 return; 1613 1614 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, 1615 &hdr->saddr, th->source, 1616 &hdr->daddr, ntohs(th->dest), 1617 inet6_iif(skb)); 1618 if (sk) { 1619 skb->sk = sk; 1620 skb->destructor = sock_edemux; 1621 if (sk->sk_state != TCP_TIME_WAIT) { 1622 struct dst_entry *dst = sk->sk_rx_dst; 1623 1624 if (dst) 1625 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); 1626 if (dst && 1627 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) 1628 skb_dst_set_noref(skb, dst); 1629 } 1630 } 1631} 1632 1633static struct timewait_sock_ops tcp6_timewait_sock_ops = { 1634 .twsk_obj_size = sizeof(struct tcp6_timewait_sock), 1635 .twsk_unique = tcp_twsk_unique, 1636 .twsk_destructor= tcp_twsk_destructor, 1637}; 1638 1639static const struct inet_connection_sock_af_ops ipv6_specific = { 1640 .queue_xmit = inet6_csk_xmit, 1641 .send_check = tcp_v6_send_check, 1642 .rebuild_header = inet6_sk_rebuild_header, 1643 .sk_rx_dst_set = inet6_sk_rx_dst_set, 1644 .conn_request = tcp_v6_conn_request, 1645 .syn_recv_sock = tcp_v6_syn_recv_sock, 1646 .net_header_len = sizeof(struct ipv6hdr), 1647 .net_frag_header_len = sizeof(struct frag_hdr), 1648 .setsockopt = ipv6_setsockopt, 1649 .getsockopt = ipv6_getsockopt, 1650 .addr2sockaddr = inet6_csk_addr2sockaddr, 1651 .sockaddr_len = sizeof(struct sockaddr_in6), 1652 .bind_conflict = inet6_csk_bind_conflict, 1653#ifdef CONFIG_COMPAT 1654 .compat_setsockopt = compat_ipv6_setsockopt, 1655 .compat_getsockopt = compat_ipv6_getsockopt, 1656#endif 1657}; 1658 1659#ifdef CONFIG_TCP_MD5SIG 1660static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { 1661 .md5_lookup = tcp_v6_md5_lookup, 1662 .calc_md5_hash = tcp_v6_md5_hash_skb, 1663 .md5_parse = tcp_v6_parse_md5_keys, 1664}; 1665#endif 1666 1667/* 1668 * TCP over IPv4 via INET6 API 1669 */ 1670 1671static const struct inet_connection_sock_af_ops ipv6_mapped = { 1672 .queue_xmit = ip_queue_xmit, 1673 .send_check = tcp_v4_send_check, 1674 .rebuild_header = inet_sk_rebuild_header, 1675 .sk_rx_dst_set = inet_sk_rx_dst_set, 1676 .conn_request = tcp_v6_conn_request, 1677 .syn_recv_sock = tcp_v6_syn_recv_sock, 1678 .net_header_len = sizeof(struct iphdr), 1679 .setsockopt = ipv6_setsockopt, 1680 .getsockopt = ipv6_getsockopt, 1681 .addr2sockaddr = inet6_csk_addr2sockaddr, 1682 .sockaddr_len = sizeof(struct sockaddr_in6), 1683 .bind_conflict = inet6_csk_bind_conflict, 1684#ifdef CONFIG_COMPAT 1685 .compat_setsockopt = compat_ipv6_setsockopt, 1686 .compat_getsockopt = compat_ipv6_getsockopt, 1687#endif 1688}; 1689 1690#ifdef CONFIG_TCP_MD5SIG 1691static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { 1692 .md5_lookup = tcp_v4_md5_lookup, 1693 .calc_md5_hash = tcp_v4_md5_hash_skb, 1694 .md5_parse = tcp_v6_parse_md5_keys, 1695}; 1696#endif 1697 1698/* NOTE: A lot of things set to zero explicitly by call to 1699 * sk_alloc() so need not be done here. 1700 */ 1701static int tcp_v6_init_sock(struct sock *sk) 1702{ 1703 struct inet_connection_sock *icsk = inet_csk(sk); 1704 1705 tcp_init_sock(sk); 1706 1707 icsk->icsk_af_ops = &ipv6_specific; 1708 1709#ifdef CONFIG_TCP_MD5SIG 1710 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; 1711#endif 1712 1713 return 0; 1714} 1715 1716static void tcp_v6_destroy_sock(struct sock *sk) 1717{ 1718 tcp_v4_destroy_sock(sk); 1719 inet6_destroy_sock(sk); 1720} 1721 1722#ifdef CONFIG_PROC_FS 1723/* Proc filesystem TCPv6 sock list dumping. */ 1724static void get_openreq6(struct seq_file *seq, 1725 const struct sock *sk, struct request_sock *req, int i, kuid_t uid) 1726{ 1727 int ttd = req->expires - jiffies; 1728 const struct in6_addr *src = &inet6_rsk(req)->loc_addr; 1729 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr; 1730 1731 if (ttd < 0) 1732 ttd = 0; 1733 1734 seq_printf(seq, 1735 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1736 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", 1737 i, 1738 src->s6_addr32[0], src->s6_addr32[1], 1739 src->s6_addr32[2], src->s6_addr32[3], 1740 ntohs(inet_rsk(req)->loc_port), 1741 dest->s6_addr32[0], dest->s6_addr32[1], 1742 dest->s6_addr32[2], dest->s6_addr32[3], 1743 ntohs(inet_rsk(req)->rmt_port), 1744 TCP_SYN_RECV, 1745 0,0, /* could print option size, but that is af dependent. */ 1746 1, /* timers active (only the expire timer) */ 1747 jiffies_to_clock_t(ttd), 1748 req->num_timeout, 1749 from_kuid_munged(seq_user_ns(seq), uid), 1750 0, /* non standard timer */ 1751 0, /* open_requests have no inode */ 1752 0, req); 1753} 1754 1755static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) 1756{ 1757 const struct in6_addr *dest, *src; 1758 __u16 destp, srcp; 1759 int timer_active; 1760 unsigned long timer_expires; 1761 const struct inet_sock *inet = inet_sk(sp); 1762 const struct tcp_sock *tp = tcp_sk(sp); 1763 const struct inet_connection_sock *icsk = inet_csk(sp); 1764 const struct ipv6_pinfo *np = inet6_sk(sp); 1765 1766 dest = &np->daddr; 1767 src = &np->rcv_saddr; 1768 destp = ntohs(inet->inet_dport); 1769 srcp = ntohs(inet->inet_sport); 1770 1771 if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 1772 timer_active = 1; 1773 timer_expires = icsk->icsk_timeout; 1774 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { 1775 timer_active = 4; 1776 timer_expires = icsk->icsk_timeout; 1777 } else if (timer_pending(&sp->sk_timer)) { 1778 timer_active = 2; 1779 timer_expires = sp->sk_timer.expires; 1780 } else { 1781 timer_active = 0; 1782 timer_expires = jiffies; 1783 } 1784 1785 seq_printf(seq, 1786 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1787 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n", 1788 i, 1789 src->s6_addr32[0], src->s6_addr32[1], 1790 src->s6_addr32[2], src->s6_addr32[3], srcp, 1791 dest->s6_addr32[0], dest->s6_addr32[1], 1792 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1793 sp->sk_state, 1794 tp->write_seq-tp->snd_una, 1795 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 1796 timer_active, 1797 jiffies_delta_to_clock_t(timer_expires - jiffies), 1798 icsk->icsk_retransmits, 1799 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 1800 icsk->icsk_probes_out, 1801 sock_i_ino(sp), 1802 atomic_read(&sp->sk_refcnt), sp, 1803 jiffies_to_clock_t(icsk->icsk_rto), 1804 jiffies_to_clock_t(icsk->icsk_ack.ato), 1805 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, 1806 tp->snd_cwnd, 1807 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh 1808 ); 1809} 1810 1811static void get_timewait6_sock(struct seq_file *seq, 1812 struct inet_timewait_sock *tw, int i) 1813{ 1814 const struct in6_addr *dest, *src; 1815 __u16 destp, srcp; 1816 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 1817 long delta = tw->tw_ttd - jiffies; 1818 1819 dest = &tw6->tw_v6_daddr; 1820 src = &tw6->tw_v6_rcv_saddr; 1821 destp = ntohs(tw->tw_dport); 1822 srcp = ntohs(tw->tw_sport); 1823 1824 seq_printf(seq, 1825 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1826 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", 1827 i, 1828 src->s6_addr32[0], src->s6_addr32[1], 1829 src->s6_addr32[2], src->s6_addr32[3], srcp, 1830 dest->s6_addr32[0], dest->s6_addr32[1], 1831 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1832 tw->tw_substate, 0, 0, 1833 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, 1834 atomic_read(&tw->tw_refcnt), tw); 1835} 1836 1837static int tcp6_seq_show(struct seq_file *seq, void *v) 1838{ 1839 struct tcp_iter_state *st; 1840 1841 if (v == SEQ_START_TOKEN) { 1842 seq_puts(seq, 1843 " sl " 1844 "local_address " 1845 "remote_address " 1846 "st tx_queue rx_queue tr tm->when retrnsmt" 1847 " uid timeout inode\n"); 1848 goto out; 1849 } 1850 st = seq->private; 1851 1852 switch (st->state) { 1853 case TCP_SEQ_STATE_LISTENING: 1854 case TCP_SEQ_STATE_ESTABLISHED: 1855 get_tcp6_sock(seq, v, st->num); 1856 break; 1857 case TCP_SEQ_STATE_OPENREQ: 1858 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid); 1859 break; 1860 case TCP_SEQ_STATE_TIME_WAIT: 1861 get_timewait6_sock(seq, v, st->num); 1862 break; 1863 } 1864out: 1865 return 0; 1866} 1867 1868static const struct file_operations tcp6_afinfo_seq_fops = { 1869 .owner = THIS_MODULE, 1870 .open = tcp_seq_open, 1871 .read = seq_read, 1872 .llseek = seq_lseek, 1873 .release = seq_release_net 1874}; 1875 1876static struct tcp_seq_afinfo tcp6_seq_afinfo = { 1877 .name = "tcp6", 1878 .family = AF_INET6, 1879 .seq_fops = &tcp6_afinfo_seq_fops, 1880 .seq_ops = { 1881 .show = tcp6_seq_show, 1882 }, 1883}; 1884 1885int __net_init tcp6_proc_init(struct net *net) 1886{ 1887 return tcp_proc_register(net, &tcp6_seq_afinfo); 1888} 1889 1890void tcp6_proc_exit(struct net *net) 1891{ 1892 tcp_proc_unregister(net, &tcp6_seq_afinfo); 1893} 1894#endif 1895 1896static void tcp_v6_clear_sk(struct sock *sk, int size) 1897{ 1898 struct inet_sock *inet = inet_sk(sk); 1899 1900 /* we do not want to clear pinet6 field, because of RCU lookups */ 1901 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6)); 1902 1903 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); 1904 memset(&inet->pinet6 + 1, 0, size); 1905} 1906 1907struct proto tcpv6_prot = { 1908 .name = "TCPv6", 1909 .owner = THIS_MODULE, 1910 .close = tcp_close, 1911 .connect = tcp_v6_connect, 1912 .disconnect = tcp_disconnect, 1913 .accept = inet_csk_accept, 1914 .ioctl = tcp_ioctl, 1915 .init = tcp_v6_init_sock, 1916 .destroy = tcp_v6_destroy_sock, 1917 .shutdown = tcp_shutdown, 1918 .setsockopt = tcp_setsockopt, 1919 .getsockopt = tcp_getsockopt, 1920 .recvmsg = tcp_recvmsg, 1921 .sendmsg = tcp_sendmsg, 1922 .sendpage = tcp_sendpage, 1923 .backlog_rcv = tcp_v6_do_rcv, 1924 .release_cb = tcp_release_cb, 1925 .mtu_reduced = tcp_v6_mtu_reduced, 1926 .hash = tcp_v6_hash, 1927 .unhash = inet_unhash, 1928 .get_port = inet_csk_get_port, 1929 .enter_memory_pressure = tcp_enter_memory_pressure, 1930 .sockets_allocated = &tcp_sockets_allocated, 1931 .memory_allocated = &tcp_memory_allocated, 1932 .memory_pressure = &tcp_memory_pressure, 1933 .orphan_count = &tcp_orphan_count, 1934 .sysctl_wmem = sysctl_tcp_wmem, 1935 .sysctl_rmem = sysctl_tcp_rmem, 1936 .max_header = MAX_TCP_HEADER, 1937 .obj_size = sizeof(struct tcp6_sock), 1938 .slab_flags = SLAB_DESTROY_BY_RCU, 1939 .twsk_prot = &tcp6_timewait_sock_ops, 1940 .rsk_prot = &tcp6_request_sock_ops, 1941 .h.hashinfo = &tcp_hashinfo, 1942 .no_autobind = true, 1943#ifdef CONFIG_COMPAT 1944 .compat_setsockopt = compat_tcp_setsockopt, 1945 .compat_getsockopt = compat_tcp_getsockopt, 1946#endif 1947#ifdef CONFIG_MEMCG_KMEM 1948 .proto_cgroup = tcp_proto_cgroup, 1949#endif 1950 .clear_sk = tcp_v6_clear_sk, 1951}; 1952 1953static const struct inet6_protocol tcpv6_protocol = { 1954 .early_demux = tcp_v6_early_demux, 1955 .handler = tcp_v6_rcv, 1956 .err_handler = tcp_v6_err, 1957 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1958}; 1959 1960static struct inet_protosw tcpv6_protosw = { 1961 .type = SOCK_STREAM, 1962 .protocol = IPPROTO_TCP, 1963 .prot = &tcpv6_prot, 1964 .ops = &inet6_stream_ops, 1965 .no_check = 0, 1966 .flags = INET_PROTOSW_PERMANENT | 1967 INET_PROTOSW_ICSK, 1968}; 1969 1970static int __net_init tcpv6_net_init(struct net *net) 1971{ 1972 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, 1973 SOCK_RAW, IPPROTO_TCP, net); 1974} 1975 1976static void __net_exit tcpv6_net_exit(struct net *net) 1977{ 1978 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 1979} 1980 1981static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list) 1982{ 1983 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); 1984} 1985 1986static struct pernet_operations tcpv6_net_ops = { 1987 .init = tcpv6_net_init, 1988 .exit = tcpv6_net_exit, 1989 .exit_batch = tcpv6_net_exit_batch, 1990}; 1991 1992int __init tcpv6_init(void) 1993{ 1994 int ret; 1995 1996 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP); 1997 if (ret) 1998 goto out; 1999 2000 /* register inet6 protocol */ 2001 ret = inet6_register_protosw(&tcpv6_protosw); 2002 if (ret) 2003 goto out_tcpv6_protocol; 2004 2005 ret = register_pernet_subsys(&tcpv6_net_ops); 2006 if (ret) 2007 goto out_tcpv6_protosw; 2008out: 2009 return ret; 2010 2011out_tcpv6_protosw: 2012 inet6_unregister_protosw(&tcpv6_protosw); 2013out_tcpv6_protocol: 2014 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); 2015 goto out; 2016} 2017 2018void tcpv6_exit(void) 2019{ 2020 unregister_pernet_subsys(&tcpv6_net_ops); 2021 inet6_unregister_protosw(&tcpv6_protosw); 2022 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); 2023} 2024