xfrm_state.c revision cf35f43e6e41b160d8dedd80a127210fd3be9ada
1/* 2 * xfrm_state.c 3 * 4 * Changes: 5 * Mitsuru KANDA @USAGI 6 * Kazunori MIYAZAWA @USAGI 7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 8 * IPv6 support 9 * YOSHIFUJI Hideaki @USAGI 10 * Split up af-specific functions 11 * Derek Atkins <derek@ihtfp.com> 12 * Add UDP Encapsulation 13 * 14 */ 15 16#include <linux/workqueue.h> 17#include <net/xfrm.h> 18#include <linux/pfkeyv2.h> 19#include <linux/ipsec.h> 20#include <linux/module.h> 21#include <linux/cache.h> 22#include <linux/audit.h> 23#include <asm/uaccess.h> 24 25#include "xfrm_hash.h" 26 27struct sock *xfrm_nl; 28EXPORT_SYMBOL(xfrm_nl); 29 30u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME; 31EXPORT_SYMBOL(sysctl_xfrm_aevent_etime); 32 33u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE; 34EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth); 35 36u32 sysctl_xfrm_acq_expires __read_mostly = 30; 37 38/* Each xfrm_state may be linked to two tables: 39 40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl) 41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given 42 destination/tunnel endpoint. (output) 43 */ 44 45static DEFINE_SPINLOCK(xfrm_state_lock); 46 47/* Hash table to find appropriate SA towards given target (endpoint 48 * of tunnel or destination of transport mode) allowed by selector. 49 * 50 * Main use is finding SA after policy selected tunnel or transport mode. 51 * Also, it can be used by ah/esp icmp error handler to find offending SA. 
52 */ 53static struct hlist_head *xfrm_state_bydst __read_mostly; 54static struct hlist_head *xfrm_state_bysrc __read_mostly; 55static struct hlist_head *xfrm_state_byspi __read_mostly; 56static unsigned int xfrm_state_hmask __read_mostly; 57static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 58static unsigned int xfrm_state_num; 59static unsigned int xfrm_state_genid; 60 61static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 62static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); 63 64#ifdef CONFIG_AUDITSYSCALL 65static void xfrm_audit_state_replay(struct xfrm_state *x, 66 struct sk_buff *skb, __be32 net_seq); 67#else 68#define xfrm_audit_state_replay(x, s, sq) do { ; } while (0) 69#endif /* CONFIG_AUDITSYSCALL */ 70 71static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr, 72 xfrm_address_t *saddr, 73 u32 reqid, 74 unsigned short family) 75{ 76 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask); 77} 78 79static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr, 80 xfrm_address_t *saddr, 81 unsigned short family) 82{ 83 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask); 84} 85 86static inline unsigned int 87xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) 88{ 89 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask); 90} 91 92static void xfrm_hash_transfer(struct hlist_head *list, 93 struct hlist_head *ndsttable, 94 struct hlist_head *nsrctable, 95 struct hlist_head *nspitable, 96 unsigned int nhashmask) 97{ 98 struct hlist_node *entry, *tmp; 99 struct xfrm_state *x; 100 101 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) { 102 unsigned int h; 103 104 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, 105 x->props.reqid, x->props.family, 106 nhashmask); 107 hlist_add_head(&x->bydst, ndsttable+h); 108 109 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr, 110 x->props.family, 111 nhashmask); 112 
hlist_add_head(&x->bysrc, nsrctable+h); 113 114 if (x->id.spi) { 115 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, 116 x->id.proto, x->props.family, 117 nhashmask); 118 hlist_add_head(&x->byspi, nspitable+h); 119 } 120 } 121} 122 123static unsigned long xfrm_hash_new_size(void) 124{ 125 return ((xfrm_state_hmask + 1) << 1) * 126 sizeof(struct hlist_head); 127} 128 129static DEFINE_MUTEX(hash_resize_mutex); 130 131static void xfrm_hash_resize(struct work_struct *__unused) 132{ 133 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; 134 unsigned long nsize, osize; 135 unsigned int nhashmask, ohashmask; 136 int i; 137 138 mutex_lock(&hash_resize_mutex); 139 140 nsize = xfrm_hash_new_size(); 141 ndst = xfrm_hash_alloc(nsize); 142 if (!ndst) 143 goto out_unlock; 144 nsrc = xfrm_hash_alloc(nsize); 145 if (!nsrc) { 146 xfrm_hash_free(ndst, nsize); 147 goto out_unlock; 148 } 149 nspi = xfrm_hash_alloc(nsize); 150 if (!nspi) { 151 xfrm_hash_free(ndst, nsize); 152 xfrm_hash_free(nsrc, nsize); 153 goto out_unlock; 154 } 155 156 spin_lock_bh(&xfrm_state_lock); 157 158 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; 159 for (i = xfrm_state_hmask; i >= 0; i--) 160 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi, 161 nhashmask); 162 163 odst = xfrm_state_bydst; 164 osrc = xfrm_state_bysrc; 165 ospi = xfrm_state_byspi; 166 ohashmask = xfrm_state_hmask; 167 168 xfrm_state_bydst = ndst; 169 xfrm_state_bysrc = nsrc; 170 xfrm_state_byspi = nspi; 171 xfrm_state_hmask = nhashmask; 172 173 spin_unlock_bh(&xfrm_state_lock); 174 175 osize = (ohashmask + 1) * sizeof(struct hlist_head); 176 xfrm_hash_free(odst, osize); 177 xfrm_hash_free(osrc, osize); 178 xfrm_hash_free(ospi, osize); 179 180out_unlock: 181 mutex_unlock(&hash_resize_mutex); 182} 183 184static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); 185 186DECLARE_WAIT_QUEUE_HEAD(km_waitq); 187EXPORT_SYMBOL(km_waitq); 188 189static DEFINE_RWLOCK(xfrm_state_afinfo_lock); 190static struct xfrm_state_afinfo 
*xfrm_state_afinfo[NPROTO]; 191 192static struct work_struct xfrm_state_gc_work; 193static HLIST_HEAD(xfrm_state_gc_list); 194static DEFINE_SPINLOCK(xfrm_state_gc_lock); 195 196int __xfrm_state_delete(struct xfrm_state *x); 197 198int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); 199void km_state_expired(struct xfrm_state *x, int hard, u32 pid); 200 201static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family) 202{ 203 struct xfrm_state_afinfo *afinfo; 204 if (unlikely(family >= NPROTO)) 205 return NULL; 206 write_lock_bh(&xfrm_state_afinfo_lock); 207 afinfo = xfrm_state_afinfo[family]; 208 if (unlikely(!afinfo)) 209 write_unlock_bh(&xfrm_state_afinfo_lock); 210 return afinfo; 211} 212 213static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo) 214 __releases(xfrm_state_afinfo_lock) 215{ 216 write_unlock_bh(&xfrm_state_afinfo_lock); 217} 218 219int xfrm_register_type(struct xfrm_type *type, unsigned short family) 220{ 221 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); 222 struct xfrm_type **typemap; 223 int err = 0; 224 225 if (unlikely(afinfo == NULL)) 226 return -EAFNOSUPPORT; 227 typemap = afinfo->type_map; 228 229 if (likely(typemap[type->proto] == NULL)) 230 typemap[type->proto] = type; 231 else 232 err = -EEXIST; 233 xfrm_state_unlock_afinfo(afinfo); 234 return err; 235} 236EXPORT_SYMBOL(xfrm_register_type); 237 238int xfrm_unregister_type(struct xfrm_type *type, unsigned short family) 239{ 240 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); 241 struct xfrm_type **typemap; 242 int err = 0; 243 244 if (unlikely(afinfo == NULL)) 245 return -EAFNOSUPPORT; 246 typemap = afinfo->type_map; 247 248 if (unlikely(typemap[type->proto] != type)) 249 err = -ENOENT; 250 else 251 typemap[type->proto] = NULL; 252 xfrm_state_unlock_afinfo(afinfo); 253 return err; 254} 255EXPORT_SYMBOL(xfrm_unregister_type); 256 257static struct xfrm_type *xfrm_get_type(u8 proto, unsigned 
short family) 258{ 259 struct xfrm_state_afinfo *afinfo; 260 struct xfrm_type **typemap; 261 struct xfrm_type *type; 262 int modload_attempted = 0; 263 264retry: 265 afinfo = xfrm_state_get_afinfo(family); 266 if (unlikely(afinfo == NULL)) 267 return NULL; 268 typemap = afinfo->type_map; 269 270 type = typemap[proto]; 271 if (unlikely(type && !try_module_get(type->owner))) 272 type = NULL; 273 if (!type && !modload_attempted) { 274 xfrm_state_put_afinfo(afinfo); 275 request_module("xfrm-type-%d-%d", family, proto); 276 modload_attempted = 1; 277 goto retry; 278 } 279 280 xfrm_state_put_afinfo(afinfo); 281 return type; 282} 283 284static void xfrm_put_type(struct xfrm_type *type) 285{ 286 module_put(type->owner); 287} 288 289int xfrm_register_mode(struct xfrm_mode *mode, int family) 290{ 291 struct xfrm_state_afinfo *afinfo; 292 struct xfrm_mode **modemap; 293 int err; 294 295 if (unlikely(mode->encap >= XFRM_MODE_MAX)) 296 return -EINVAL; 297 298 afinfo = xfrm_state_lock_afinfo(family); 299 if (unlikely(afinfo == NULL)) 300 return -EAFNOSUPPORT; 301 302 err = -EEXIST; 303 modemap = afinfo->mode_map; 304 if (modemap[mode->encap]) 305 goto out; 306 307 err = -ENOENT; 308 if (!try_module_get(afinfo->owner)) 309 goto out; 310 311 mode->afinfo = afinfo; 312 modemap[mode->encap] = mode; 313 err = 0; 314 315out: 316 xfrm_state_unlock_afinfo(afinfo); 317 return err; 318} 319EXPORT_SYMBOL(xfrm_register_mode); 320 321int xfrm_unregister_mode(struct xfrm_mode *mode, int family) 322{ 323 struct xfrm_state_afinfo *afinfo; 324 struct xfrm_mode **modemap; 325 int err; 326 327 if (unlikely(mode->encap >= XFRM_MODE_MAX)) 328 return -EINVAL; 329 330 afinfo = xfrm_state_lock_afinfo(family); 331 if (unlikely(afinfo == NULL)) 332 return -EAFNOSUPPORT; 333 334 err = -ENOENT; 335 modemap = afinfo->mode_map; 336 if (likely(modemap[mode->encap] == mode)) { 337 modemap[mode->encap] = NULL; 338 module_put(mode->afinfo->owner); 339 err = 0; 340 } 341 342 xfrm_state_unlock_afinfo(afinfo); 343 
return err; 344} 345EXPORT_SYMBOL(xfrm_unregister_mode); 346 347static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family) 348{ 349 struct xfrm_state_afinfo *afinfo; 350 struct xfrm_mode *mode; 351 int modload_attempted = 0; 352 353 if (unlikely(encap >= XFRM_MODE_MAX)) 354 return NULL; 355 356retry: 357 afinfo = xfrm_state_get_afinfo(family); 358 if (unlikely(afinfo == NULL)) 359 return NULL; 360 361 mode = afinfo->mode_map[encap]; 362 if (unlikely(mode && !try_module_get(mode->owner))) 363 mode = NULL; 364 if (!mode && !modload_attempted) { 365 xfrm_state_put_afinfo(afinfo); 366 request_module("xfrm-mode-%d-%d", family, encap); 367 modload_attempted = 1; 368 goto retry; 369 } 370 371 xfrm_state_put_afinfo(afinfo); 372 return mode; 373} 374 375static void xfrm_put_mode(struct xfrm_mode *mode) 376{ 377 module_put(mode->owner); 378} 379 380static void xfrm_state_gc_destroy(struct xfrm_state *x) 381{ 382 del_timer_sync(&x->timer); 383 del_timer_sync(&x->rtimer); 384 kfree(x->aalg); 385 kfree(x->ealg); 386 kfree(x->calg); 387 kfree(x->encap); 388 kfree(x->coaddr); 389 if (x->inner_mode) 390 xfrm_put_mode(x->inner_mode); 391 if (x->outer_mode) 392 xfrm_put_mode(x->outer_mode); 393 if (x->type) { 394 x->type->destructor(x); 395 xfrm_put_type(x->type); 396 } 397 security_xfrm_state_free(x); 398 kfree(x); 399} 400 401static void xfrm_state_gc_task(struct work_struct *data) 402{ 403 struct xfrm_state *x; 404 struct hlist_node *entry, *tmp; 405 struct hlist_head gc_list; 406 407 spin_lock_bh(&xfrm_state_gc_lock); 408 gc_list.first = xfrm_state_gc_list.first; 409 INIT_HLIST_HEAD(&xfrm_state_gc_list); 410 spin_unlock_bh(&xfrm_state_gc_lock); 411 412 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst) 413 xfrm_state_gc_destroy(x); 414 415 wake_up(&km_waitq); 416} 417 418static inline unsigned long make_jiffies(long secs) 419{ 420 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) 421 return MAX_SCHEDULE_TIMEOUT-1; 422 else 423 return secs*HZ; 424} 425 426static void 
xfrm_timer_handler(unsigned long data) 427{ 428 struct xfrm_state *x = (struct xfrm_state*)data; 429 unsigned long now = get_seconds(); 430 long next = LONG_MAX; 431 int warn = 0; 432 int err = 0; 433 434 spin_lock(&x->lock); 435 if (x->km.state == XFRM_STATE_DEAD) 436 goto out; 437 if (x->km.state == XFRM_STATE_EXPIRED) 438 goto expired; 439 if (x->lft.hard_add_expires_seconds) { 440 long tmo = x->lft.hard_add_expires_seconds + 441 x->curlft.add_time - now; 442 if (tmo <= 0) 443 goto expired; 444 if (tmo < next) 445 next = tmo; 446 } 447 if (x->lft.hard_use_expires_seconds) { 448 long tmo = x->lft.hard_use_expires_seconds + 449 (x->curlft.use_time ? : now) - now; 450 if (tmo <= 0) 451 goto expired; 452 if (tmo < next) 453 next = tmo; 454 } 455 if (x->km.dying) 456 goto resched; 457 if (x->lft.soft_add_expires_seconds) { 458 long tmo = x->lft.soft_add_expires_seconds + 459 x->curlft.add_time - now; 460 if (tmo <= 0) 461 warn = 1; 462 else if (tmo < next) 463 next = tmo; 464 } 465 if (x->lft.soft_use_expires_seconds) { 466 long tmo = x->lft.soft_use_expires_seconds + 467 (x->curlft.use_time ? : now) - now; 468 if (tmo <= 0) 469 warn = 1; 470 else if (tmo < next) 471 next = tmo; 472 } 473 474 x->km.dying = warn; 475 if (warn) 476 km_state_expired(x, 0, 0); 477resched: 478 if (next != LONG_MAX) 479 mod_timer(&x->timer, jiffies + make_jiffies(next)); 480 481 goto out; 482 483expired: 484 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) { 485 x->km.state = XFRM_STATE_EXPIRED; 486 wake_up(&km_waitq); 487 next = 2; 488 goto resched; 489 } 490 491 err = __xfrm_state_delete(x); 492 if (!err && x->id.spi) 493 km_state_expired(x, 1, 0); 494 495 xfrm_audit_state_delete(x, err ? 
0 : 1, 496 audit_get_loginuid(current->audit_context), 0); 497 498out: 499 spin_unlock(&x->lock); 500} 501 502static void xfrm_replay_timer_handler(unsigned long data); 503 504struct xfrm_state *xfrm_state_alloc(void) 505{ 506 struct xfrm_state *x; 507 508 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC); 509 510 if (x) { 511 atomic_set(&x->refcnt, 1); 512 atomic_set(&x->tunnel_users, 0); 513 INIT_HLIST_NODE(&x->bydst); 514 INIT_HLIST_NODE(&x->bysrc); 515 INIT_HLIST_NODE(&x->byspi); 516 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x); 517 setup_timer(&x->rtimer, xfrm_replay_timer_handler, 518 (unsigned long)x); 519 x->curlft.add_time = get_seconds(); 520 x->lft.soft_byte_limit = XFRM_INF; 521 x->lft.soft_packet_limit = XFRM_INF; 522 x->lft.hard_byte_limit = XFRM_INF; 523 x->lft.hard_packet_limit = XFRM_INF; 524 x->replay_maxage = 0; 525 x->replay_maxdiff = 0; 526 spin_lock_init(&x->lock); 527 } 528 return x; 529} 530EXPORT_SYMBOL(xfrm_state_alloc); 531 532void __xfrm_state_destroy(struct xfrm_state *x) 533{ 534 BUG_TRAP(x->km.state == XFRM_STATE_DEAD); 535 536 spin_lock_bh(&xfrm_state_gc_lock); 537 hlist_add_head(&x->bydst, &xfrm_state_gc_list); 538 spin_unlock_bh(&xfrm_state_gc_lock); 539 schedule_work(&xfrm_state_gc_work); 540} 541EXPORT_SYMBOL(__xfrm_state_destroy); 542 543int __xfrm_state_delete(struct xfrm_state *x) 544{ 545 int err = -ESRCH; 546 547 if (x->km.state != XFRM_STATE_DEAD) { 548 x->km.state = XFRM_STATE_DEAD; 549 spin_lock(&xfrm_state_lock); 550 hlist_del(&x->bydst); 551 hlist_del(&x->bysrc); 552 if (x->id.spi) 553 hlist_del(&x->byspi); 554 xfrm_state_num--; 555 spin_unlock(&xfrm_state_lock); 556 557 /* All xfrm_state objects are created by xfrm_state_alloc. 558 * The xfrm_state_alloc call gives a reference, and that 559 * is what we are dropping here. 
560 */ 561 xfrm_state_put(x); 562 err = 0; 563 } 564 565 return err; 566} 567EXPORT_SYMBOL(__xfrm_state_delete); 568 569int xfrm_state_delete(struct xfrm_state *x) 570{ 571 int err; 572 573 spin_lock_bh(&x->lock); 574 err = __xfrm_state_delete(x); 575 spin_unlock_bh(&x->lock); 576 577 return err; 578} 579EXPORT_SYMBOL(xfrm_state_delete); 580 581#ifdef CONFIG_SECURITY_NETWORK_XFRM 582static inline int 583xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info) 584{ 585 int i, err = 0; 586 587 for (i = 0; i <= xfrm_state_hmask; i++) { 588 struct hlist_node *entry; 589 struct xfrm_state *x; 590 591 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { 592 if (xfrm_id_proto_match(x->id.proto, proto) && 593 (err = security_xfrm_state_delete(x)) != 0) { 594 xfrm_audit_state_delete(x, 0, 595 audit_info->loginuid, 596 audit_info->secid); 597 return err; 598 } 599 } 600 } 601 602 return err; 603} 604#else 605static inline int 606xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info) 607{ 608 return 0; 609} 610#endif 611 612int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info) 613{ 614 int i, err = 0; 615 616 spin_lock_bh(&xfrm_state_lock); 617 err = xfrm_state_flush_secctx_check(proto, audit_info); 618 if (err) 619 goto out; 620 621 for (i = 0; i <= xfrm_state_hmask; i++) { 622 struct hlist_node *entry; 623 struct xfrm_state *x; 624restart: 625 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { 626 if (!xfrm_state_kern(x) && 627 xfrm_id_proto_match(x->id.proto, proto)) { 628 xfrm_state_hold(x); 629 spin_unlock_bh(&xfrm_state_lock); 630 631 err = xfrm_state_delete(x); 632 xfrm_audit_state_delete(x, err ? 
0 : 1, 633 audit_info->loginuid, 634 audit_info->secid); 635 xfrm_state_put(x); 636 637 spin_lock_bh(&xfrm_state_lock); 638 goto restart; 639 } 640 } 641 } 642 err = 0; 643 644out: 645 spin_unlock_bh(&xfrm_state_lock); 646 wake_up(&km_waitq); 647 return err; 648} 649EXPORT_SYMBOL(xfrm_state_flush); 650 651void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) 652{ 653 spin_lock_bh(&xfrm_state_lock); 654 si->sadcnt = xfrm_state_num; 655 si->sadhcnt = xfrm_state_hmask; 656 si->sadhmcnt = xfrm_state_hashmax; 657 spin_unlock_bh(&xfrm_state_lock); 658} 659EXPORT_SYMBOL(xfrm_sad_getinfo); 660 661static int 662xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl, 663 struct xfrm_tmpl *tmpl, 664 xfrm_address_t *daddr, xfrm_address_t *saddr, 665 unsigned short family) 666{ 667 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 668 if (!afinfo) 669 return -1; 670 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr); 671 xfrm_state_put_afinfo(afinfo); 672 return 0; 673} 674 675static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) 676{ 677 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family); 678 struct xfrm_state *x; 679 struct hlist_node *entry; 680 681 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) { 682 if (x->props.family != family || 683 x->id.spi != spi || 684 x->id.proto != proto) 685 continue; 686 687 switch (family) { 688 case AF_INET: 689 if (x->id.daddr.a4 != daddr->a4) 690 continue; 691 break; 692 case AF_INET6: 693 if (!ipv6_addr_equal((struct in6_addr *)daddr, 694 (struct in6_addr *) 695 x->id.daddr.a6)) 696 continue; 697 break; 698 } 699 700 xfrm_state_hold(x); 701 return x; 702 } 703 704 return NULL; 705} 706 707static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family) 708{ 709 unsigned int h = xfrm_src_hash(daddr, saddr, family); 710 struct xfrm_state *x; 711 struct hlist_node *entry; 712 713 
hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) { 714 if (x->props.family != family || 715 x->id.proto != proto) 716 continue; 717 718 switch (family) { 719 case AF_INET: 720 if (x->id.daddr.a4 != daddr->a4 || 721 x->props.saddr.a4 != saddr->a4) 722 continue; 723 break; 724 case AF_INET6: 725 if (!ipv6_addr_equal((struct in6_addr *)daddr, 726 (struct in6_addr *) 727 x->id.daddr.a6) || 728 !ipv6_addr_equal((struct in6_addr *)saddr, 729 (struct in6_addr *) 730 x->props.saddr.a6)) 731 continue; 732 break; 733 } 734 735 xfrm_state_hold(x); 736 return x; 737 } 738 739 return NULL; 740} 741 742static inline struct xfrm_state * 743__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family) 744{ 745 if (use_spi) 746 return __xfrm_state_lookup(&x->id.daddr, x->id.spi, 747 x->id.proto, family); 748 else 749 return __xfrm_state_lookup_byaddr(&x->id.daddr, 750 &x->props.saddr, 751 x->id.proto, family); 752} 753 754static void xfrm_hash_grow_check(int have_hash_collision) 755{ 756 if (have_hash_collision && 757 (xfrm_state_hmask + 1) < xfrm_state_hashmax && 758 xfrm_state_num > xfrm_state_hmask) 759 schedule_work(&xfrm_hash_work); 760} 761 762struct xfrm_state * 763xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 764 struct flowi *fl, struct xfrm_tmpl *tmpl, 765 struct xfrm_policy *pol, int *err, 766 unsigned short family) 767{ 768 unsigned int h; 769 struct hlist_node *entry; 770 struct xfrm_state *x, *x0; 771 int acquire_in_progress = 0; 772 int error = 0; 773 struct xfrm_state *best = NULL; 774 775 spin_lock_bh(&xfrm_state_lock); 776 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family); 777 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) { 778 if (x->props.family == family && 779 x->props.reqid == tmpl->reqid && 780 !(x->props.flags & XFRM_STATE_WILDRECV) && 781 xfrm_state_addr_check(x, daddr, saddr, family) && 782 tmpl->mode == x->props.mode && 783 tmpl->id.proto == x->id.proto && 784 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) { 
785 /* Resolution logic: 786 1. There is a valid state with matching selector. 787 Done. 788 2. Valid state with inappropriate selector. Skip. 789 790 Entering area of "sysdeps". 791 792 3. If state is not valid, selector is temporary, 793 it selects only session which triggered 794 previous resolution. Key manager will do 795 something to install a state with proper 796 selector. 797 */ 798 if (x->km.state == XFRM_STATE_VALID) { 799 if (!xfrm_selector_match(&x->sel, fl, x->sel.family) || 800 !security_xfrm_state_pol_flow_match(x, pol, fl)) 801 continue; 802 if (!best || 803 best->km.dying > x->km.dying || 804 (best->km.dying == x->km.dying && 805 best->curlft.add_time < x->curlft.add_time)) 806 best = x; 807 } else if (x->km.state == XFRM_STATE_ACQ) { 808 acquire_in_progress = 1; 809 } else if (x->km.state == XFRM_STATE_ERROR || 810 x->km.state == XFRM_STATE_EXPIRED) { 811 if (xfrm_selector_match(&x->sel, fl, x->sel.family) && 812 security_xfrm_state_pol_flow_match(x, pol, fl)) 813 error = -ESRCH; 814 } 815 } 816 } 817 818 x = best; 819 if (!x && !error && !acquire_in_progress) { 820 if (tmpl->id.spi && 821 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi, 822 tmpl->id.proto, family)) != NULL) { 823 xfrm_state_put(x0); 824 error = -EEXIST; 825 goto out; 826 } 827 x = xfrm_state_alloc(); 828 if (x == NULL) { 829 error = -ENOMEM; 830 goto out; 831 } 832 /* Initialize temporary selector matching only 833 * to current session. 
*/ 834 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); 835 836 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); 837 if (error) { 838 x->km.state = XFRM_STATE_DEAD; 839 xfrm_state_put(x); 840 x = NULL; 841 goto out; 842 } 843 844 if (km_query(x, tmpl, pol) == 0) { 845 x->km.state = XFRM_STATE_ACQ; 846 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 847 h = xfrm_src_hash(daddr, saddr, family); 848 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 849 if (x->id.spi) { 850 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family); 851 hlist_add_head(&x->byspi, xfrm_state_byspi+h); 852 } 853 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires; 854 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ; 855 add_timer(&x->timer); 856 xfrm_state_num++; 857 xfrm_hash_grow_check(x->bydst.next != NULL); 858 } else { 859 x->km.state = XFRM_STATE_DEAD; 860 xfrm_state_put(x); 861 x = NULL; 862 error = -ESRCH; 863 } 864 } 865out: 866 if (x) 867 xfrm_state_hold(x); 868 else 869 *err = acquire_in_progress ? 
-EAGAIN : error; 870 spin_unlock_bh(&xfrm_state_lock); 871 return x; 872} 873 874struct xfrm_state * 875xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 876 unsigned short family, u8 mode, u8 proto, u32 reqid) 877{ 878 unsigned int h; 879 struct xfrm_state *rx = NULL, *x = NULL; 880 struct hlist_node *entry; 881 882 spin_lock(&xfrm_state_lock); 883 h = xfrm_dst_hash(daddr, saddr, reqid, family); 884 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) { 885 if (x->props.family == family && 886 x->props.reqid == reqid && 887 !(x->props.flags & XFRM_STATE_WILDRECV) && 888 xfrm_state_addr_check(x, daddr, saddr, family) && 889 mode == x->props.mode && 890 proto == x->id.proto && 891 x->km.state == XFRM_STATE_VALID) { 892 rx = x; 893 break; 894 } 895 } 896 897 if (rx) 898 xfrm_state_hold(rx); 899 spin_unlock(&xfrm_state_lock); 900 901 902 return rx; 903} 904EXPORT_SYMBOL(xfrm_stateonly_find); 905 906static void __xfrm_state_insert(struct xfrm_state *x) 907{ 908 unsigned int h; 909 910 x->genid = ++xfrm_state_genid; 911 912 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr, 913 x->props.reqid, x->props.family); 914 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 915 916 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family); 917 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 918 919 if (x->id.spi) { 920 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, 921 x->props.family); 922 923 hlist_add_head(&x->byspi, xfrm_state_byspi+h); 924 } 925 926 mod_timer(&x->timer, jiffies + HZ); 927 if (x->replay_maxage) 928 mod_timer(&x->rtimer, jiffies + x->replay_maxage); 929 930 wake_up(&km_waitq); 931 932 xfrm_state_num++; 933 934 xfrm_hash_grow_check(x->bydst.next != NULL); 935} 936 937/* xfrm_state_lock is held */ 938static void __xfrm_state_bump_genids(struct xfrm_state *xnew) 939{ 940 unsigned short family = xnew->props.family; 941 u32 reqid = xnew->props.reqid; 942 struct xfrm_state *x; 943 struct hlist_node *entry; 944 unsigned int h; 945 946 
h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family); 947 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) { 948 if (x->props.family == family && 949 x->props.reqid == reqid && 950 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && 951 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) 952 x->genid = xfrm_state_genid; 953 } 954} 955 956void xfrm_state_insert(struct xfrm_state *x) 957{ 958 spin_lock_bh(&xfrm_state_lock); 959 __xfrm_state_bump_genids(x); 960 __xfrm_state_insert(x); 961 spin_unlock_bh(&xfrm_state_lock); 962} 963EXPORT_SYMBOL(xfrm_state_insert); 964 965/* xfrm_state_lock is held */ 966static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create) 967{ 968 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family); 969 struct hlist_node *entry; 970 struct xfrm_state *x; 971 972 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) { 973 if (x->props.reqid != reqid || 974 x->props.mode != mode || 975 x->props.family != family || 976 x->km.state != XFRM_STATE_ACQ || 977 x->id.spi != 0 || 978 x->id.proto != proto) 979 continue; 980 981 switch (family) { 982 case AF_INET: 983 if (x->id.daddr.a4 != daddr->a4 || 984 x->props.saddr.a4 != saddr->a4) 985 continue; 986 break; 987 case AF_INET6: 988 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6, 989 (struct in6_addr *)daddr) || 990 !ipv6_addr_equal((struct in6_addr *) 991 x->props.saddr.a6, 992 (struct in6_addr *)saddr)) 993 continue; 994 break; 995 } 996 997 xfrm_state_hold(x); 998 return x; 999 } 1000 1001 if (!create) 1002 return NULL; 1003 1004 x = xfrm_state_alloc(); 1005 if (likely(x)) { 1006 switch (family) { 1007 case AF_INET: 1008 x->sel.daddr.a4 = daddr->a4; 1009 x->sel.saddr.a4 = saddr->a4; 1010 x->sel.prefixlen_d = 32; 1011 x->sel.prefixlen_s = 32; 1012 x->props.saddr.a4 = saddr->a4; 1013 x->id.daddr.a4 = daddr->a4; 1014 break; 1015 1016 case AF_INET6: 1017 
ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6, 1018 (struct in6_addr *)daddr); 1019 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6, 1020 (struct in6_addr *)saddr); 1021 x->sel.prefixlen_d = 128; 1022 x->sel.prefixlen_s = 128; 1023 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6, 1024 (struct in6_addr *)saddr); 1025 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6, 1026 (struct in6_addr *)daddr); 1027 break; 1028 } 1029 1030 x->km.state = XFRM_STATE_ACQ; 1031 x->id.proto = proto; 1032 x->props.family = family; 1033 x->props.mode = mode; 1034 x->props.reqid = reqid; 1035 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires; 1036 xfrm_state_hold(x); 1037 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ; 1038 add_timer(&x->timer); 1039 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 1040 h = xfrm_src_hash(daddr, saddr, family); 1041 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 1042 1043 xfrm_state_num++; 1044 1045 xfrm_hash_grow_check(x->bydst.next != NULL); 1046 } 1047 1048 return x; 1049} 1050 1051static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq); 1052 1053int xfrm_state_add(struct xfrm_state *x) 1054{ 1055 struct xfrm_state *x1; 1056 int family; 1057 int err; 1058 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1059 1060 family = x->props.family; 1061 1062 spin_lock_bh(&xfrm_state_lock); 1063 1064 x1 = __xfrm_state_locate(x, use_spi, family); 1065 if (x1) { 1066 xfrm_state_put(x1); 1067 x1 = NULL; 1068 err = -EEXIST; 1069 goto out; 1070 } 1071 1072 if (use_spi && x->km.seq) { 1073 x1 = __xfrm_find_acq_byseq(x->km.seq); 1074 if (x1 && ((x1->id.proto != x->id.proto) || 1075 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { 1076 xfrm_state_put(x1); 1077 x1 = NULL; 1078 } 1079 } 1080 1081 if (use_spi && !x1) 1082 x1 = __find_acq_core(family, x->props.mode, x->props.reqid, 1083 x->id.proto, 1084 &x->id.daddr, &x->props.saddr, 0); 1085 1086 __xfrm_state_bump_genids(x); 1087 __xfrm_state_insert(x); 1088 err = 0; 1089 
1090out: 1091 spin_unlock_bh(&xfrm_state_lock); 1092 1093 if (x1) { 1094 xfrm_state_delete(x1); 1095 xfrm_state_put(x1); 1096 } 1097 1098 return err; 1099} 1100EXPORT_SYMBOL(xfrm_state_add); 1101 1102#ifdef CONFIG_XFRM_MIGRATE 1103struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp) 1104{ 1105 int err = -ENOMEM; 1106 struct xfrm_state *x = xfrm_state_alloc(); 1107 if (!x) 1108 goto error; 1109 1110 memcpy(&x->id, &orig->id, sizeof(x->id)); 1111 memcpy(&x->sel, &orig->sel, sizeof(x->sel)); 1112 memcpy(&x->lft, &orig->lft, sizeof(x->lft)); 1113 x->props.mode = orig->props.mode; 1114 x->props.replay_window = orig->props.replay_window; 1115 x->props.reqid = orig->props.reqid; 1116 x->props.family = orig->props.family; 1117 x->props.saddr = orig->props.saddr; 1118 1119 if (orig->aalg) { 1120 x->aalg = xfrm_algo_clone(orig->aalg); 1121 if (!x->aalg) 1122 goto error; 1123 } 1124 x->props.aalgo = orig->props.aalgo; 1125 1126 if (orig->ealg) { 1127 x->ealg = xfrm_algo_clone(orig->ealg); 1128 if (!x->ealg) 1129 goto error; 1130 } 1131 x->props.ealgo = orig->props.ealgo; 1132 1133 if (orig->calg) { 1134 x->calg = xfrm_algo_clone(orig->calg); 1135 if (!x->calg) 1136 goto error; 1137 } 1138 x->props.calgo = orig->props.calgo; 1139 1140 if (orig->encap) { 1141 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL); 1142 if (!x->encap) 1143 goto error; 1144 } 1145 1146 if (orig->coaddr) { 1147 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr), 1148 GFP_KERNEL); 1149 if (!x->coaddr) 1150 goto error; 1151 } 1152 1153 err = xfrm_init_state(x); 1154 if (err) 1155 goto error; 1156 1157 x->props.flags = orig->props.flags; 1158 1159 x->curlft.add_time = orig->curlft.add_time; 1160 x->km.state = orig->km.state; 1161 x->km.seq = orig->km.seq; 1162 1163 return x; 1164 1165 error: 1166 if (errp) 1167 *errp = err; 1168 if (x) { 1169 kfree(x->aalg); 1170 kfree(x->ealg); 1171 kfree(x->calg); 1172 kfree(x->encap); 1173 kfree(x->coaddr); 1174 } 1175 kfree(x); 1176 
return NULL; 1177} 1178EXPORT_SYMBOL(xfrm_state_clone); 1179 1180/* xfrm_state_lock is held */ 1181struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m) 1182{ 1183 unsigned int h; 1184 struct xfrm_state *x; 1185 struct hlist_node *entry; 1186 1187 if (m->reqid) { 1188 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr, 1189 m->reqid, m->old_family); 1190 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) { 1191 if (x->props.mode != m->mode || 1192 x->id.proto != m->proto) 1193 continue; 1194 if (m->reqid && x->props.reqid != m->reqid) 1195 continue; 1196 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr, 1197 m->old_family) || 1198 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr, 1199 m->old_family)) 1200 continue; 1201 xfrm_state_hold(x); 1202 return x; 1203 } 1204 } else { 1205 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr, 1206 m->old_family); 1207 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) { 1208 if (x->props.mode != m->mode || 1209 x->id.proto != m->proto) 1210 continue; 1211 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr, 1212 m->old_family) || 1213 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr, 1214 m->old_family)) 1215 continue; 1216 xfrm_state_hold(x); 1217 return x; 1218 } 1219 } 1220 1221 return NULL; 1222} 1223EXPORT_SYMBOL(xfrm_migrate_state_find); 1224 1225struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x, 1226 struct xfrm_migrate *m) 1227{ 1228 struct xfrm_state *xc; 1229 int err; 1230 1231 xc = xfrm_state_clone(x, &err); 1232 if (!xc) 1233 return NULL; 1234 1235 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr)); 1236 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr)); 1237 1238 /* add state */ 1239 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) { 1240 /* a care is needed when the destination address of the 1241 state is to be updated as it is a part of triplet */ 1242 xfrm_state_insert(xc); 1243 } else { 1244 if ((err = xfrm_state_add(xc)) < 0) 1245 goto error; 1246 } 1247 
1248 return xc; 1249error: 1250 kfree(xc); 1251 return NULL; 1252} 1253EXPORT_SYMBOL(xfrm_state_migrate); 1254#endif 1255 1256int xfrm_state_update(struct xfrm_state *x) 1257{ 1258 struct xfrm_state *x1; 1259 int err; 1260 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1261 1262 spin_lock_bh(&xfrm_state_lock); 1263 x1 = __xfrm_state_locate(x, use_spi, x->props.family); 1264 1265 err = -ESRCH; 1266 if (!x1) 1267 goto out; 1268 1269 if (xfrm_state_kern(x1)) { 1270 xfrm_state_put(x1); 1271 err = -EEXIST; 1272 goto out; 1273 } 1274 1275 if (x1->km.state == XFRM_STATE_ACQ) { 1276 __xfrm_state_insert(x); 1277 x = NULL; 1278 } 1279 err = 0; 1280 1281out: 1282 spin_unlock_bh(&xfrm_state_lock); 1283 1284 if (err) 1285 return err; 1286 1287 if (!x) { 1288 xfrm_state_delete(x1); 1289 xfrm_state_put(x1); 1290 return 0; 1291 } 1292 1293 err = -EINVAL; 1294 spin_lock_bh(&x1->lock); 1295 if (likely(x1->km.state == XFRM_STATE_VALID)) { 1296 if (x->encap && x1->encap) 1297 memcpy(x1->encap, x->encap, sizeof(*x1->encap)); 1298 if (x->coaddr && x1->coaddr) { 1299 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr)); 1300 } 1301 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel))) 1302 memcpy(&x1->sel, &x->sel, sizeof(x1->sel)); 1303 memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); 1304 x1->km.dying = 0; 1305 1306 mod_timer(&x1->timer, jiffies + HZ); 1307 if (x1->curlft.use_time) 1308 xfrm_state_check_expire(x1); 1309 1310 err = 0; 1311 } 1312 spin_unlock_bh(&x1->lock); 1313 1314 xfrm_state_put(x1); 1315 1316 return err; 1317} 1318EXPORT_SYMBOL(xfrm_state_update); 1319 1320int xfrm_state_check_expire(struct xfrm_state *x) 1321{ 1322 if (!x->curlft.use_time) 1323 x->curlft.use_time = get_seconds(); 1324 1325 if (x->km.state != XFRM_STATE_VALID) 1326 return -EINVAL; 1327 1328 if (x->curlft.bytes >= x->lft.hard_byte_limit || 1329 x->curlft.packets >= x->lft.hard_packet_limit) { 1330 x->km.state = XFRM_STATE_EXPIRED; 1331 mod_timer(&x->timer, jiffies); 1332 return 
-EINVAL; 1333 } 1334 1335 if (!x->km.dying && 1336 (x->curlft.bytes >= x->lft.soft_byte_limit || 1337 x->curlft.packets >= x->lft.soft_packet_limit)) { 1338 x->km.dying = 1; 1339 km_state_expired(x, 0, 0); 1340 } 1341 return 0; 1342} 1343EXPORT_SYMBOL(xfrm_state_check_expire); 1344 1345struct xfrm_state * 1346xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, 1347 unsigned short family) 1348{ 1349 struct xfrm_state *x; 1350 1351 spin_lock_bh(&xfrm_state_lock); 1352 x = __xfrm_state_lookup(daddr, spi, proto, family); 1353 spin_unlock_bh(&xfrm_state_lock); 1354 return x; 1355} 1356EXPORT_SYMBOL(xfrm_state_lookup); 1357 1358struct xfrm_state * 1359xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, 1360 u8 proto, unsigned short family) 1361{ 1362 struct xfrm_state *x; 1363 1364 spin_lock_bh(&xfrm_state_lock); 1365 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family); 1366 spin_unlock_bh(&xfrm_state_lock); 1367 return x; 1368} 1369EXPORT_SYMBOL(xfrm_state_lookup_byaddr); 1370 1371struct xfrm_state * 1372xfrm_find_acq(u8 mode, u32 reqid, u8 proto, 1373 xfrm_address_t *daddr, xfrm_address_t *saddr, 1374 int create, unsigned short family) 1375{ 1376 struct xfrm_state *x; 1377 1378 spin_lock_bh(&xfrm_state_lock); 1379 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create); 1380 spin_unlock_bh(&xfrm_state_lock); 1381 1382 return x; 1383} 1384EXPORT_SYMBOL(xfrm_find_acq); 1385 1386#ifdef CONFIG_XFRM_SUB_POLICY 1387int 1388xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, 1389 unsigned short family) 1390{ 1391 int err = 0; 1392 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 1393 if (!afinfo) 1394 return -EAFNOSUPPORT; 1395 1396 spin_lock_bh(&xfrm_state_lock); 1397 if (afinfo->tmpl_sort) 1398 err = afinfo->tmpl_sort(dst, src, n); 1399 spin_unlock_bh(&xfrm_state_lock); 1400 xfrm_state_put_afinfo(afinfo); 1401 return err; 1402} 1403EXPORT_SYMBOL(xfrm_tmpl_sort); 1404 1405int 
/* Sort @n states from @src into @dst via the family's state_sort hook
 * (returns 0 unchanged when the family provides none).
 */
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif

/* Silly enough, but I'm lazy to build resolution list */

/* Linear scan of every bydst chain for an ACQ state whose km.seq
 * matches @seq.  Caller must hold xfrm_state_lock; returns a held
 * reference or NULL.
 */
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}

/* Locked wrapper around __xfrm_find_acq_byseq(). */
struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

/* Hand out the next non-zero acquire sequence number.  The GCC "?:"
 * extension below re-increments on wrap so 0 is never returned.
 */
u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

/* Assign an unused SPI in [low, high] to @x and hash it into the
 * byspi table.  low == high requests that exact SPI; otherwise random
 * candidates are probed, up to one attempt per value in the range.
 * Returns 0 once x->id.spi is set (including when it already was),
 * -ENOENT if no free SPI was found or the state is DEAD.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			/* Requested SPI already in use. */
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* Random probing: collisions are retried, bounded by the
		 * size of the range.
		 */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);

/* Invoke @func on every SA whose protocol matches @proto.  Each call
 * receives a running 1-based count; the final entry is re-visited
 * with count 0 as an end marker.  The whole walk runs under
 * xfrm_state_lock, so @func must not sleep.  Returns -ENOENT when no
 * SA matched, otherwise the first non-zero value from @func (or 0).
 * (Function body continues below this chunk.)
 */
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	/* Final callback with count == 0 marks the end of the walk. */
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);


void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno
	 *     difference is at least x->replay_maxdiff; in this case we
	 *     also update the timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since the last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Below-threshold update: defer unless a timeout was
		 * already pending (XFRM_TIME_DEFER), in which case fold
		 * it into this notification.
		 */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last snapshot: remember the
		 * deferred timeout instead of sending an empty event.
		 */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot the counters we are about to report, then notify. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}

/* Aging timer for replay notifications: reports pending changes when
 * aevent listeners exist, otherwise defers until one appears.
 */
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

/* Validate an inbound sequence number against the replay window.
 * Rejects seq 0, sequence numbers older than the window, and numbers
 * already marked in the bitmap; every rejection is audited.  Returns
 * 0 if the packet may be accepted, -EINVAL otherwise.
 */
int xfrm_replay_check(struct xfrm_state *x,
		      struct sk_buff *skb, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		goto err;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		goto err;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		goto err;
	}
	return 0;

err:
	xfrm_audit_state_replay(x, skb, net_seq);
	return -EINVAL;
}
EXPORT_SYMBOL(xfrm_replay_check);

/* Record an accepted sequence number: slide the window forward for a
 * new high-water mark, or set the corresponding bit for an in-window
 * packet, then notify key managers if aevents are enabled.
 */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);

/* Registered key managers (e.g. af_key, netlink) and their lock. */
static LIST_HEAD(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);

/* Broadcast a policy event to every key manager with a policy hook. */
void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}

/* Broadcast a state event to every key manager with a notify hook. */
void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

/* Report a soft (!hard) or hard SA expiry to the key managers; hard
 * expiries also wake waiters on km_waitq.
 */
void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
*/
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);

/* Report a NAT keepalive address/port change; stops at the first key
 * manager that handles it successfully.
 */
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);

/* Report a policy expiry to the key managers; hard expiries also wake
 * waiters on km_waitq.
 */
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);

#ifdef CONFIG_XFRM_MIGRATE
/* Propagate a MIGRATE request to every key manager that implements
 * it; succeeds if at least one manager accepted.
 */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif

/* Propagate an SA report request to every key manager that implements
 * it; succeeds if at least one manager accepted.
 */
int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);

/* setsockopt() helper: copy an opaque policy blob from userspace, let
 * the first key manager that understands @optname compile it into an
 * xfrm_policy (its return in err encodes the direction), and attach
 * it to the socket.  Returns 0 on success or a negative errno.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* err >= 0 carries the policy direction at this point. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

/* Register a key manager; always succeeds. */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

/* Unregister a previously registered key manager. */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);

/* Register per-address-family state operations.  Fails with -ENOBUFS
 * if the slot for afinfo->family is already taken.
 */
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

/* Unregister per-family state operations; -EINVAL if @afinfo is not
 * the currently registered entry for its family.
 */
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

/* Look up the afinfo for @family.  On success the afinfo read lock is
 * LEFT HELD and must be released via xfrm_state_put_afinfo(); on
 * failure (unknown family / nothing registered) the lock is dropped
 * here and NULL is returned.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

/* Release the lock taken by a successful xfrm_state_get_afinfo(). */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Detach and release the tunnel SA referenced by @x.  The SA is fully
 * deleted once only the two bookkeeping users remain.
 */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

/* Compute the usable payload MTU for @x given the link MTU @mtu,
 * delegating to the transform type's get_mtu() when available.
 * (Function body continues below this chunk.)
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if
	    (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		/* No type hook: just subtract the transform header. */
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}

/* Finish constructing SA @x: run the family init_flags hook, resolve
 * inner/outer modes and the transform type, and initialize the type.
 * On success the state becomes XFRM_STATE_VALID.  Returns 0 or a
 * negative errno; on failure the state is left not-VALID.
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	/* Inner mode follows the selector family, outer mode the SA
	 * family; a family mismatch is only legal for tunnel modes.
	 */
	x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
	if (x->inner_mode == NULL)
		goto error;

	if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
	    family != x->sel.family)
		goto error;

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);

/* Boot-time setup: allocate the initial (8-bucket) bydst/bysrc/byspi
 * hash tables and the garbage-collection work item.
 */
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}

#ifdef CONFIG_AUDITSYSCALL
/* Append SA identification (security context, address pair, SPI) to
 * an in-progress audit record.
 */
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)x->props.saddr.a6),
				 NIP6(*(struct in6_addr *)x->id.daddr.a6));
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}

/* Append the packet's IP addresses (and, for IPv6, flow label) taken
 * from the skb network header to an in-progress audit record.
 */
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	struct iphdr *iph4;
	struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(iph4->saddr),
				 NIPQUAD(iph4->daddr));
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT
				 " flowlbl=0x%x%x%x",
				 NIP6(iph6->saddr),
				 NIP6(iph6->daddr),
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}

/* Emit an audit record for an SA add attempt and its result. */
void xfrm_audit_state_add(struct xfrm_state *x, int result,
			  u32 auid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

/* Emit an audit record for an SA delete attempt and its result. */
void xfrm_audit_state_delete(struct xfrm_state *x, int result,
			     u32 auid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);

/* Audit a replay counter overflow on SA @x for the given packet. */
void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replay-overflow");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	/* don't record the sequence number because it's inherent in this kind
	 * of audit message */
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);

/* Audit a packet rejected by the replay check (see xfrm_replay_check). */
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replayed-pkt");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}

/* Audit an SA lookup failure when no SPI/seq is available. */
void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);

/* Audit an SA lookup failure including the packet's SPI and seqno. */
void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
			       __be32 net_spi, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	spi = ntohl(net_spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);

/* Audit an integrity-check (ICV) failure; SPI/seqno are included only
 * if they can still be parsed out of the packet.
 */
void xfrm_audit_state_icvfail(struct xfrm_state *x,
			      struct sk_buff *skb, u8 proto)
{
	struct audit_buffer *audit_buf;
	__be32 net_spi;
	__be32 net_seq;

	audit_buf = xfrm_audit_start("SA-icv-failure");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
		u32 spi = ntohl(net_spi);
		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
				 spi, spi, ntohl(net_seq));
	}
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */