node.c revision c80262829769419e19527f972672e8df0480235a
1/* 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, Ericsson AB 5 * Copyright (c) 2005-2006, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the names of the copyright holders nor the names of its 17 * contributors may be used to endorse or promote products derived from 18 * this software without specific prior written permission. 19 * 20 * Alternatively, this software may be distributed under the terms of the 21 * GNU General Public License ("GPL") version 2 as published by the Free 22 * Software Foundation. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "port.h"
#include "name_distr.h"

void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

/* sorted list of nodes within cluster */
static struct tipc_node *tipc_nodes = NULL;

/* Serializes node creation; see comment on tipc_node_create() below */
static DEFINE_SPINLOCK(node_create_lock);

u32 tipc_own_tag = 0;

/**
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 *
 * Returns the node for @addr (pre-existing or newly created), or NULL if
 * allocation or cluster lookup/creation fails.
 */

struct tipc_node *tipc_node_create(u32 addr)
{
	struct cluster *c_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node **curr_node;

	spin_lock_bh(&node_create_lock);

	/*
	 * Search the address-sorted node list; if the node already exists
	 * return it, and stop early at the first larger address.
	 */
	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (addr < n_ptr->addr)
			break;
		if (addr == n_ptr->addr) {
			spin_unlock_bh(&node_create_lock);
			return n_ptr;
		}
	}

	/* GFP_ATOMIC: caller holds net_lock, so we must not sleep */
	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_create_lock);
		warn("Node creation failed, no memory\n");
		return NULL;
	}

	/* Find the owning cluster, creating it on demand */
	c_ptr = tipc_cltr_find(addr);
	if (!c_ptr) {
		c_ptr = tipc_cltr_create(addr);
	}
	if (!c_ptr) {
		spin_unlock_bh(&node_create_lock);
		kfree(n_ptr);
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_LIST_HEAD(&n_ptr->nsub);
	n_ptr->owner = c_ptr;
	tipc_cltr_attach_node(c_ptr, n_ptr);
	n_ptr->last_router = -1;	/* no routes known yet */

	/* Insert node into ordered list */
	for (curr_node = &tipc_nodes; *curr_node;
	     curr_node = &(*curr_node)->next) {
		if (addr < (*curr_node)->addr) {
			n_ptr->next = *curr_node;
			break;
		}
	}
	/* Either links into the middle of the list, or appends at the tail
	 * (n_ptr->next is already NULL from kzalloc) */
	(*curr_node) = n_ptr;
	spin_unlock_bh(&node_create_lock);
	return n_ptr;
}

/**
 * tipc_node_delete - free a node object
 *
 * NOTE(review): frees only the node structure itself; presumably the caller
 * has already unlinked it from tipc_nodes and its cluster -- confirm at
 * call sites.
 */
void tipc_node_delete(struct tipc_node *n_ptr)
{
	if (!n_ptr)
		return;

	dbg("node %x deleted\n", n_ptr->addr);
	kfree(n_ptr);
}


/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */

void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;

	info("Established link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	/* First working link: fill both active slots and report that
	 * contact with the node has been (re)established */
	if (!active[0]) {
		dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	/* Lower priority than the current active link(s): standby only */
	if (l_ptr->priority < active[0]->priority) {
		info("New link <%s> becomes standby\n", l_ptr->name);
		return;
	}
	/* New link is at least as good: start moving traffic onto it */
	tipc_link_send_duplicate(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		/* Equal priority: share load across the two active slots */
		active[0] = l_ptr;
		return;
	}
	/* Strictly higher priority: demote the old active link(s) */
	info("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		info("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
}

/**
 * node_select_active_links - select active link
 *
 * Rescans all of the node's links and installs the up link(s) with the
 * highest priority into active_links[0]/[1]; both slots are left NULL
 * if no link is up.
 */

static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			/* second link at the same priority shares the load */
			active[1] = l_ptr;
		}
	}
}

/**
 * tipc_node_link_down - handle loss of link
 *
 * Repairs the active-link slots and either starts changeover of traffic
 * (node still reachable) or declares contact with the node lost.
 */

void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	n_ptr->working_links--;

	/* A standby link going down needs no active-slot repair */
	if (!tipc_link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	/* Replace the lost link in whichever active slot(s) it occupied;
	 * if it filled both slots, do a full rescan of all links */
	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_changeover(l_ptr);
	else
		node_lost_contact(n_ptr);
}

/* Returns non-zero if the node has at least one usable (active) link */
int tipc_node_has_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

/* Returns non-zero if the node has more than one working link */
int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
{
	return n_ptr->working_links > 1;
}

/* Returns non-zero if at least one router word is set (last_router >= 0);
 * safe to call with a NULL node pointer */
static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
{
	return n_ptr && (n_ptr->last_router >= 0);
}

/* A node is "up" if it is reachable directly (active link) or indirectly
 * (via a known router) */
int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr);
}

/**
 * tipc_node_attach_link - attach a newly created link to its peer node
 *
 * Creates the node object on demand.  At most two links per node are
 * allowed, and only one per bearer.  Returns the node on success, NULL
 * on failure.
 */
struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
{
	struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr);

	if (!n_ptr)
		n_ptr = tipc_node_create(l_ptr->addr);
	if (n_ptr) {
		u32 bearer_id = l_ptr->b_ptr->identity;
		char addr_string[16];

		if (n_ptr->link_cnt >= 2) {
			err("Attempt to create third link to %s\n",
			    tipc_addr_string_fill(addr_string, n_ptr->addr));
			return NULL;
		}

		if (!n_ptr->links[bearer_id]) {
			n_ptr->links[bearer_id] = l_ptr;
			/* per-zone link count feeds tipc_node_get_links() sizing */
			tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
			n_ptr->link_cnt++;
			return n_ptr;
		}
		err("Attempt to establish second link on <%s> to %s\n",
		    l_ptr->b_ptr->publ.name,
		    tipc_addr_string_fill(addr_string, l_ptr->addr));
	}
	return NULL;
}

/* Undo tipc_node_attach_link() bookkeeping for a link being removed */
void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
	tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
	n_ptr->link_cnt--;
}

/*
 * Routing table management - five cases to handle:
 *
 * 1: A link towards a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *    system nodes within own cluster that the new destination
 *    can be reached via this node.
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *    of all system nodes within cluster:
 *    (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *    information about all cluster external and slave
 *    nodes which can be reached via this node.
 *    (node.establishedContact()==>network.sendExternalRoutes())
 *    (node.establishedContact()==>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *    containing information about the existence of the new node
 *    (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *    nodes within cluster that the new destination can not any
 *    longer be reached via this node.
 *    (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *    routing tables. Note: This is a completely node
 *    local operation.
 *    (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *    containing information about loss of the node
 *    (node.establishedContact()=>cluster.multicastLostRoute())
 *
 */

/*
 * node_established_contact - first working link to the node has come up
 *
 * Propagates the new route per the usage cases documented above and
 * synchronizes broadcast-link state with the peer.
 */
static void node_established_contact(struct tipc_node *n_ptr)
{
	struct cluster *c_ptr;

	dbg("node_established_contact:-> %x\n", n_ptr->addr);
	/* Announce local names to a cluster-local node we could not
	 * previously reach via any route */
	if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
		tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
	}

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();

	/* Slave nodes do not distribute routes */
	if (is_slave(tipc_own_addr))
		return;
	if (!in_own_cluster(n_ptr->addr)) {
		/* Usage case 1 (see above) */
		c_ptr = tipc_cltr_find(tipc_own_addr);
		if (!c_ptr)
			c_ptr = tipc_cltr_create(tipc_own_addr);
		if (c_ptr)
			tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
						  tipc_max_nodes);
		return;
	}

	c_ptr = n_ptr->owner;
	if (is_slave(n_ptr->addr)) {
		/* Usage case 2 (see above) */
		tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
		tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
		return;
	}

	if (n_ptr->bclink.supported) {
		tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
		/* tag counts broadcast-capable peers with lower addresses */
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}

	/* Case 3 (see above) */
	tipc_net_send_external_routes(n_ptr->addr);
	tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
	tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
				  tipc_highest_allowed_slave);
}

/*
 * node_cleanup_finished - deferred (signal handler) end of node cleanup
 *
 * Runs via tipc_k_signal() from node_lost_contact(); clears the flag that
 * blocks re-contact with the node while cleanup is in progress.
 */
static void node_cleanup_finished(unsigned long node_addr)
{
	struct tipc_node *n_ptr;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(node_addr);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		n_ptr->cleanup_required = 0;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
}

/*
 * node_lost_contact - last usable link to the node has gone down
 *
 * Discards pending broadcast-link state, updates routing tables per usage
 * cases 4/5 above, aborts any link changeover, notifies subscribers, and
 * defers final cleanup via node_cleanup_finished().
 */
static void node_lost_contact(struct tipc_node *n_ptr)
{
	struct cluster *c_ptr;
	struct tipc_node_subscr *ns, *tns;
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff* buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}
	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
		/* acked + 10000 forces release of all held broadcast buffers */
		tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
	}

	/* Update routing tables */
	if (is_slave(tipc_own_addr)) {
		tipc_net_remove_as_router(n_ptr->addr);
	} else {
		if (!in_own_cluster(n_ptr->addr)) {
			/* Case 4 (see above) */
			c_ptr = tipc_cltr_find(tipc_own_addr);
			tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
						   tipc_max_nodes);
		} else {
			/* Case 5 (see above) */
			c_ptr = tipc_cltr_find(n_ptr->addr);
			if (is_slave(n_ptr->addr)) {
				tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
							   tipc_max_nodes);
			} else {
				if (n_ptr->bclink.supported) {
					tipc_nmap_remove(&tipc_cltr_bcast_nodes,
							 n_ptr->addr);
					/* mirror of the tag++ on contact */
					if (n_ptr->addr < tipc_own_addr)
						tipc_own_tag--;
				}
				tipc_net_remove_as_router(n_ptr->addr);
				tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
							   LOWEST_SLAVE,
							   tipc_highest_allowed_slave);
			}
		}
	}
	/* Node still reachable via some router: keep its state intact */
	if (tipc_node_has_active_routes(n_ptr))
		return;

	info("Lost contact with %s\n",
	     tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
		ns->node = NULL;
		list_del_init(&ns->nodesub_list);
		tipc_k_signal((Handler)ns->handle_node_down,
			      (unsigned long)ns->usr_handle);
	}

	/* Prevent re-contact with node until all cleanup is done */
	n_ptr->cleanup_required = 1;
	tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
}

/**
 * tipc_node_select_next_hop - find the next-hop node for a message
 *
 * Called when cluster local lookup has failed.  Returns the node to
 * forward through, or NULL if no usable direct link or router exists.
 */

struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector)
{
	struct tipc_node *n_ptr;
	u32 router_addr;

	if (!tipc_addr_domain_valid(addr))
		return NULL;

	/* Look for direct link to destination processor */
	n_ptr = tipc_node_find(addr);
	if (n_ptr && tipc_node_has_active_links(n_ptr))
		return n_ptr;

	/* Cluster local system nodes *must* have direct links */
	if (!is_slave(addr) && in_own_cluster(addr))
		return NULL;

	/* Look for cluster local router with direct link to node */
	router_addr = tipc_node_select_router(n_ptr, selector);
	if (router_addr)
		return tipc_node_select(router_addr, selector);

	/* Slave nodes can only be accessed within own cluster via a
	   known router with direct link -- if no router was found, give up */
	if (is_slave(addr))
		return NULL;

	/* Inter zone/cluster -- find any direct link to remote cluster */
	addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	n_ptr = tipc_net_select_remote_node(addr, selector);
	if (n_ptr && tipc_node_has_active_links(n_ptr))
		return n_ptr;

	/* Last resort -- look for any router to anywhere in remote zone */
	router_addr = tipc_net_select_router(addr, selector);
	if (router_addr)
		return tipc_node_select(router_addr, selector);

	return NULL;
}

/**
 * tipc_node_select_router - select router to reach specified node
 *
 * Uses a deterministic and fair algorithm for selecting router node:
 * starts at a position derived from @ref within the router bitmap and
 * scans upward with wrap-around for the first set bit.  Returns the
 * router's network address, or 0 if no router is known.
 */

u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref)
{
	u32 ulim;
	u32 mask;
	u32 start;
	u32 r;

	if (!n_ptr)
		return 0;

	if (n_ptr->last_router < 0)
		return 0;
	/* highest router bit index worth scanning (32 bits per array word) */
	ulim = ((n_ptr->last_router + 1) * 32) - 1;

	/* Start entry must be random */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	start = ref & mask;
	r = start;

	/* Lookup upwards with wrap-around */
	do {
		if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
			break;
	} while (++r <= ulim);
	if (r > ulim) {
		/* wrapped: rescan from bit 1 up to the start position */
		r = 1;
		do {
			if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
				break;
		} while (++r < start);
		assert(r != start);
	}
	assert(r && (r <= ulim));
	return tipc_addr(own_zone(), own_cluster(), r);
}

/*
 * tipc_node_add_router - record that @router can reach this node
 *
 * Sets the router's bit in the bitmap, then recomputes last_router as the
 * index of the highest non-zero bitmap word.
 */
void tipc_node_add_router(struct tipc_node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

	n_ptr->routers[r_num / 32] =
		((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);
}

/*
 * tipc_node_remove_router - record that @router can no longer reach this node
 *
 * Clears the router's bit, recomputes last_router, and declares contact
 * lost if the node is no longer reachable at all.
 */
void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

	if (n_ptr->last_router < 0)
		return;		/* No routes */

	n_ptr->routers[r_num / 32] =
		((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);

	if (!tipc_node_is_up(n_ptr))
		node_lost_contact(n_ptr);
}

/*
 * tipc_node_get_nodes - config-request handler: list nodes in a domain
 *
 * Validates the TLV-encoded network address, then returns a reply buffer
 * holding one TIPC_TLV_NODE_INFO per node within the requested domain.
 */
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_node_info node_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	read_lock_bh(&tipc_net_lock);
	if (!tipc_nodes) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_none();
	}

	/* For now, get space for all other nodes
	   (will need to modify this when slave nodes are supported) */

	payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many nodes)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLVs for all nodes in scope */

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}

/*
 * tipc_node_get_links - config-request handler: list links in a domain
 *
 * Returns a reply buffer with one TIPC_TLV_LINK_INFO for the broadcast
 * link plus one per unicast link on every node within the domain.
 */
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_link_info link_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	if (tipc_mode != TIPC_NET_MODE)
		return tipc_cfg_reply_none();

	read_lock_bh(&tipc_net_lock);

	/* Get space for all unicast links + multicast link */

	payload_size = TLV_SPACE(sizeof(link_info)) *
		(tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many links)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLV for broadcast link */

	link_info.dest = htonl(tipc_own_addr & 0xfffff00);
	link_info.up = htonl(1);
	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		u32 i;

		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
			strcpy(link_info.str, n_ptr->links[i]->name);
			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
					    &link_info, sizeof(link_info));
		}
		tipc_node_unlock(n_ptr);
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}