Lines Matching defs:mc

609 struct ipv6_mc_socklist *mc;
614 for_each_pmc_rcu(np, mc) {
615 if (ipv6_addr_equal(&mc->addr, mc_addr))
618 if (!mc) {
622 read_lock(&mc->sflock);
623 psl = mc->sflist;
625 rv = mc->sfmode == MCAST_EXCLUDE;
633 if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
635 if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
638 read_unlock(&mc->sflock);
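
The matches above (609-638) appear to come from the kernel's net/ipv6/mcast.c, in the per-socket check (likely inet6_mc_check()): for each group the socket has joined, the packet's source address is tested against the socket's filter list mc->sflist under mc->sfmode, where MCAST_INCLUDE means "accept only listed sources" and MCAST_EXCLUDE means "accept everything except listed sources". As an illustration only, here is a minimal userspace sketch that installs such a filter with the RFC 3678 MCAST_JOIN_SOURCE_GROUP socket option; the interface name, group and source address are placeholders, and this shows the user-visible API rather than the kernel path itself.

/*
 * Userspace sketch (not kernel code): install an INCLUDE-mode source filter
 * with MCAST_JOIN_SOURCE_GROUP (RFC 3678 API).  The kernel records this as
 * the socket's sflist/sfmode state that the check above consults per packet.
 * Group, source and interface name below are placeholder values.
 */
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct group_source_req gsr;
    struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
    struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;
    int sock = socket(AF_INET6, SOCK_DGRAM, 0);

    if (sock < 0) {
        perror("socket");
        return 1;
    }

    memset(&gsr, 0, sizeof(gsr));
    gsr.gsr_interface = if_nametoindex("eth0");              /* placeholder */
    grp->sin6_family = AF_INET6;
    inet_pton(AF_INET6, "ff15::1234", &grp->sin6_addr);      /* placeholder */
    src->sin6_family = AF_INET6;
    inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);     /* placeholder */

    /* Only packets from 2001:db8::1 to ff15::1234 now pass the filter. */
    if (setsockopt(sock, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
                   &gsr, sizeof(gsr)) < 0)
        perror("MCAST_JOIN_SOURCE_GROUP");

    close(sock);
    return 0;
}
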
644 static void igmp6_group_added(struct ifmcaddr6 *mc)
646 struct net_device *dev = mc->idev->dev;
649 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
653 spin_lock_bh(&mc->mca_lock);
654 if (!(mc->mca_flags&MAF_LOADED)) {
655 mc->mca_flags |= MAF_LOADED;
656 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
659 spin_unlock_bh(&mc->mca_lock);
661 if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
664 if (mld_in_v1_mode(mc->idev)) {
665 igmp6_join_group(mc);
670 mc->mca_crcount = mc->idev->mc_qrv;
671 mld_ifc_event(mc->idev);
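
igmp6_group_added() (644-671) programs the group into the device and, unless the entry is marked MAF_NOREPORT, announces it via MLD (an MLDv1 join, or an MLDv2 ifc event with mca_crcount retransmissions). The early return at 649 skips groups whose multicast scope is below link-local, i.e. interface-local groups. The scope is just the low nibble of the second address byte; a small standalone sketch of that check (the numeric scope values follow RFC 4291 and are assumed to match the kernel macro used here):

/*
 * Userspace sketch: the multicast scope is the low nibble of byte 1 of the
 * address (RFC 4291).  Scope 1 = interface-local, 2 = link-local; anything
 * below link-local is skipped by the added/dropped paths above.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static unsigned int mc_scope(const struct in6_addr *a)
{
    return a->s6_addr[1] & 0x0f;    /* same idea as IPV6_ADDR_MC_SCOPE() */
}

int main(void)
{
    const char *samples[] = { "ff01::1", "ff02::1", "ff05::2", "ff0e::db8" };
    struct in6_addr addr;

    for (int i = 0; i < 4; i++) {
        inet_pton(AF_INET6, samples[i], &addr);
        printf("%-10s scope %#x  %s\n", samples[i], mc_scope(&addr),
               mc_scope(&addr) < 0x2 ? "skipped (interface-local)"
                                     : "passes the scope check");
    }
    return 0;
}

Note that ff02::1 (all nodes) passes this scope check and is mapped to the device filter, but mca_alloc() further down (845-847) marks it MAF_NOREPORT, so the test at 661 keeps MLD quiet for it.
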
674 static void igmp6_group_dropped(struct ifmcaddr6 *mc)
676 struct net_device *dev = mc->idev->dev;
679 if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
683 spin_lock_bh(&mc->mca_lock);
684 if (mc->mca_flags&MAF_LOADED) {
685 mc->mca_flags &= ~MAF_LOADED;
686 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
690 if (mc->mca_flags & MAF_NOREPORT)
692 spin_unlock_bh(&mc->mca_lock);
694 if (!mc->idev->dead)
695 igmp6_leave_group(mc);
697 spin_lock_bh(&mc->mca_lock);
698 if (del_timer(&mc->mca_timer))
699 atomic_dec(&mc->mca_refcnt);
701 ip6_mc_clear_src(mc);
702 spin_unlock_bh(&mc->mca_lock);
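
igmp6_group_dropped() (674-702) is the mirror image: it removes the link-layer mapping, sends an MLD leave unless the device is going away (694), cancels any pending report timer while dropping the timer's reference (698-699), and clears the source lists. In both the added and dropped paths, ndisc_mc_map() converts the IPv6 group into a link-layer multicast address for dev_mc_add()/dev_mc_del() (those calls sit on lines not matched here). For Ethernet the mapping is fixed by RFC 2464: 33:33 followed by the last four octets of the group address. A standalone sketch of that mapping, as an illustration rather than a copy of the kernel helper:

/*
 * Userspace sketch: RFC 2464 mapping of an IPv6 multicast address to an
 * Ethernet multicast MAC (33:33:xx:xx:xx:xx), which is what ndisc_mc_map()
 * produces for Ethernet-like devices before dev_mc_add()/dev_mc_del().
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static void ipv6_to_eth_mcast(const struct in6_addr *addr, unsigned char *mac)
{
    mac[0] = 0x33;
    mac[1] = 0x33;
    mac[2] = addr->s6_addr[12];
    mac[3] = addr->s6_addr[13];
    mac[4] = addr->s6_addr[14];
    mac[5] = addr->s6_addr[15];
}

int main(void)
{
    struct in6_addr group;
    unsigned char mac[6];

    inet_pton(AF_INET6, "ff02::1:ff00:1234", &group);    /* placeholder */
    ipv6_to_eth_mcast(&group, mac);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}
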
809 static void mca_get(struct ifmcaddr6 *mc)
811 atomic_inc(&mc->mca_refcnt);
814 static void ma_put(struct ifmcaddr6 *mc)
816 if (atomic_dec_and_test(&mc->mca_refcnt)) {
817 in6_dev_put(mc->idev);
818 kfree(mc);
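
mca_get() and ma_put() (809-818) are the reference-count pair for struct ifmcaddr6: the entry and its inet6_dev reference are released only when the last reference goes away, which is why the dropped path above decrements the count when it kills a pending timer. A minimal userspace sketch of the same get/put pattern using C11 atomics (names and layout are illustrative, not the kernel's):

/*
 * Userspace sketch of the get/put refcount pattern used by mca_get()/ma_put():
 * the object is freed only by whoever drops the last reference.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mc_entry {
    atomic_int refcnt;
    /* ... group address, timer state, etc. would live here ... */
};

static struct mc_entry *mc_entry_alloc(void)
{
    struct mc_entry *e = calloc(1, sizeof(*e));

    if (e)
        atomic_store(&e->refcnt, 1);    /* caller holds the first reference */
    return e;
}

static void mc_entry_get(struct mc_entry *e)
{
    atomic_fetch_add(&e->refcnt, 1);
}

static void mc_entry_put(struct mc_entry *e)
{
    /* fetch_sub returns the old value; 1 means we held the last reference */
    if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
        printf("last reference dropped, freeing\n");
        free(e);
    }
}

int main(void)
{
    struct mc_entry *e = mc_entry_alloc();

    mc_entry_get(e);    /* e.g. a pending timer takes a reference */
    mc_entry_put(e);    /* timer cancelled: drop its reference    */
    mc_entry_put(e);    /* original owner: count hits 0, freed    */
    return 0;
}
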
825 struct ifmcaddr6 *mc;
827 mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
828 if (mc == NULL)
831 setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
833 mc->mca_addr = *addr;
834 mc->idev = idev; /* reference taken by caller */
835 mc->mca_users = 1;
837 mc->mca_cstamp = mc->mca_tstamp = jiffies;
838 atomic_set(&mc->mca_refcnt, 1);
839 spin_lock_init(&mc->mca_lock);
842 mc->mca_sfmode = MCAST_EXCLUDE;
843 mc->mca_sfcount[MCAST_EXCLUDE] = 1;
845 if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
846 IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
847 mc->mca_flags |= MAF_NOREPORT;
849 return mc;
857 struct ifmcaddr6 *mc;
875 for (mc = idev->mc_list; mc; mc = mc->next) {
876 if (ipv6_addr_equal(&mc->mca_addr, addr)) {
877 mc->mca_users++;
879 ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
886 mc = mca_alloc(idev, addr);
887 if (!mc) {
893 mc->next = idev->mc_list;
894 idev->mc_list = mc;
899 mca_get(mc);
902 mld_del_delrec(idev, &mc->mca_addr);
903 igmp6_group_added(mc);
904 ma_put(mc);
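
The 857-904 matches are the device-side add path (ipv6_dev_mc_inc(), by the look of it): if the group is already on idev->mc_list only mca_users is bumped and an EXCLUDE-mode source count added, otherwise mca_alloc() builds a new entry, it is linked into the list, any pending delete record is cancelled and igmp6_group_added() announces it. The usual trigger is a socket joining a group. A userspace sketch: join with the classic IPV6_JOIN_GROUP option and then dump /proc/net/igmp6, whose users column reflects mca_users; the group and interface are placeholders and the proc column layout is described from memory.

/*
 * Userspace sketch: joining a group with IPV6_JOIN_GROUP drives the kernel
 * path above (a new ifmcaddr6 entry, or mca_users++ on an existing one).
 * After the join, /proc/net/igmp6 shows the per-device entry and its user
 * count.  Group and interface name are placeholders.
 */
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct ipv6_mreq mreq;
    char line[256];
    FILE *f;
    int sock = socket(AF_INET6, SOCK_DGRAM, 0);

    if (sock < 0) {
        perror("socket");
        return 1;
    }

    memset(&mreq, 0, sizeof(mreq));
    inet_pton(AF_INET6, "ff15::42", &mreq.ipv6mr_multiaddr);  /* placeholder */
    mreq.ipv6mr_interface = if_nametoindex("eth0");           /* placeholder */

    if (setsockopt(sock, IPPROTO_IPV6, IPV6_JOIN_GROUP,
                   &mreq, sizeof(mreq)) < 0)
        perror("IPV6_JOIN_GROUP");

    /* Columns (roughly): ifindex, device, group (hex), users, flags, timer. */
    f = fopen("/proc/net/igmp6", "r");
    if (f) {
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
    }

    close(sock);    /* closing the socket drops the membership again */
    return 0;
}
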
961 struct ifmcaddr6 *mc;
968 for (mc = idev->mc_list; mc; mc = mc->next) {
969 if (ipv6_addr_equal(&mc->mca_addr, group))
972 if (mc) {
976 spin_lock_bh(&mc->mca_lock);
977 for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
984 mc->mca_sfcount[MCAST_EXCLUDE];
986 rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
987 spin_unlock_bh(&mc->mca_lock);
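
The last block (961-987, apparently ipv6_chk_mcast_addr()) decides whether the device should accept a packet for a given group and, optionally, source: a source listed in mca_sources is accepted if any INCLUDE-mode listener named it or if not every EXCLUDE-mode listener excluded it, and an unlisted source is accepted only when at least one listener is in EXCLUDE mode. A standalone restatement of that decision with illustrative types (not the kernel's structs):

/*
 * Userspace sketch of the accept/drop decision made above.  The counters
 * mirror the kernel's sf_count[]/mca_sfcount[] but the types here are
 * illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct src_entry {
    unsigned int include_count;    /* INCLUDE listeners naming this source  */
    unsigned int exclude_count;    /* EXCLUDE listeners excluding it        */
};

/* total_exclude = number of EXCLUDE-mode listeners for the whole group */
static bool accept_source(const struct src_entry *src, unsigned int total_exclude)
{
    if (src)    /* source appears in the group's source list */
        return src->include_count || src->exclude_count != total_exclude;
    /* unknown source: only EXCLUDE-mode listeners will take it */
    return total_exclude != 0;
}

int main(void)
{
    struct src_entry listed = { .include_count = 0, .exclude_count = 2 };

    /* 2 EXCLUDE listeners, both excluded this source -> drop (0)   */
    printf("%d\n", accept_source(&listed, 2));
    /* 3 EXCLUDE listeners, only 2 excluded it -> accept (1)        */
    printf("%d\n", accept_source(&listed, 3));
    /* unknown source, no EXCLUDE listeners at all -> drop (0)      */
    printf("%d\n", accept_source(NULL, 0));
    return 0;
}
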