/*
 * GRE over IPv4 demultiplexer driver
 *
 * Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/spinlock.h>
#include <net/protocol.h>
#include <net/gre.h>


/*
 * Table of registered GRE sub-protocol handlers, indexed by GRE version
 * (e.g. version 0 = classic GRE, version 1 = PPTP).  Readers access it
 * under rcu_read_lock(); writers serialize on gre_proto_lock and publish
 * or retire entries with the RCU pointer helpers below.
 */
static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
static DEFINE_SPINLOCK(gre_proto_lock);

/*
 * gre_add_protocol - register a handler for one GRE version slot.
 * @proto:   handler ops (rx handler and optional ICMP error handler)
 * @version: GRE version number the handler claims (< GREPROTO_MAX)
 *
 * Returns 0 on success, -1 if the version is out of range or the slot
 * is already taken.  (This module predates the use of proper errno
 * values here; callers only test for non-zero.)
 */
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
	if (version >= GREPROTO_MAX)
		goto err_out;

	spin_lock(&gre_proto_lock);
	if (gre_proto[version])
		goto err_out_unlock;

	/*
	 * RCU_INIT_POINTER (rather than rcu_assign_pointer) is sufficient:
	 * the spinlock serializes writers, and readers re-check the ops
	 * they dereference, so no publish barrier is strictly required
	 * before the pointer becomes visible.
	 */
	RCU_INIT_POINTER(gre_proto[version], proto);
	spin_unlock(&gre_proto_lock);
	return 0;

err_out_unlock:
	spin_unlock(&gre_proto_lock);
err_out:
	return -1;
}
EXPORT_SYMBOL_GPL(gre_add_protocol);

/*
 * gre_del_protocol - unregister the handler for one GRE version slot.
 * @proto:   the handler that was registered (must match the slot)
 * @version: GRE version number to release
 *
 * Returns 0 on success, -1 if the version is out of range or @proto is
 * not the currently registered handler for that slot.
 *
 * synchronize_rcu() after clearing the slot guarantees that no CPU is
 * still executing the old handler when this function returns, so the
 * caller may safely free the module/ops behind @proto afterwards.
 */
int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
	if (version >= GREPROTO_MAX)
		goto err_out;

	spin_lock(&gre_proto_lock);
	/* rcu_dereference_protected: write side, protected by the lock. */
	if (rcu_dereference_protected(gre_proto[version],
			lockdep_is_held(&gre_proto_lock)) != proto)
		goto err_out_unlock;
	RCU_INIT_POINTER(gre_proto[version], NULL);
	spin_unlock(&gre_proto_lock);
	synchronize_rcu();
	return 0;

err_out_unlock:
	spin_unlock(&gre_proto_lock);
err_out:
	return -1;
}
EXPORT_SYMBOL_GPL(gre_del_protocol);

/*
 * gre_rcv - IP-protocol 47 receive entry point.
 *
 * Demultiplexes an incoming GRE packet to the handler registered for
 * its version and hands off the skb.  On any failure the skb is freed
 * here and NET_RX_DROP is returned; on success the sub-protocol
 * handler's return value is propagated.
 */
static int gre_rcv(struct sk_buff *skb)
{
	const struct gre_protocol *proto;
	u8 ver;
	int ret;

	/*
	 * Make sure enough of the GRE header is in the linear area before
	 * reading it.  NOTE(review): 12 is a magic number here — it equals
	 * 3 * GRE_HEADER_SECTION (base header plus optional csum and key
	 * words); confirm intent before changing.
	 */
	if (!pskb_may_pull(skb, 12))
		goto drop;

	/*
	 * The version field lives in the low bits of the second GRE header
	 * byte; 0x7f strips only the top flag bit.  NOTE(review): per
	 * RFC 2784/2637 the version proper is the low 3 bits — the wider
	 * mask is historical, and out-of-range values fall out via the
	 * GREPROTO_MAX check below.
	 */
	ver = skb->data[1]&0x7f;
	if (ver >= GREPROTO_MAX)
		goto drop;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (!proto || !proto->handler)
		goto drop_unlock;
	ret = proto->handler(skb);
	rcu_read_unlock();
	return ret;

drop_unlock:
	rcu_read_unlock();
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * gre_err - ICMP error callback for IP protocol 47.
 *
 * @skb points at the original (offending) IP packet quoted inside the
 * ICMP error, so the GRE version byte is found just past the quoted IP
 * header (ihl words in).  Forwards the error to the registered
 * sub-protocol's err_handler, if any; otherwise silently ignores it.
 */
static void gre_err(struct sk_buff *skb, u32 info)
{
	const struct gre_protocol *proto;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	/* Same version extraction (and same 0x7f mask caveat) as gre_rcv(). */
	u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;

	if (ver >= GREPROTO_MAX)
		return;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (proto && proto->err_handler)
		proto->err_handler(skb, info);
	rcu_read_unlock();
}

/*
 * gre_gso_segment - GSO callback: segment a GRE-encapsulated super-packet.
 *
 * Strips the outer headers down to the inner payload, lets the inner
 * protocol's GSO code split it, then re-pushes the tunnel headers onto
 * every resulting segment, recomputing the GRE checksum per segment
 * when the GRE_CSUM flag is present.
 *
 * Returns the list of segments, or an ERR_PTR on failure (-EINVAL for
 * unsupported gso_type bits or a truncated GRE header).
 */
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl = GRE_HEADER_SECTION;	/* GRE header length: base + options */
	struct gre_base_hdr *greh;
	/* Save outer-frame geometry so it can be restored on each segment. */
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	/* Reject gso_type combinations this offload does not understand. */
	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	/* Each optional field (key, seq, csum) adds one 4-byte section. */
	if (greh->flags & GRE_KEY)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_SEQ)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_CSUM) {
		ghl += GRE_HEADER_SECTION;
		csum = true;
	} else
		csum = false;

	/* setup inner skb: make it look like a plain inner-protocol frame. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;
	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs))
		goto out;

	skb = segs;
	/* Total outer header (MAC + IP + GRE) to restore on each segment. */
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			/*
			 * skb_checksum() over shared frags would race with
			 * other users of the pages; linearize first.
			 */
			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			/*
			 * The checksum field is the first 4-byte word after
			 * the base header; zero it, then fold a full-packet
			 * checksum into its low 16 bits.
			 */
			greh = (struct gre_base_hdr *)(skb->data);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
		}
		/* Re-push the remaining outer headers and restore geometry. */
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}

/*
 * gre_gso_send_check - GSO sanity check: only encapsulated skbs may be
 * segmented by this offload.
 */
static int gre_gso_send_check(struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return -EINVAL;
	return 0;
}

/* IP protocol 47 (GRE) receive/error hooks. */
static const struct net_protocol net_gre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
	.netns_ok    = 1,
};

/* GSO offload callbacks for IP protocol 47. */
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_send_check = gre_gso_send_check,
		.gso_segment = gre_gso_segment,
	},
};

/*
 * Module init: register the GRE protocol handler and its GSO offload.
 * The offload registration is rolled back-free: on failure the protocol
 * handler registered first is removed again.
 */
static int __init gre_init(void)
{
	pr_info("GRE over IPv4 demultiplexor driver\n");

	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
		pr_err("can't add protocol\n");
		return -EAGAIN;
	}

	if (inet_add_offload(&gre_offload, IPPROTO_GRE)) {
		pr_err("can't add protocol offload\n");
		/* Undo the protocol registration so we leave no dangling hook. */
		inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
		return -EAGAIN;
	}

	return 0;
}

/* Module exit: tear down in reverse order of registration. */
static void __exit gre_exit(void)
{
	inet_del_offload(&gre_offload, IPPROTO_GRE);
	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}

module_init(gre_init);
module_exit(gre_exit);

MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");