diag.c revision 257b529876cb45ec791eaa89e3d2ee0d16b49383
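
/*
 * unix_diag: sock_diag handler for AF_UNIX sockets.
 *
 * Answers SOCK_DIAG_BY_FAMILY netlink requests, either dumping every
 * socket whose state matches udiag_states or looking a single socket
 * up by inode, and attaches the attributes selected by the udiag_show
 * bits: bound name, backing vfs object, peer inode, pending
 * connections, receive queue length and memory info.
 */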
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>

#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))

static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;
	char *s;

	if (addr) {
		s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
		memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}

static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->dentry;
	struct unix_diag_vfs *uv;

	if (dentry) {
		uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
		uv->udiag_vfs_ino = dentry->d_inode->i_ino;
		uv->udiag_vfs_dev = dentry->d_sb->s_dev;
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}

static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
rtattr_failure:
	return -EMSGSIZE;
}

static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);
		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
				sk->sk_receive_queue.qlen * sizeof(u32));
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

rtattr_failure:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	RTA_PUT_U32(nlskb, UNIX_DIAG_RQLEN, sk->sk_receive_queue.qlen);
	return 0;

rtattr_failure:
	return -EMSGSIZE;
}

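/*
 * Fill one unix_diag_msg for @sk and append the attributes requested
 * in req->udiag_show.  Returns skb->len on success, or -EMSGSIZE when
 * the reply skb runs out of room, in which case the partial message
 * is trimmed off again.
 */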
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags, int sk_ino)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
	nlh->nlmsg_flags = flags;

	rep = NLMSG_DATA(nlh);

	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto nlmsg_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
}

static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;

	req = NLMSG_DATA(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;
		struct hlist_node *node;

		num = 0;
		sk_for_each(sk, node, &unix_socket_table[slot]) {
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

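/*
 * Scan the whole unix socket hash table for a socket with the given
 * inode number.  Returns it with a reference held, or NULL if there
 * is no match.
 */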
static struct sock *unix_lookup_by_ino(int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i <= UNIX_HASH_SIZE; i++) {
		struct hlist_node *node;

		sk_for_each(sk, node, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}

static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		kfree_skb(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP)
		return netlink_dump_start(sock_diag_nlsk, skb, h,
					  unix_diag_dump, NULL, 0);
	else
		return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
}

static struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);