af_iucv.c revision aac6399c6a08334282653a86ce760cff3e1755b7
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)

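/* Helper macros to wait on a socket: sleep until @condition is true, the
 * timeout expires, a signal is pending, or a socket error occurs.  The
 * socket lock is released while sleeping and re-acquired before the
 * condition is re-checked.
 */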
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

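/* Free all skbs queued on @list; for the HiperSockets transport, also drop
 * the net_device reference that was taken for each queued skb.
 */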
static void iucv_skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		if (skb->dev)
			dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths.
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	struct hlist_node *node;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		iucv_skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			if (iucv->path) {
				err = pr_iucv->path_sever(iucv->path, NULL);
				iucv_path_free(iucv->path);
				iucv->path = NULL;
			}
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;
	struct hlist_node *node;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, node, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further, if the socket
 * data length is greater than 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct net *net = sock_net(sock);
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (err) {
		skb_unlink(nskb, &iucv->send_skb_q);
		dev_put(nskb->dev);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return err;
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

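/* sk_destruct callback: free any skbs still queued on the socket */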
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err, blen;
	struct sk_buff *skb;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			/* send fin */
			blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
			skb = sock_alloc_send_skb(sk, blen, 1, &err);
			if (skb) {
				skb_reserve(skb, blen);
				err = afiucv_hs_send(NULL, sk, skb,
						     AF_IUCV_FLAG_FIN);
			}
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:   /* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		iucv_skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		break;

	default:
		/* nothing to do here */
		break;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

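/* Initialize a freshly allocated sock; a child inherits the parent's type */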
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */

	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char name[12];
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

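/* HiperSockets transport connect: send a SYN; the connection is up once the
 * peer answers with SYN|ACK (see afiucv_hs_callback_synack()).
 */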
static int afiucv_hs_connect(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	int err = 0;

	/* send syn */
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (!skb) {
		err = -ENOMEM;
		goto done;
	}
	skb->dev = NULL;
	skb_reserve(skb, blen);
	err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
done:
	return err;
}

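/* z/VM IUCV transport connect: allocate and connect an IUCV path and map
 * IUCV return codes to errno values.
 */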
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = afiucv_hs_connect(sock);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
		pr_iucv->path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

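/* Send data on a connected socket; each sendmsg() call produces exactly one
 * iucv message (see the comment on skb allocation below).
 */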
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
		cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
			break;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the limit of outstanding messages for the iucv path
	 * has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
				" exceeds message limit\n",
				appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	if (skb->dev)
		dev_put(skb->dev);
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

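/* Receive data: dequeue the next skb and copy it to user space; afterwards,
 * refill the receive queue from the backlog and message queues and, on the
 * HiperSockets transport, confirm received messages to the peer.
 */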
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb, *sskb;
	int blen;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen   = skb->len;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
			CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		atomic_inc(&iucv->msg_recv);

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				/* send WIN to peer */
				blen = sizeof(struct af_iucv_trans_hdr) +
					ETH_HLEN;
				sskb = sock_alloc_send_skb(sk, blen, 1, &err);
				if (sskb) {
					skb_reserve(sskb, blen);
					err = afiucv_hs_send(NULL, sk, sskb,
							     AF_IUCV_FLAG_WIN);
				}
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

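/* Poll helper for listening sockets: report readable as soon as a queued
 * connection has reached the connected state.
 */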
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
					0, (void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = pr_iucv->path_quiesce(iucv->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}


/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

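/* message_pending callback: receive the message immediately if the receive
 * buffer has room; otherwise save it on the message queue for later
 * processing by iucv_process_message_q().
 */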
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

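/* message_complete callback: remove the skb with the matching message tag
 * from the send queue, free it, and wake up senders waiting on the message
 * limit.
 */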
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}
	BUG_ON(!this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}

}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/
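/* Reuse a received skb for the reply: convert the addresses back to EBCDIC
 * and swap the source and destination fields of the transport header.
 */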
1888static void afiucv_swap_src_dest(struct sk_buff *skb)
1889{
1890	struct af_iucv_trans_hdr *trans_hdr =
1891				(struct af_iucv_trans_hdr *)skb->data;
1892	char tmpID[8];
1893	char tmpName[8];
1894
1895	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1896	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1897	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1898	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1899	memcpy(tmpID, trans_hdr->srcUserID, 8);
1900	memcpy(tmpName, trans_hdr->srcAppName, 8);
1901	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1902	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1903	memcpy(trans_hdr->destUserID, tmpID, 8);
1904	memcpy(trans_hdr->destAppName, tmpName, 8);
1905	skb_push(skb, ETH_HLEN);
1906	memset(skb->data, 0, ETH_HLEN);
1907}
1908
1909/**
1910 * afiucv_hs_callback_syn - react on received SYN
1911 **/
1912static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1913{
1914	struct sock *nsk;
1915	struct iucv_sock *iucv, *niucv;
1916	struct af_iucv_trans_hdr *trans_hdr;
1917	int err;
1918
1919	iucv = iucv_sk(sk);
1920	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
1921	if (!iucv) {
1922		/* no sock - connection refused */
1923		afiucv_swap_src_dest(skb);
1924		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1925		err = dev_queue_xmit(skb);
1926		goto out;
1927	}
1928
1929	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
1930	bh_lock_sock(sk);
1931	if ((sk->sk_state != IUCV_LISTEN) ||
1932	    sk_acceptq_is_full(sk) ||
1933	    !nsk) {
1934		/* error on server socket - connection refused */
1935		if (nsk)
1936			sk_free(nsk);
1937		afiucv_swap_src_dest(skb);
1938		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1939		err = dev_queue_xmit(skb);
1940		bh_unlock_sock(sk);
1941		goto out;
1942	}
1943
1944	niucv = iucv_sk(nsk);
1945	iucv_sock_init(nsk, sk);
1946	niucv->transport = AF_IUCV_TRANS_HIPER;
1947	niucv->msglimit = iucv->msglimit;
1948	if (!trans_hdr->window)
1949		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
1950	else
1951		niucv->msglimit_peer = trans_hdr->window;
1952	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
1953	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
1954	memcpy(niucv->src_name, iucv->src_name, 8);
1955	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1956	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1957	afiucv_swap_src_dest(skb);
1958	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
1959	trans_hdr->window = niucv->msglimit;
1960	/* if receiver acks the xmit connection is established */
1961	err = dev_queue_xmit(skb);
1962	if (!err) {
1963		iucv_accept_enqueue(sk, nsk);
1964		nsk->sk_state = IUCV_CONNECTED;
1965		sk->sk_data_ready(sk, 1);
1966	} else
1967		iucv_sock_kill(nsk);
1968	bh_unlock_sock(sk);
1969
1970out:
1971	return NET_RX_SUCCESS;
1972}
1973
1974/**
1975 * afiucv_hs_callback_synack() - react on received SYN-ACK
1976 **/
1977static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1978{
1979	struct iucv_sock *iucv = iucv_sk(sk);
1980	struct af_iucv_trans_hdr *trans_hdr =
1981					(struct af_iucv_trans_hdr *)skb->data;
1982
1983	if (!iucv)
1984		goto out;
1985	if (sk->sk_state != IUCV_BOUND)
1986		goto out;
1987	bh_lock_sock(sk);
1988	iucv->msglimit_peer = trans_hdr->window;
1989	sk->sk_state = IUCV_CONNECTED;
1990	sk->sk_state_change(sk);
1991	bh_unlock_sock(sk);
1992out:
1993	kfree_skb(skb);
1994	return NET_RX_SUCCESS;
1995}
1996
1997/**
1998 * afiucv_hs_callback_synfin() - react on received SYN_FIN
1999 **/
2000static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2001{
2002	struct iucv_sock *iucv = iucv_sk(sk);
2003
2004	if (!iucv)
2005		goto out;
2006	if (sk->sk_state != IUCV_BOUND)
2007		goto out;
2008	bh_lock_sock(sk);
2009	sk->sk_state = IUCV_DISCONN;
2010	sk->sk_state_change(sk);
2011	bh_unlock_sock(sk);
2012out:
2013	kfree_skb(skb);
2014	return NET_RX_SUCCESS;
2015}
2016
2017/**
2018 * afiucv_hs_callback_fin() - react on received FIN
2019 **/
2020static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2021{
2022	struct iucv_sock *iucv = iucv_sk(sk);
2023
2024	/* other end of connection closed */
2025	if (iucv) {
2026		bh_lock_sock(sk);
2027		sk->sk_state = IUCV_DISCONN;
2028		sk->sk_state_change(sk);
2029		bh_unlock_sock(sk);
2030	}
2031	kfree_skb(skb);
2032	return NET_RX_SUCCESS;
2033}
2034
2035/**
2036 * afiucv_hs_callback_win() - react on received WIN
2037 **/
2038static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2039{
2040	struct iucv_sock *iucv = iucv_sk(sk);
2041	struct af_iucv_trans_hdr *trans_hdr =
2042					(struct af_iucv_trans_hdr *)skb->data;
2043
2044	if (!iucv)
2045		return NET_RX_SUCCESS;
2046
2047	if (sk->sk_state != IUCV_CONNECTED)
2048		return NET_RX_SUCCESS;
2049
2050	atomic_sub(trans_hdr->window, &iucv->msg_sent);
2051	iucv_sock_wake_msglim(sk);
2052	return NET_RX_SUCCESS;
2053}
2054
2055/**
2056 * afiucv_hs_callback_rx() - react on received data
2057 **/
2058static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2059{
2060	struct iucv_sock *iucv = iucv_sk(sk);
2061
2062	if (!iucv) {
2063		kfree_skb(skb);
2064		return NET_RX_SUCCESS;
2065	}
2066
2067	if (sk->sk_state != IUCV_CONNECTED) {
2068		kfree_skb(skb);
2069		return NET_RX_SUCCESS;
2070	}
2071
2072	/* drop frames that carry no payload beyond the transport header */
2073	if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
2074		kfree_skb(skb);
2075		return NET_RX_SUCCESS;
2076	}
2077	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2078	skb_reset_transport_header(skb);
2079	skb_reset_network_header(skb);
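	/* queue in arrival order: once a backlog exists, new skbs must go
	 * to the backlog too, or they would overtake the skbs the receive
	 * queue could not take earlier
	 */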
2080	spin_lock(&iucv->message_q.lock);
2081	if (skb_queue_empty(&iucv->backlog_skb_q)) {
2082		if (sock_queue_rcv_skb(sk, skb)) {
2083			/* handle rcv queue full */
2084			skb_queue_tail(&iucv->backlog_skb_q, skb);
2085		}
2086	} else
2087		skb_queue_tail(&iucv->backlog_skb_q, skb);
2088	spin_unlock(&iucv->message_q.lock);
2089	return NET_RX_SUCCESS;
2090}
2091
2092/**
2093 * afiucv_hs_rcv() - receive handler for frames arriving through the
2094 *                   HiperSockets transport
2095 *                   (runs in netif RX softirq context)
2096 **/
2097static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2098	struct packet_type *pt, struct net_device *orig_dev)
2099{
2100	struct hlist_node *node;
2101	struct sock *sk;
2102	struct iucv_sock *iucv;
2103	struct af_iucv_trans_hdr *trans_hdr;
2104	char nullstring[8];
2105	int err = 0;
2106
2107	skb_pull(skb, ETH_HLEN);
2108	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
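	/* the addressing fields travel in EBCDIC on the wire (see
	 * afiucv_hs_send()); convert them to ASCII in place so they can
	 * be compared against the IDs stored in the iucv sockets
	 */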
2109	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2110	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2111	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2112	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2113	memset(nullstring, 0, sizeof(nullstring));
2114	iucv = NULL;
2115	sk = NULL;
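	/* match the frame to a socket: a SYN may only hit a bound socket
	 * that has no peer set yet (a listener), any other frame must
	 * match both the local and the remote address of a connection
	 */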
2116	read_lock(&iucv_sk_list.lock);
2117	sk_for_each(sk, node, &iucv_sk_list.head) {
2118		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2119			if ((!memcmp(&iucv_sk(sk)->src_name,
2120				     trans_hdr->destAppName, 8)) &&
2121			    (!memcmp(&iucv_sk(sk)->src_user_id,
2122				     trans_hdr->destUserID, 8)) &&
2123			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2124			    (!memcmp(&iucv_sk(sk)->dst_user_id,
2125				     nullstring, 8))) {
2126				iucv = iucv_sk(sk);
2127				break;
2128			}
2129		} else {
2130			if ((!memcmp(&iucv_sk(sk)->src_name,
2131				     trans_hdr->destAppName, 8)) &&
2132			    (!memcmp(&iucv_sk(sk)->src_user_id,
2133				     trans_hdr->destUserID, 8)) &&
2134			    (!memcmp(&iucv_sk(sk)->dst_name,
2135				     trans_hdr->srcAppName, 8)) &&
2136			    (!memcmp(&iucv_sk(sk)->dst_user_id,
2137				     trans_hdr->srcUserID, 8))) {
2138				iucv = iucv_sk(sk);
2139				break;
2140			}
2141		}
2142	}
2143	read_unlock(&iucv_sk_list.lock);
2144	if (!iucv)
2145		sk = NULL;
2146
2147	/* no sock: how should we send with no sock?
2148	 * 1) send without sock, with no send-rc checking?
2149	 * 2) introduce a default sock to handle such cases?
2150	 *
2151	 *  SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
2152	 *  data -> send FIN
2153	 *  SYN|ACK, SYN|FIN, FIN -> no action?
2154	 */
2155
2156	switch (trans_hdr->flags) {
2157	case AF_IUCV_FLAG_SYN:
2158		/* connect request */
2159		err = afiucv_hs_callback_syn(sk, skb);
2160		break;
2161	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2162		/* connect request confirmed */
2163		err = afiucv_hs_callback_synack(sk, skb);
2164		break;
2165	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2166		/* connect request refused */
2167		err = afiucv_hs_callback_synfin(sk, skb);
2168		break;
2169	case (AF_IUCV_FLAG_FIN):
2170		/* close request */
2171		err = afiucv_hs_callback_fin(sk, skb);
2172		break;
2173	case (AF_IUCV_FLAG_WIN):
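		/* window update, possibly with data piggybacked onto it */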
2174		err = afiucv_hs_callback_win(sk, skb);
2175		if (skb->len > sizeof(struct af_iucv_trans_hdr))
2176			err = afiucv_hs_callback_rx(sk, skb);
2177		else
2178			kfree_skb(skb);
2179		break;
2180	case 0:
2181		/* plain data frame */
2182		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
2183		       CB_TRGCLS_LEN);
2184		err = afiucv_hs_callback_rx(sk, skb);
2185		break;
2186	default:
2187		kfree_skb(skb);	/* drop frames with unknown flag combinations */
2188	}
2189
2190	return err;
2191}
2192
2193/**
2194 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2195 *                                 transport
2196 **/
2197static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2198					enum iucv_tx_notify n)
2199{
2200	struct sock *isk = skb->sk;
2201	struct sock *sk = NULL;
2202	struct iucv_sock *iucv = NULL;
2203	struct sk_buff_head *list;
2204	struct sk_buff *list_skb;
2205	struct sk_buff *this = NULL;
2206	unsigned long flags;
2207	struct hlist_node *node;
2208
2209	read_lock(&iucv_sk_list.lock);
2210	sk_for_each(sk, node, &iucv_sk_list.head)
2211		if (sk == isk) {
2212			iucv = iucv_sk(sk);
2213			break;
2214		}
2215	read_unlock(&iucv_sk_list.lock);
2216
2217	if (!iucv)
2218		return;
2219
2220	bh_lock_sock(sk);
2221	list = &iucv->send_skb_q;
2222	if (skb_queue_empty(list))
2223		goto out_unlock;
2224
2225	spin_lock_irqsave(&list->lock, flags);
2226	list_skb = list->next;
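	/* the skb handed to the transmit path is a clone of a queued skb;
	 * clones share their skb_shinfo(), so compare that to find the
	 * matching entry on the send queue
	 */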
2227	while (list_skb != (struct sk_buff *)list) {
2228		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2229			this = list_skb;
2230			switch (n) {
2231			case TX_NOTIFY_OK:
2232				__skb_unlink(this, list);
2233				iucv_sock_wake_msglim(sk);
2234				dev_put(this->dev);
2235				kfree_skb(this);
2236				break;
2237			case TX_NOTIFY_PENDING:
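				/* completion deferred: keep the skb queued
				 * until the matching DELAYED_* notification
				 * arrives
				 */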
2238				atomic_inc(&iucv->pendings);
2239				break;
2240			case TX_NOTIFY_DELAYED_OK:
2241				__skb_unlink(this, list);
2242				atomic_dec(&iucv->pendings);
2243				if (atomic_read(&iucv->pendings) <= 0)
2244					iucv_sock_wake_msglim(sk);
2245				dev_put(this->dev);
2246				kfree_skb(this);
2247				break;
2248			case TX_NOTIFY_UNREACHABLE:
2249			case TX_NOTIFY_DELAYED_UNREACHABLE:
2250			case TX_NOTIFY_TPQFULL: /* not yet used */
2251			case TX_NOTIFY_GENERALERROR:
2252			case TX_NOTIFY_DELAYED_GENERALERROR:
2253				__skb_unlink(this, list);
2254				dev_put(this->dev);
2255				kfree_skb(this);
2256				sk->sk_state = IUCV_DISCONN;
2257				sk->sk_state_change(sk);
2258				break;
2259			}
2260			break;
2261		}
2262		list_skb = list_skb->next;
2263	}
2264	spin_unlock_irqrestore(&list->lock, flags);
2265
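	/* iucv_sock_close() may be waiting in IUCV_CLOSING state for the
	 * send queue to drain; once it is empty the close can complete
	 */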
2266	if (sk->sk_state == IUCV_CLOSING) {
2267		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2268			sk->sk_state = IUCV_CLOSED;
2269			sk->sk_state_change(sk);
2270		}
2271	}
2272
2273out_unlock:
2274	bh_unlock_sock(sk);
2275}

2276static const struct proto_ops iucv_sock_ops = {
2277	.family		= PF_IUCV,
2278	.owner		= THIS_MODULE,
2279	.release	= iucv_sock_release,
2280	.bind		= iucv_sock_bind,
2281	.connect	= iucv_sock_connect,
2282	.listen		= iucv_sock_listen,
2283	.accept		= iucv_sock_accept,
2284	.getname	= iucv_sock_getname,
2285	.sendmsg	= iucv_sock_sendmsg,
2286	.recvmsg	= iucv_sock_recvmsg,
2287	.poll		= iucv_sock_poll,
2288	.ioctl		= sock_no_ioctl,
2289	.mmap		= sock_no_mmap,
2290	.socketpair	= sock_no_socketpair,
2291	.shutdown	= iucv_sock_shutdown,
2292	.setsockopt	= iucv_sock_setsockopt,
2293	.getsockopt	= iucv_sock_getsockopt,
2294};
2295
2296static const struct net_proto_family iucv_sock_family_ops = {
2297	.family	= AF_IUCV,
2298	.owner	= THIS_MODULE,
2299	.create	= iucv_sock_create,
2300};
2301
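/* AF_IUCV frames travel over HiperSockets devices with their own
 * ethertype; dev_add_pack() in afiucv_init() hooks afiucv_hs_rcv()
 * into the RX path for that type
 */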
2302static struct packet_type iucv_packet_type = {
2303	.type = cpu_to_be16(ETH_P_AF_IUCV),
2304	.func = afiucv_hs_rcv,
2305};
2306
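/* register a dummy device on the IUCV bus so that the af_iucv
 * power-management callbacks are invoked on suspend/resume
 */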
2307static int afiucv_iucv_init(void)
2308{
2309	int err;
2310
2311	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2312	if (err)
2313		goto out;
2314	/* establish dummy device */
2315	af_iucv_driver.bus = pr_iucv->bus;
2316	err = driver_register(&af_iucv_driver);
2317	if (err)
2318		goto out_iucv;
2319	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2320	if (!af_iucv_dev) {
2321		err = -ENOMEM;
2322		goto out_driver;
2323	}
2324	dev_set_name(af_iucv_dev, "af_iucv");
2325	af_iucv_dev->bus = pr_iucv->bus;
2326	af_iucv_dev->parent = pr_iucv->root;
2327	af_iucv_dev->release = (void (*)(struct device *))kfree;
2328	af_iucv_dev->driver = &af_iucv_driver;
2329	err = device_register(af_iucv_dev);
2330	if (err) {
2331		put_device(af_iucv_dev);	/* invokes the kfree release set above */
		goto out_driver;
	}
2332	return 0;
2333
2334out_driver:
2335	driver_unregister(&af_iucv_driver);
2336out_iucv:
2337	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2338out:
2339	return err;
2340}
2341
2342static int __init afiucv_init(void)
2343{
2344	int err;
2345
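	/* native IUCV is available only when running as a z/VM guest; in
	 * an LPAR the socket family is still usable through the
	 * HiperSockets transport, so a missing iucv_if is not fatal
	 */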
2346	if (MACHINE_IS_VM) {
2347		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2348		if (unlikely(err)) {
2349			WARN_ON(err);
2350			err = -EPROTONOSUPPORT;
2351			goto out;
2352		}
2353
2354		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2355		if (!pr_iucv) {
2356			printk(KERN_WARNING "iucv_if lookup failed\n");
2357			memset(&iucv_userid, 0, sizeof(iucv_userid));
2358		}
2359	} else {
2360		memset(&iucv_userid, 0, sizeof(iucv_userid));
2361		pr_iucv = NULL;
2362	}
2363
2364	err = proto_register(&iucv_proto, 0);
2365	if (err)
2366		goto out;
2367	err = sock_register(&iucv_sock_family_ops);
2368	if (err)
2369		goto out_proto;
2370
2371	if (pr_iucv) {
2372		err = afiucv_iucv_init();
2373		if (err)
2374			goto out_sock;
2375	}
2376	dev_add_pack(&iucv_packet_type);
2377	return 0;
2378
2379out_sock:
2380	sock_unregister(PF_IUCV);
2381out_proto:
2382	proto_unregister(&iucv_proto);
2383out:
2384	if (pr_iucv)
2385		symbol_put(iucv_if);
2386	return err;
2387}
2388
2389static void __exit afiucv_exit(void)
2390{
2391	if (pr_iucv) {
2392		device_unregister(af_iucv_dev);
2393		driver_unregister(&af_iucv_driver);
2394		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2395		symbol_put(iucv_if);
2396	}
2397	dev_remove_pack(&iucv_packet_type);
2398	sock_unregister(PF_IUCV);
2399	proto_unregister(&iucv_proto);
2400}
2401
2402module_init(afiucv_init);
2403module_exit(afiucv_exit);
2404
2405MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2406MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2407MODULE_VERSION(VERSION);
2408MODULE_LICENSE("GPL");
2409MODULE_ALIAS_NETPROTO(PF_IUCV);
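
/*
 * Userspace usage sketch (illustrative only: error handling is omitted
 * and the blank-padded 8-byte IDs below are made-up examples):
 *
 *	int s = socket(AF_IUCV, SOCK_STREAM, 0);
 *	struct sockaddr_iucv addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "MYVMID  ", 8);
 *	memcpy(addr.siucv_name,    "MYSRV   ", 8);
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(s, 5);
 */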