/*
 * linux/net/9p/trans_fd.c
 *
 * Fd transport layer.  Includes deprecated socket layer.
 *
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
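/*
 * A connection waits on at most two distinct wait queues at once: one for
 * its read descriptor and one for its write descriptor (they differ only
 * when rfd != wfd; see p9_fd_poll() and p9_pollwait()).
 */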
#define MAXPOLLWADDR	2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 * @privport: bind to a reserved (privileged) local port (trans=tcp)
 *
 */

struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
	int privport;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
	/* Options that take no arguments */
	Opt_privport,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
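/*
 * Illustrative option strings as handled by parse_opts() below:
 *   "trans=fd,rfdno=3,wfdno=4"      -> opts->rfd = 3, opts->wfd = 4
 *   "trans=tcp,port=5640,privport"  -> opts->port = 5640, opts->privport = 1
 * Tokens not in the table above (e.g. "trans=") are silently skipped here;
 * they are consumed by the 9p client core instead.
 */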

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @req: current request being processed (if any)
 * @tmp_buf: temporary buffer to read in header
 * @rsize: amount to read for current frame
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: pending links to be polled per conn
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: scheduling state bits (Rworksched, Rpending, Wworksched, Wpending)
 *
 */

struct p9_conn {
	struct list_head mux_list;
	struct p9_client *client;
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_req_t *req;
	char tmp_buf[7];
	int rsize;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference of file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn conn;
};

static void p9_poll_workfn(struct work_struct *work);

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static DECLARE_WORK(p9_poll_work, p9_poll_workfn);

static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;

static void p9_mux_poll_stop(struct p9_conn *m)
{
	unsigned long flags;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];

		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
		}
	}

	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

static void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req_t *req, *rtmp;
	unsigned long flags;
	LIST_HEAD(cancel_list);

	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);

	spin_lock_irqsave(&m->client->lock, flags);

	if (m->err) {
		spin_unlock_irqrestore(&m->client->lock, flags);
		return;
	}

	m->err = err;

	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock_irqrestore(&m->client->lock, flags);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
		list_del(&req->req_list);
		if (!req->t_err)
			req->t_err = err;
		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
	}
}

static int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op->poll)
		return -EIO;

	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

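	/*
	 * If the read and write sides are backed by different files, merge
	 * the two masks: keep everything except POLLOUT from ->rd and
	 * everything except POLLIN from ->wr, so readability comes from the
	 * read side and writability from the write side.
	 */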
	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");

	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	int status = REQ_STATUS_ERROR;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rbuf) {
		m->rbuf = m->tmp_buf;
		m->rpos = 0;
		m->rsize = 7; /* start by reading header */
	}

	clear_bit(Rpending, &m->wsched);
	p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n",
		 m, m->rpos, m->rsize, m->rsize-m->rpos);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
						m->rsize - m->rpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN)
		goto end_clear;

	if (err <= 0)
		goto error;

	m->rpos += err;

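	/*
	 * Every 9P message starts with a 7-byte little-endian header:
	 * size[4] type[1] tag[2].  The size field counts the whole message,
	 * header included, which is why rsize is simply set to n once the
	 * header has been copied into the full receive buffer below.
	 */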
	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
		u16 tag;
		p9_debug(P9_DEBUG_TRANS, "got new header\n");

		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
		if (n >= m->client->msize) {
			p9_debug(P9_DEBUG_ERROR,
				 "requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
		p9_debug(P9_DEBUG_TRANS,
			 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

		m->req = p9_tag_lookup(m->client, tag);
		if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
				 tag);
			err = -EIO;
			goto error;
		}

		if (m->req->rc == NULL) {
			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_NOFS);
			if (!m->req->rc) {
				m->req = NULL;
				err = -ENOMEM;
				goto error;
			}
		}
		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
		memcpy(m->rbuf, m->tmp_buf, m->rsize);
		m->rsize = n;
	}

	/* not an else because some packets (like clunk) have no payload */
	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
		spin_lock(&m->client->lock);
		if (m->req->status != REQ_STATUS_ERROR)
			status = REQ_STATUS_RCVD;
		list_del(&m->req->req_list);
		spin_unlock(&m->client->lock);
		p9_client_cb(m->client, m->req, status);
		m->rbuf = NULL;
		m->rpos = 0;
		m->rsize = 0;
		m->req = NULL;
	}

end_clear:
	clear_bit(Rworksched, &m->wsched);

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	return;
error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->wr->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req_t *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		spin_lock(&m->client->lock);
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			spin_unlock(&m->client->lock);
			return;
		}

		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
			       req_list);
		req->status = REQ_STATUS_SENT;
		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
		list_move_tail(&req->req_list, &m->req_list);

		m->wbuf = req->tc->sdata;
		m->wsize = req->tc->size;
		m->wpos = 0;
		spin_unlock(&m->client->lock);
	}

	p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
		 m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN)
		goto end_clear;

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

end_clear:
	clear_bit(Wworksched, &m->wsched);

	if (m->wsize || !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if ((n & POLLOUT) &&
		   !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	schedule_work(&p9_poll_work);
	return 1;
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by a file's poll operation to add the v9fs poll task to the
 * file's wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - initialize the per-session mux data
 * @client: client instance
 *
 * Note: hooks the connection into the shared poll machinery and records
 * its initial read/write readiness.
 */

static void p9_conn_create(struct p9_client *client)
{
	int n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;

	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);

	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}
}

/**
 * p9_fd_request - send 9P request
 * @client: client instance
 * @req: request to be sent
 *
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 *
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
	int n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;

	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
		 m, current, req->tc, req->tc->id);
	if (m->err < 0)
		return m->err;

	spin_lock(&client->lock);
	req->status = REQ_STATUS_UNSENT;
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&client->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		schedule_work(&m->wq);

	return 0;
}

static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
	int ret = 1;

	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	spin_lock(&client->lock);

	if (req->status == REQ_STATUS_UNSENT) {
		list_del(&req->req_list);
		req->status = REQ_STATUS_FLSHD;
		ret = 0;
	}
	spin_unlock(&client->lock);

	return ret;
}

static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	/* we haven't received a response for this request,
	 * remove it from the list.
	 */
	spin_lock(&client->lock);
	list_del(&req->req_list);
	spin_unlock(&client->lock);

	return 0;
}

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;
	opts->privport = 0;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		case Opt_privport:
			opts->privport = 1;
			break;
		default:
			continue;
		}
	}

	kfree(tmp_options);
	return 0;
}

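/*
 * p9_fd_open - attach the caller-supplied read/write descriptors to the
 * client.  fget() takes a reference on each file; both references are
 * dropped again in p9_fd_close().
 */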
static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
					   GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	ts->wr = fget(wfd);
	if (!ts->rd || !ts->wr) {
		if (ts->rd)
			fput(ts->rd);
		if (ts->wr)
			fput(ts->wr);
		kfree(ts);
		return -EIO;
	}

	client->trans = ts;
	client->status = Connected;

	return 0;
}

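/*
 * p9_socket_open - wrap a connected socket in a struct file and install it
 * as both the read and the write side of the transport.  The extra
 * get_file() matches p9_fd_close(), which puts ->rd and ->wr separately,
 * and O_NONBLOCK keeps the mux work functions from blocking in read/write.
 */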
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	struct p9_trans_fd *p;
	struct file *file;

	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	csocket->sk->sk_allocation = GFP_NOIO;
	file = sock_alloc_file(csocket, 0, NULL);
	if (IS_ERR(file)) {
		pr_err("%s (%d): failed to map fd\n",
		       __func__, task_pid_nr(current));
		sock_release(csocket);
		kfree(p);
		return PTR_ERR(file);
	}

	get_file(file);
	p->wr = p->rd = file;
	client->trans = p;
	client->status = Connected;

	p->rd->f_flags |= O_NONBLOCK;

	p9_conn_create(client);
	return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests of mux
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
		 m, m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(&ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
	}
	return 0;
}

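/*
 * p9_bind_privport - bind the outgoing socket to a reserved source port,
 * walking down from p9_ipport_resv_max to p9_ipport_resv_min and stopping
 * at the first free port; any error other than -EADDRINUSE is returned
 * immediately.
 */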
static int p9_bind_privport(struct socket *sock)
{
	struct sockaddr_in cl;
	int port, err = -EINVAL;

	memset(&cl, 0, sizeof(cl));
	cl.sin_family = AF_INET;
	cl.sin_addr.s_addr = INADDR_ANY;
	for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
		cl.sin_port = htons((ushort)port);
		err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}

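/*
 * Illustrative use of the tcp transport (the mount device is the server
 * address; the port defaults to P9_PORT when not given):
 *   mount -t 9p -o trans=tcp,port=564 192.168.0.2 /mnt/9p
 */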
static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_in sin_server;
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (valid_ipaddr4(addr) < 0)
		return -EINVAL;

	csocket = NULL;

	sin_server.sin_family = AF_INET;
	sin_server.sin_addr.s_addr = in_aton(addr);
	sin_server.sin_port = htons(opts.port);
	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
	if (err) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));
		return err;
	}

	if (opts.privport) {
		err = p9_bind_privport(csocket);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport\n",
			       __func__, task_pid_nr(current));
			sock_release(csocket);
			return err;
		}
	}

	err = csocket->ops->connect(csocket,
				    (struct sockaddr *)&sin_server,
				    sizeof(struct sockaddr_in), 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket to %s\n",
		       __func__, task_pid_nr(current), addr);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

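/*
 * Illustrative use of the unix transport (the mount device is the path of
 * the server's listening unix socket):
 *   mount -t 9p -o trans=unix /tmp/9p.sock /mnt/9p
 */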
static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;

	csocket = NULL;

	if (strlen(addr) >= UNIX_PATH_MAX) {
		pr_err("%s (%d): address too long: %s\n",
		       __func__, task_pid_nr(current), addr);
		return -ENAMETOOLONG;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
			    SOCK_STREAM, 0, &csocket, 1);
	if (err < 0) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));

		return err;
	}
	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
			sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket: %s: %d\n",
		       __func__, task_pid_nr(current), addr, err);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

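/*
 * Illustrative use of the fd transport: the mounting process passes in
 * descriptors it has already opened and connected to a server (rfdno and
 * wfdno may name the same descriptor, e.g. a connected socket):
 *   mount -t 9p -o trans=fd,rfdno=3,wfdno=4 nodev /mnt/9p
 */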
static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p;

	parse_opts(args, &opts);

	if (opts.rfd == ~0 || opts.wfd == ~0) {
		pr_err("Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		return err;

	p = (struct p9_trans_fd *) client->trans;
	p9_conn_create(client);

	return 0;
}

static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.owner = THIS_MODULE,
};

/**
 * p9_poll_workfn - poll worker
 * @work: work struct for the poll worker
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static void p9_poll_workfn(struct work_struct *work)
{
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);

	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	p9_debug(P9_DEBUG_TRANS, "finish\n");
}

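/*
 * Registration: these are called from the 9p core's module init/exit paths
 * (there is no module_init() in this file); once registered, the transports
 * above are selectable at mount time with trans=tcp, trans=unix or trans=fd.
 */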
int p9_trans_fd_init(void)
{
	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

void p9_trans_fd_exit(void)
{
	flush_work(&p9_poll_work);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);
}