Lines matching defs:xprt in net/sunrpc/svc_xprt.c (the generic SUNRPC server transport code); each entry below is the source line number followed by the matching line.

16 #include <linux/sunrpc/xprt.h>
21 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
25 static void svc_delete_xprt(struct svc_xprt *xprt);
47 * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
128 struct svc_xprt *xprt =
130 struct module *owner = xprt->xpt_class->xcl_owner;
131 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
132 svcauth_unix_info_release(xprt);
133 put_net(xprt->xpt_net);
135 if (xprt->xpt_bc_xprt)
136 xprt_put(xprt->xpt_bc_xprt);
137 xprt->xpt_ops->xpo_free(xprt);
141 void svc_xprt_put(struct svc_xprt *xprt)
143 kref_put(&xprt->xpt_ref, svc_xprt_free);
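
The fragments above are the transport's reference-counting core: xpt_ref is a kref, svc_xprt_put() drops one reference, and svc_xprt_free() runs only on the final put, releasing the auth cache, the network namespace, any backchannel, and finally the transport itself via the class's xpo_free op. A minimal sketch of the get/put discipline callers follow (do_work_on() is a hypothetical placeholder; svc_xprt_get() is the kref_get wrapper from linux/sunrpc/svc_xprt.h):

	svc_xprt_get(xprt);	/* pin: xprt cannot be freed while held */
	do_work_on(xprt);	/* hypothetical work outside any lock */
	svc_xprt_put(xprt);	/* unpin: the last put calls svc_xprt_free() */
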
152 struct svc_xprt *xprt, struct svc_serv *serv)
154 memset(xprt, 0, sizeof(*xprt));
155 xprt->xpt_class = xcl;
156 xprt->xpt_ops = xcl->xcl_ops;
157 kref_init(&xprt->xpt_ref);
158 xprt->xpt_server = serv;
159 INIT_LIST_HEAD(&xprt->xpt_list);
160 INIT_LIST_HEAD(&xprt->xpt_ready);
161 INIT_LIST_HEAD(&xprt->xpt_deferred);
162 INIT_LIST_HEAD(&xprt->xpt_users);
163 mutex_init(&xprt->xpt_mutex);
164 spin_lock_init(&xprt->xpt_lock);
165 set_bit(XPT_BUSY, &xprt->xpt_flags);
166 rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
167 xprt->xpt_net = get_net(net);
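
svc_xprt_init() gives every new transport the same starting state: zeroed, class ops wired in, one reference held, all list heads empty, and XPT_BUSY already set so nothing can enqueue it before setup finishes. A hedged sketch of how a transport backend might call it (the svc_sock embedding and svc_tcp_class are illustrative, loosely modeled on the TCP socket transport; error handling elided):

	struct svc_sock *svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);

	if (!svsk)
		return ERR_PTR(-ENOMEM);
	/* Born busy and referenced; XPT_BUSY is cleared via
	 * svc_xprt_received() once the backend finishes setup. */
	svc_xprt_init(net, &svc_tcp_class, &svsk->sk_xprt, serv);
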
219 static void svc_xprt_received(struct svc_xprt *xprt)
221 WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags));
222 if (!test_bit(XPT_BUSY, &xprt->xpt_flags))
224 /* As soon as we clear busy, the xprt could be closed and
227 svc_xprt_get(xprt);
228 clear_bit(XPT_BUSY, &xprt->xpt_flags);
229 svc_xprt_enqueue(xprt);
230 svc_xprt_put(xprt);
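
The ordering in svc_xprt_received() matters: the instant XPT_BUSY is cleared, another CPU may close and put the transport, so a reference is taken first and dropped only after the re-enqueue attempt. The same four steps, annotated:

	svc_xprt_get(xprt);			/* pin before releasing BUSY */
	clear_bit(XPT_BUSY, &xprt->xpt_flags);	/* others may now claim it */
	svc_xprt_enqueue(xprt);			/* requeue if work remains */
	svc_xprt_put(xprt);			/* drop the temporary pin */
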
281 * Copy the local and remote xprt addresses to the rqstp structure
283 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
285 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
286 rqstp->rq_addrlen = xprt->xpt_remotelen;
292 memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
293 rqstp->rq_daddrlen = xprt->xpt_locallen;
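
With the remote and local endpoints copied into the rqstp, a handler can report the peer without touching the transport again. A small usage sketch (svc_print_addr() is the formatting helper exported from this same file; RPC_MAX_ADDRBUFLEN comes from linux/sunrpc/clnt.h):

	char buf[RPC_MAX_ADDRBUFLEN];

	svc_xprt_copy_addrs(rqstp, xprt);
	dprintk("request from %s\n",
		svc_print_addr(rqstp, buf, sizeof(buf)));
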
329 static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
331 if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
333 if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
334 return xprt->xpt_ops->xpo_has_wspace(xprt);
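
The match listing elides the return statements; reconstructed, the predicate reads as below, consistent with the lines shown: connection and close events are always actionable, while data or deferred work is actionable only if the transport can currently take a reply.

	static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
	{
		if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
			return true;	/* always handle accept/close */
		if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
			return xprt->xpt_ops->xpo_has_wspace(xprt);
		return false;
	}
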
343 void svc_xprt_enqueue(struct svc_xprt *xprt)
349 if (!svc_xprt_has_something_to_do(xprt))
353 pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
371 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
373 dprintk("svc: transport %p busy, not enqueued\n", xprt);
382 xprt, rqstp);
388 rqstp->rq_xprt = xprt;
389 svc_xprt_get(xprt);
393 dprintk("svc: transport %p put into queue\n", xprt);
394 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
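
Read together, the enqueue path is: bail out if there is nothing to do, pick the pool for the current CPU, claim XPT_BUSY atomically (so a transport sits on at most one queue at a time), then either hand the transport straight to an idle thread or append it to the pool's ready list. A condensed sketch of that flow (pool locking and cpu handling elided; pick_idle_thread() is a hypothetical stand-in for the sp_threads list handling):

	if (!svc_xprt_has_something_to_do(xprt))
		return;
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		return;			/* already queued or being serviced */
	rqstp = pick_idle_thread(pool);	/* hypothetical helper */
	if (rqstp) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);	/* reference travels with rq_xprt */
		wake_up(&rqstp->rq_wait);
	} else
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
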
408 struct svc_xprt *xprt;
413 xprt = list_entry(pool->sp_sockets.next,
415 list_del_init(&xprt->xpt_ready);
418 xprt, atomic_read(&xprt->xpt_ref.refcount));
420 return xprt;
438 struct svc_xprt *xprt = rqstp->rq_xprt;
439 atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
442 svc_xprt_enqueue(xprt);
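
These lines belong to svc_reserve(), which adjusts how much reply space the request holds against xpt_reserved; releasing reservation can open up write space, so the transport is re-enqueued in case data was blocked on xpo_has_wspace(). A hedged usage sketch, as a service might shrink its reservation once it knows the reply is small:

	/* Reply fits in one page: return the rest of the reservation
	 * so other requests stop failing the write-space check. */
	svc_reserve(rqstp, PAGE_SIZE);
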
449 struct svc_xprt *xprt = rqstp->rq_xprt;
474 svc_xprt_put(xprt);
547 struct svc_xprt *xprt = NULL;
559 xprt = list_entry(serv->sv_tempsocks.prev,
562 set_bit(XPT_CLOSE, &xprt->xpt_flags);
563 svc_xprt_get(xprt);
567 if (xprt) {
568 svc_xprt_enqueue(xprt);
569 svc_xprt_put(xprt);
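
This cluster (svc_check_conn_limits) handles too many temporary connections by victimizing the oldest entry on sv_tempsocks: mark it for close, pin it, enqueue it so a server thread performs the deletion, then drop the pin. That enqueue-to-close idiom is the safe way to kill a transport from a context that does not hold XPT_BUSY; sketched (victim is an illustrative name):

	set_bit(XPT_CLOSE, &victim->xpt_flags);
	svc_xprt_get(victim);		/* keep it alive across the enqueue */
	svc_xprt_enqueue(victim);	/* a thread will svc_delete_xprt() it */
	svc_xprt_put(victim);
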
617 struct svc_xprt *xprt;
628 xprt = svc_xprt_dequeue(pool);
629 if (xprt) {
630 rqstp->rq_xprt = xprt;
631 svc_xprt_get(xprt);
679 xprt = rqstp->rq_xprt;
680 if (!xprt) {
691 return xprt;
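
svc_get_next_xprt() either dequeues a ready transport immediately or puts the thread to sleep until svc_xprt_enqueue() hands one over by setting rq_xprt directly; waking with rq_xprt still NULL means a timeout or signal, reported as an ERR_PTR. A heavily condensed sketch of that wait (locking, task-state handling, and the sp_threads bookkeeping elided):

	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;
		svc_xprt_get(xprt);		/* caller's reference */
	} else {
		schedule_timeout(timeout);	/* enqueue may fill rq_xprt */
		xprt = rqstp->rq_xprt;
		if (!xprt)
			return ERR_PTR(-EAGAIN);	/* timed out */
	}
	return xprt;
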
711 static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
716 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
718 svc_delete_xprt(xprt);
719 /* Leave XPT_BUSY set on the dead xprt: */
722 if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
728 __module_get(xprt->xpt_class->xcl_owner);
729 svc_check_conn_limits(xprt->xpt_server);
730 newxpt = xprt->xpt_ops->xpo_accept(xprt);
733 } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
736 rqstp, rqstp->rq_pool->sp_id, xprt,
737 atomic_read(&xprt->xpt_ref.refcount));
738 rqstp->rq_deferred = svc_deferred_dequeue(xprt);
742 len = xprt->xpt_ops->xpo_recvfrom(rqstp);
745 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
748 svc_xprt_received(xprt);
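
svc_handle_xprt() is the per-event dispatcher: a transport marked XPT_CLOSE is deleted (and keeps XPT_BUSY set so nothing touches the corpse), a listener accepts a new child transport, and anything else receives data, replaying a deferred request in preference to fresh bytes. A condensed sketch consistent with the fragments (declarations elided; svc_add_new_temp_xprt() is the helper this era of the file uses to install accepted children, stated here as an assumption):

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		svc_delete_xprt(xprt);	/* leave XPT_BUSY set on the dead xprt */
		return 0;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	svc_xprt_received(xprt);	/* clear XPT_BUSY */
	return len;
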
759 struct svc_xprt *xprt = NULL;
784 xprt = svc_get_next_xprt(rqstp, timeout);
785 if (IS_ERR(xprt))
786 return PTR_ERR(xprt);
788 len = svc_handle_xprt(rqstp, xprt);
794 clear_bit(XPT_OLD, &xprt->xpt_flags);
814 dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
824 struct svc_xprt *xprt;
828 xprt = rqstp->rq_xprt;
829 if (!xprt)
842 mutex_lock(&xprt->xpt_mutex);
843 if (test_bit(XPT_DEAD, &xprt->xpt_flags)
844 || test_bit(XPT_CLOSE, &xprt->xpt_flags))
847 len = xprt->xpt_ops->xpo_sendto(rqstp);
848 mutex_unlock(&xprt->xpt_mutex);
849 rpc_wake_up(&xprt->xpt_bc_pending);
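
svc_send() serializes replies per transport with xpt_mutex and refuses to transmit on a transport already dead or closing; the rpc_wake_up() afterwards releases any backchannel request parked on xpt_bc_pending waiting for that mutex. The core, condensed:

	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags) ||
	    test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;	/* drop the reply */
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
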
864 struct svc_xprt *xprt;
877 xprt = list_entry(le, struct svc_xprt, xpt_list);
881 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
883 if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
884 test_bit(XPT_BUSY, &xprt->xpt_flags))
887 set_bit(XPT_CLOSE, &xprt->xpt_flags);
888 set_bit(XPT_DETACHED, &xprt->xpt_flags);
889 dprintk("queuing xprt %p for closing\n", xprt);
892 svc_xprt_enqueue(xprt);
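
The temp-transport ager is mark-and-sweep: on the first pass a transport is merely marked XPT_OLD; one still marked on the next pass, idle (refcount of exactly 1) and not busy, is flagged for close, detached, and enqueued so a thread deletes it. svc_recv() clears XPT_OLD on real traffic (line 794 above), which is what spares active connections. The per-transport decision, annotated:

	if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
		continue;	/* first sighting: mark and move on */
	if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
	    test_bit(XPT_BUSY, &xprt->xpt_flags))
		continue;	/* still in use: spare it this round */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);	/* idle two rounds: reap */
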
899 static void call_xpt_users(struct svc_xprt *xprt)
903 spin_lock(&xprt->xpt_lock);
904 while (!list_empty(&xprt->xpt_users)) {
905 u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
909 spin_unlock(&xprt->xpt_lock);
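
The elided lines in the loop unlink each user and fire its callback; reconstructed, the body is:

	u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
	list_del(&u->list);
	u->callback(u);

This is the transport's close-notification hook: consumers register a struct svc_xpt_user (via register_xpt_user() in linux/sunrpc/svc_xprt.h) and get called back exactly once, under xpt_lock, when the transport is deleted.
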
915 static void svc_delete_xprt(struct svc_xprt *xprt)
917 struct svc_serv *serv = xprt->xpt_server;
921 if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
924 dprintk("svc: svc_delete_xprt(%p)\n", xprt);
925 xprt->xpt_ops->xpo_detach(xprt);
928 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
929 list_del_init(&xprt->xpt_list);
930 WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
931 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
935 while ((dr = svc_deferred_dequeue(xprt)) != NULL)
938 call_xpt_users(xprt);
939 svc_xprt_put(xprt);
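
svc_delete_xprt() tears down in a fixed order: XPT_DEAD is test-and-set so deletion happens exactly once; xpo_detach() stops new events; the transport is unlinked from the server's list; queued deferred requests are drained and freed; registered users are notified via call_xpt_users(); and only then is the initial reference dropped. A hedged sketch of the notification pattern from the consumer side (my_close_cb and my_user are illustrative names):

	static void my_close_cb(struct svc_xpt_user *u)
	{
		/* transport is going away; drop whatever we cached on it */
	}

	static struct svc_xpt_user my_user = { .callback = my_close_cb };

	if (register_xpt_user(xprt, &my_user))
		/* transport already closing: behave as if notified */;
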
942 void svc_close_xprt(struct svc_xprt *xprt)
944 set_bit(XPT_CLOSE, &xprt->xpt_flags);
945 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
954 svc_delete_xprt(xprt);
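
svc_close_xprt() must work even with no server threads running, so it races for XPT_BUSY itself: if some thread already owns the transport, that owner will see XPT_CLOSE and delete it; if we win the bit, we delete it directly. Annotated:

	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		return;		/* the current owner effects the close */
	svc_delete_xprt(xprt);	/* we own XPT_BUSY: delete it ourselves */
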
960 struct svc_xprt *xprt;
964 list_for_each_entry(xprt, xprt_list, xpt_list) {
965 if (xprt->xpt_net != net)
968 set_bit(XPT_CLOSE, &xprt->xpt_flags);
969 svc_xprt_enqueue(xprt);
978 struct svc_xprt *xprt;
986 list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
987 if (xprt->xpt_net != net)
989 list_del_init(&xprt->xpt_ready);
991 return xprt;
1000 struct svc_xprt *xprt;
1002 while ((xprt = svc_dequeue_net(serv, net))) {
1003 set_bit(XPT_CLOSE, &xprt->xpt_flags);
1004 svc_delete_xprt(xprt);
1040 struct svc_xprt *xprt = dr->xprt;
1042 spin_lock(&xprt->xpt_lock);
1043 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
1044 if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
1045 spin_unlock(&xprt->xpt_lock);
1047 svc_xprt_put(xprt);
1052 dr->xprt = NULL;
1053 list_add(&dr->handle.recent, &xprt->xpt_deferred);
1054 spin_unlock(&xprt->xpt_lock);
1055 svc_xprt_enqueue(xprt);
1056 svc_xprt_put(xprt);
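
This cluster (svc_revisit, lines 1040-1056) is the deferral wake-up: when the item a request was waiting on becomes available, the saved request is either discarded (too many deferrals, or the transport died) or prepended to xpt_deferred under xpt_lock and the transport re-enqueued so a thread replays it; dr->xprt is cleared because the queue, not the deferred request, now holds the transport reference. The requeue branch, condensed:

	dr->xprt = NULL;			/* the queue owns the ref now */
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);			/* have a thread replay it */
	svc_xprt_put(xprt);
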
1063 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
1065 * This code can only handle requests that consist of an xprt-header
1101 dr->xprt = rqstp->rq_xprt;
1133 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
1137 if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
1139 spin_lock(&xprt->xpt_lock);
1140 if (!list_empty(&xprt->xpt_deferred)) {
1141 dr = list_entry(xprt->xpt_deferred.next,
1146 clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
1147 spin_unlock(&xprt->xpt_lock);
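
Reconstructed around the matched lines, the dequeue helper checks the XPT_DEFERRED bit unlocked as a fast path, then pops the oldest deferred request under xpt_lock, clearing the bit once the list drains:

	static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
	{
		struct svc_deferred_req *dr = NULL;

		if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
			return NULL;		/* unlocked fast path */
		spin_lock(&xprt->xpt_lock);
		if (!list_empty(&xprt->xpt_deferred)) {
			dr = list_entry(xprt->xpt_deferred.next,
					struct svc_deferred_req,
					handle.recent);
			list_del_init(&dr->handle.recent);
		} else
			clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
		spin_unlock(&xprt->xpt_lock);
		return dr;
	}
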
1171 struct svc_xprt *xprt;
1179 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1180 if (xprt->xpt_net != net)
1182 if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
1184 if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
1186 if (port != 0 && port != svc_xprt_local_port(xprt))
1188 found = xprt;
1189 svc_xprt_get(xprt);
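
svc_find_xprt() scans sv_permsocks for a transport matching class name, address family, and port (AF_UNSPEC and port 0 act as wildcards, per the tests above), taking a reference on the match that the caller must drop. A hedged usage sketch (the "tcp"/2049 values are illustrative):

	struct svc_xprt *xprt;

	xprt = svc_find_xprt(serv, "tcp", net, AF_INET, 2049);
	if (xprt) {
		/* ... inspect or use the listener ... */
		svc_xprt_put(xprt);	/* drop the lookup's reference */
	}
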
1197 static int svc_one_xprt_name(const struct svc_xprt *xprt,
1203 xprt->xpt_class->xcl_name,
1204 svc_xprt_local_port(xprt));
1224 struct svc_xprt *xprt;
1236 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1237 len = svc_one_xprt_name(xprt, pos, buflen - totlen);
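
svc_one_xprt_name() formats a single "name port" line into the caller's buffer, and svc_xprt_names() accumulates one such line per permanent transport (this backs nfsd's portlist interface). The formatting helper, reconstructed around the matched lines; the overflow check is how the walk stops when the buffer fills:

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;	/* buffer full: abort the listing */
	return len;
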