/* iovec.c — revision ab1a2d7773b23dbbb863fd63fcf83b67cf361e34 */
/*
 *	iovec manipulation routines.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *		Andrew Lunn	:	Errors in iovec copying.
 *		Pedro Roque	:	Added memcpy_fromiovecend and
 *					csum_..._fromiovecend.
 *		Andi Kleen	:	fixed error handling for 2.1
 *		Alexey Kuznetsov:	2.1 optimisations
 *		Andi Kleen	:	Fix csum*fromiovecend for IPv6.
 */
18
19#include <linux/errno.h>
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/mm.h>
23#include <linux/net.h>
24#include <linux/in6.h>
25#include <asm/uaccess.h>
26#include <asm/byteorder.h>
27#include <net/checksum.h>
28#include <net/sock.h>
29
30/*
31 *	Verify iovec. The caller must ensure that the iovec is big enough
32 *	to hold the message iovec.
33 *
34 *	Save time not doing access_ok. copy_*_user will make this work
35 *	in any case.
36 */
37
38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode)
39{
40	int size, ct, err;
41
42	if (m->msg_namelen) {
43		if (mode == VERIFY_READ) {
44			void __user *namep;
45			namep = (void __user __force *) m->msg_name;
46			err = move_addr_to_kernel(namep, m->msg_namelen,
47						  address);
48			if (err < 0)
49				return err;
50		}
51		m->msg_name = address;
52	} else {
53		m->msg_name = NULL;
54	}
55
56	size = m->msg_iovlen * sizeof(struct iovec);
57	if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
58		return -EFAULT;
59
60	m->msg_iov = iov;
61	err = 0;
62
63	for (ct = 0; ct < m->msg_iovlen; ct++) {
64		size_t len = iov[ct].iov_len;
65
66		if (len > INT_MAX - err) {
67			len = INT_MAX - err;
68			iov[ct].iov_len = len;
69		}
70		err += len;
71	}
72
73	return err;
74}
75
76/*
77 *	Copy kernel to iovec. Returns -EFAULT on error.
78 */
79
80int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
81		      int offset, int len)
82{
83	int copy;
84	for (; len > 0; ++iov) {
85		/* Skip over the finished iovecs */
86		if (unlikely(offset >= iov->iov_len)) {
87			offset -= iov->iov_len;
88			continue;
89		}
90		copy = min_t(unsigned int, iov->iov_len - offset, len);
91		if (copy_to_user(iov->iov_base + offset, kdata, copy))
92			return -EFAULT;
93		offset = 0;
94		kdata += copy;
95		len -= copy;
96	}
97
98	return 0;
99}
100EXPORT_SYMBOL(memcpy_toiovecend);
101
102/*
103 *	Copy iovec to kernel. Returns -EFAULT on error.
104 */
105
106int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
107			int offset, int len)
108{
109	/* Skip over the finished iovecs */
110	while (offset >= iov->iov_len) {
111		offset -= iov->iov_len;
112		iov++;
113	}
114
115	while (len > 0) {
116		u8 __user *base = iov->iov_base + offset;
117		int copy = min_t(unsigned int, len, iov->iov_len - offset);
118
119		offset = 0;
120		if (copy_from_user(kdata, base, copy))
121			return -EFAULT;
122		len -= copy;
123		kdata += copy;
124		iov++;
125	}
126
127	return 0;
128}
129EXPORT_SYMBOL(memcpy_fromiovecend);
130
131/*
132 *	And now for the all-in-one: copy and checksum from a user iovec
133 *	directly to a datagram
134 *	Calls to csum_partial but the last must be in 32 bit chunks
135 *
136 *	ip_build_xmit must ensure that when fragmenting only the last
137 *	call to this function will be unaligned also.
138 */
139int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
140				 int offset, unsigned int len, __wsum *csump)
141{
142	__wsum csum = *csump;
143	int partial_cnt = 0, err = 0;
144
145	/* Skip over the finished iovecs */
146	while (offset >= iov->iov_len) {
147		offset -= iov->iov_len;
148		iov++;
149	}
150
151	while (len > 0) {
152		u8 __user *base = iov->iov_base + offset;
153		int copy = min_t(unsigned int, len, iov->iov_len - offset);
154
155		offset = 0;
156
157		/* There is a remnant from previous iov. */
158		if (partial_cnt) {
159			int par_len = 4 - partial_cnt;
160
161			/* iov component is too short ... */
162			if (par_len > copy) {
163				if (copy_from_user(kdata, base, copy))
164					goto out_fault;
165				kdata += copy;
166				base += copy;
167				partial_cnt += copy;
168				len -= copy;
169				iov++;
170				if (len)
171					continue;
172				*csump = csum_partial(kdata - partial_cnt,
173							 partial_cnt, csum);
174				goto out;
175			}
176			if (copy_from_user(kdata, base, par_len))
177				goto out_fault;
178			csum = csum_partial(kdata - partial_cnt, 4, csum);
179			kdata += par_len;
180			base  += par_len;
181			copy  -= par_len;
182			len   -= par_len;
183			partial_cnt = 0;
184		}
185
186		if (len > copy) {
187			partial_cnt = copy % 4;
188			if (partial_cnt) {
189				copy -= partial_cnt;
190				if (copy_from_user(kdata + copy, base + copy,
191						partial_cnt))
192					goto out_fault;
193			}
194		}
195
196		if (copy) {
197			csum = csum_and_copy_from_user(base, kdata, copy,
198							csum, &err);
199			if (err)
200				goto out;
201		}
202		len   -= copy + partial_cnt;
203		kdata += copy + partial_cnt;
204		iov++;
205	}
206	*csump = csum;
207out:
208	return err;
209
210out_fault:
211	err = -EFAULT;
212	goto out;
213}
214EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
215
216unsigned long iov_pages(const struct iovec *iov, int offset,
217			unsigned long nr_segs)
218{
219	unsigned long seg, base;
220	int pages = 0, len, size;
221
222	while (nr_segs && (offset >= iov->iov_len)) {
223		offset -= iov->iov_len;
224		++iov;
225		--nr_segs;
226	}
227
228	for (seg = 0; seg < nr_segs; seg++) {
229		base = (unsigned long)iov[seg].iov_base + offset;
230		len = iov[seg].iov_len - offset;
231		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
232		pages += size;
233		offset = 0;
234	}
235
236	return pages;
237}
238EXPORT_SYMBOL(iov_pages);
239