Searched defs:to (Results 1 - 23 of 23) sorted by relevance

/include/linux/netfilter/ipset/
ip_set_bitmap.h
18 range_to_mask(u32 from, u32 to, u8 *bits) argument
23 while (--(*bits) > 0 && mask && (to & mask) != from)
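
For orientation, a standalone re-implementation of the range_to_mask() loop shown above: the mask is widened one bit at a time until (to & mask) == from, i.e. until from..to collapses to a single prefix block. A minimal userspace sketch (the main() driver and fixed-width types are illustrative, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Widen the mask until from..to is exactly one prefix block;
     * *bits ends up as the prefix length. */
    static uint32_t range_to_mask(uint32_t from, uint32_t to, uint8_t *bits)
    {
        uint32_t mask = 0xFFFFFFFE;

        *bits = 32;
        while (--(*bits) > 0 && mask && (to & mask) != from)
            mask <<= 1;

        return mask;
    }

    int main(void)
    {
        uint8_t bits;
        /* 192.168.0.0 .. 192.168.0.255 should collapse to a /24 */
        uint32_t mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);

        printf("mask=0x%08x bits=%u\n", mask, bits); /* bits=24 */
        return 0;
    }
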
/include/linux/
parser.h
25 char *to; member in struct:__anon1043
uaccess.h
9 * it will not take any locks and go straight to the fixup table.
11 * They have great resemblance to the preempt_disable/enable calls
13 * no other way to make the pagefault handlers do this. So we do
20 * make sure to have issued the store before a pagefault
29 * make sure to issue those last loads/stores before enabling
43 static inline unsigned long __copy_from_user_inatomic_nocache(void *to, argument
46 return __copy_from_user_inatomic(to, from, n);
49 static inline unsigned long __copy_from_user_nocache(void *to, argument
52 return __copy_from_user(to, from, n);
58 * probe_kernel_address(): safely attempt to read from a location
[all...]
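
The pagefault_disable()/pagefault_enable() pairing those comments describe is what makes the _inatomic copies safe to call from atomic context. A hedged kernel-context sketch of the usual pattern (the wrapper name copy_nosleep is hypothetical):

    /* Sketch: user copy that must not sleep. With page faults disabled,
     * a fault makes the copy return the number of bytes left uncopied
     * instead of blocking in the fault handler. */
    static size_t copy_nosleep(void *dst, const void __user *usrc, size_t len)
    {
        size_t left;

        pagefault_disable();
        left = __copy_from_user_inatomic(dst, usrc, len);
        pagefault_enable();

        return left;    /* nonzero: caller falls back to copy_from_user() */
    }
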
user_namespace.h
39 uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid);
40 gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid);
58 static inline uid_t user_ns_map_uid(struct user_namespace *to, argument
63 static inline gid_t user_ns_map_gid(struct user_namespace *to, argument
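
The inline variants at 58 and 63 are the fallbacks used when user namespaces are compiled out; their whole body is the identity mapping. A reconstruction sketch (assuming the stock !CONFIG_USER_NS path):

    /* With user namespaces disabled there is nothing to translate:
     * ids are the same in every namespace. */
    static inline uid_t user_ns_map_uid(struct user_namespace *to,
                                        const struct cred *cred, uid_t uid)
    {
        return uid;
    }
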
hwspinlock.h
38 * by remote processors that have no other way to achieve synchronization.
45 * hwspinlocks, and in this case, they can be trivially numbered 0 to
52 * This platform data structure should be used to provide the base id
78 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
79 * enabled. We prefer to silently succeed in this case, and let the
84 * we _do_ want users to fail (no point in registering hwspinlock instances if
106 int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, argument
131 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
132 * @hwlock: an hwspinlock which we want to trylock
133 * @flags: a pointer to where the caller's interrupt state will be saved at
209 hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, unsigned int to, unsigned long *flags) argument
233 hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to) argument
258 hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to) argument
[all...]
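
A hedged usage sketch of the timeout API above (kernel context; lock id 0 and the 100 ms budget are arbitrary, and hwspin_lock_request_specific()/hwspin_unlock_irqrestore()/hwspin_lock_free() are the surrounding helpers from this header):

    static int grab_shared_resource(void)
    {
        struct hwspinlock *hwlock;
        unsigned long flags;
        int ret;

        hwlock = hwspin_lock_request_specific(0);
        if (!hwlock)
            return -EBUSY;

        /* spin for at most 100 ms, interrupts disabled and saved */
        ret = hwspin_lock_timeout_irqsave(hwlock, 100, &flags);
        if (ret) {                      /* -ETIMEDOUT on expiry */
            hwspin_lock_free(hwlock);
            return ret;
        }

        /* ... touch the memory shared with the remote processor ... */

        hwspin_unlock_irqrestore(hwlock, &flags);
        hwspin_lock_free(hwlock);
        return 0;
    }
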
migrate.h
28 const nodemask_t *from, const nodemask_t *to,
47 const nodemask_t *from, const nodemask_t *to,
46 migrate_vmas(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, unsigned long flags) argument
highmem.h
142 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
175 * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
176 * @vma: The VMA the page is to be allocated for
180 * to specify via movableflags whether the page will be movable in the
204 * @vma: The VMA the page is to be allocated for
208 * be able to migrate in the future using move_pages() or reclaimed
262 static inline void copy_user_highpage(struct page *to, struct page *from, argument
268 vto = kmap_atomic(to);
269 copy_user_page(vto, vfrom, vaddr, to);
276 copy_highpage(struct page *to, struct page *from) argument
[all...]
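
The hit at 262 cuts the body of copy_user_highpage() short; the generic fallback it belongs to is the usual kmap_atomic() bracket, sketched here as a reconstruction (assuming the stock path without an architecture override):

    static inline void copy_user_highpage(struct page *to, struct page *from,
                                          unsigned long vaddr, struct vm_area_struct *vma)
    {
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);      /* temporarily map both pages */
        vto = kmap_atomic(to);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto);             /* unmap in reverse (stack) order */
        kunmap_atomic(vfrom);
    }
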
memcontrol.h
48 * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous
272 enum lru_list to)
269 mem_cgroup_lru_move_lists(struct zone *zone, struct page *page, enum lru_list from, enum lru_list to) argument
mempolicy.h
13 * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
38 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
50 #define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */
51 #define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
91 * mpol_put() decrements the reference count to zero.
95 * to the new storage. The reference count of the new object is initialized
96 * to 1, representing the caller of mpol_dup().
108 nodemask_t cpuset_mems_allowed; /* relative to these nodes */
273 mpol_cond_copy(struct mempolicy *to, struct mempolicy *from) argument
[all...]
genhd.h
171 /* Array of pointers to partitions indexed by partno.
207 static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) argument
211 *to++ = (hex_to_bin(*uuid_str) << 4) |
280 * Macros to operate on percpu disk statistics:
453 __le32 v_sanity; /* to verify vtoc sanity */
499 __u32 d_trkseek; /* track-to-track seek, usec */
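
The part_pack_uuid() hit at 207 packs a textual UUID into 16 raw bytes, two hex digits at a time, skipping the dashes. A self-contained userspace rendering (hex_to_bin() is inlined here as a stand-in for the kernel helper):

    #include <stdio.h>

    static int hex_to_bin(char ch)      /* userspace stand-in */
    {
        if (ch >= '0' && ch <= '9') return ch - '0';
        if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
        if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
        return -1;
    }

    static void pack_uuid(const char *uuid_str, unsigned char *to)
    {
        int i;

        for (i = 0; i < 16; ++i) {
            *to++ = (hex_to_bin(uuid_str[0]) << 4) | hex_to_bin(uuid_str[1]);
            uuid_str += 2;
            if (*uuid_str == '-')       /* skip the group separators */
                uuid_str++;
        }
    }

    int main(void)
    {
        unsigned char buf[16];
        int i;

        pack_uuid("550e8400-e29b-41d4-a716-446655440000", buf);
        for (i = 0; i < 16; i++)
            printf("%02x", buf[i]);
        printf("\n");
        return 0;
    }
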
if_link.h
138 IFLA_GROUP, /* Group the device belongs to */
179 IFF_BROADCAST devices are able to use multicasts too.
185 can point to real physical interface (f.e. for bandwidth calculations),
187 for IPIP tunnels, when route to endpoint is allowed to change)
249 __u32 to; member in struct:ifla_vlan_qos_mapping
262 MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */
263 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
264 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
311 /* We don't want this structure exposed to user space */
[all...]
perf_event.h
117 * Bits that can be set in attr.sample_type to request information
138 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
142 * not have to match. Branch priv level is checked for permissions.
199 * Hardware event_id to monitor via a performance monitoring event:
246 * 2 - SAMPLE_IP requested to have 0 skid
300 * Bits needed to read the hw events in user-space.
336 __s64 offset; /* add to hardware event value */
349 * to sign extend the result like:
358 * If cap_usr_time the below fields can be used to compute the time
370 * seqcount loop described above. This delta can then be added to
637 __u64 to; member in struct:perf_branch_entry
[all...]
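
The truncated comment at 349 introduces a sign-extension recipe for raw counter values read from user space; the idea, as a sketch (raw, width and count stand in for the header's self-monitoring read loop):

    #include <stdint.h>

    /* Shift the sign bit of a 'width'-bit counter value up to bit 63,
     * then arithmetic-shift back down to sign-extend. */
    static int64_t sign_extend(uint64_t raw, unsigned int width)
    {
        return (int64_t)(raw << (64 - width)) >> (64 - width);
    }

    /* usage in the read loop: count += sign_extend(raw, width); */
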
skbuff.h
58 * NONE: device failed to checksum this packet.
64 * Apparently with secret goal to sell you new device, when you
65 * will add new protocol to your host. F.e. IPv6. 8)
70 * is able to produce some skb->csum, it MUST use COMPLETE,
73 * PARTIAL: identical to the case for output below. This may occur
84 * PARTIAL: device is required to csum packet as seen by hard_start_xmit
85 * from skb->csum_start to the end and to record the checksum
90 * NETIF_F_HW_CSUM - it is clever device, it is able to checksum
92 * NETIF_F_IP_CSUM - device is dumb. It is able to csum only TCP/UDP over IPv4.
2158 skb_copy_from_linear_data(const struct sk_buff *skb, void *to, const unsigned int len) argument
2165 skb_copy_from_linear_data_offset(const struct sk_buff *skb, const int offset, void *to, const unsigned int len) argument
2425 skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) argument
2435 skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) argument
2452 skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) argument
[all...]
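
The NONE/COMPLETE/PARTIAL taxonomy above is what a receive path encodes in skb->ip_summed. A hedged kernel-context sketch of the three common cases (hw_verified_ok, hw_gave_full_sum and hw_csum are hypothetical driver state):

    if (hw_verified_ok)
        skb->ip_summed = CHECKSUM_UNNECESSARY;  /* hardware validated it */
    else if (hw_gave_full_sum) {
        skb->ip_summed = CHECKSUM_COMPLETE;     /* raw sum over the packet */
        skb->csum = csum_unfold(hw_csum);
    } else
        skb->ip_summed = CHECKSUM_NONE;         /* stack checksums in software */
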
/include/linux/netfilter/
xt_connbytes.h
20 __aligned_u64 from; /* count to be matched */
21 __aligned_u64 to; /* count to be matched */ member in struct:xt_connbytes_info::__anon961
/include/net/
udplite.h
19 static __inline__ int udplite_getfrag(void *from, char *to, int offset, argument
22 return memcpy_fromiovecend(to, (struct iovec *) from, offset, len);
94 * illegal, we fall back to the defaults here.
checksum.h
97 static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) argument
99 __be32 diff[] = { ~from, to };
104 static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to) argument
106 csum_replace4(sum, (__force __be32)from, (__force __be32)to);
111 __be32 from, __be32 to, int pseudohdr);
114 __be16 from, __be16 to,
118 (__force __be32)to, pseudohdr);
113 inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, __be16 from, __be16 to, int pseudohdr) argument
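
csum_replace4() above is the RFC 1624 incremental update: fold the one's complement of the old field value and the new value into the existing sum instead of re-checksumming the packet. A userspace demonstration on host-order 16-bit words (not the kernel's __be32/__sum16 types) that checks the shortcut against a full recomputation:

    #include <stdint.h>
    #include <stdio.h>

    /* Full Internet checksum over 16-bit words (returns the complement). */
    static uint16_t csum16(const uint16_t *p, int nwords)
    {
        uint32_t sum = 0;

        while (nwords--)
            sum += *p++;
        while (sum >> 16)                       /* end-around carry */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    /* Incremental update for a replaced 32-bit field:
     * HC' = ~(~HC + ~m + m'), applied per 16-bit half. */
    static uint16_t replace4(uint16_t old, uint32_t from, uint32_t to)
    {
        uint32_t sum = (uint16_t)~old;

        sum += (uint16_t)~(from >> 16);
        sum += (uint16_t)~from;
        sum += to >> 16;
        sum += (uint16_t)to;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint16_t pkt[6] = { 0x4500, 0x0054, 0x1c46, 0x4000, 0x4001, 0x0000 };
        uint32_t from = ((uint32_t)pkt[2] << 16) | pkt[3];
        uint32_t to = 0xdeadbeef;
        uint16_t old = csum16(pkt, 6);

        pkt[2] = (uint16_t)(to >> 16);          /* rewrite the field */
        pkt[3] = (uint16_t)to;
        printf("incremental=0x%04x full=0x%04x\n",
               replace4(old, from, to), csum16(pkt, 6));
        return 0;                               /* both print 0xdd0c */
    }
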
sock.h
18 * better to prove they can be removed
20 * Alan Cox : Added a zapped field for tcp to note
29 * protocol specific parts were moved to
87 * This structure really needs to be cleaned up.
92 /* Define this to get the SOCK_DBG debugging facility. */
115 * to the lock validator by explicitly managing
116 * the slock as a lock variant (in addition to
141 * @skc_net: reference to the network namespace of this socket
218 * @sk_gso_max_size: Maximum GSO segment size to build
250 * @sk_send_head: front of stuff to transmit
1693 skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, char __user *from, char *to, int copy, int offset) argument
[all...]
/include/asm-generic/
uaccess.h
55 * address of an instruction that is allowed to fault, and the second is
57 * modified, so it is entirely up to the continuation code to figure out
58 * what to do.
62 * we don't even have to jump over them. Further, they do not intrude
78 static inline __must_check long __copy_from_user(void *to, argument
84 *(u8 *)to = *(u8 __force *)from;
87 *(u16 *)to = *(u16 __force *)from;
90 *(u32 *)to = *(u32 __force *)from;
94 *(u64 *)to = *(u64 __force *)from;
108 __copy_to_user(void __user *to, const void *from, unsigned long n) argument
243 copy_from_user(void *to, const void __user * from, unsigned long n) argument
253 copy_to_user(void __user *to, const void *from, unsigned long n) argument
317 __clear_user(void __user *to, unsigned long n) argument
325 clear_user(void __user *to, unsigned long n) argument
[all...]
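
The size switch visible in __copy_from_user() above exists so that compile-time-constant sizes collapse to a single load/store pair instead of a memcpy() call. The same shape in plain userspace C, as an illustration (copy_sized is a hypothetical name):

    #include <stdint.h>
    #include <string.h>

    static inline void copy_sized(void *to, const void *from, unsigned long n)
    {
        switch (n) {            /* constant n folds to one move */
        case 1: *(uint8_t  *)to = *(const uint8_t  *)from; break;
        case 2: *(uint16_t *)to = *(const uint16_t *)from; break;
        case 4: *(uint32_t *)to = *(const uint32_t *)from; break;
        case 8: *(uint64_t *)to = *(const uint64_t *)from; break;
        default: memcpy(to, from, n);
        }
    }
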
siginfo.h
31 * However, some architectures want to override this to "int"
68 int _sys_private; /* not to be passed to user */
107 * How these fields are to be accessed.
195 #define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
245 * It seems likely that SIGEV_THREAD will have to be handled from
246 * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
248 * However, everything is written out here so as to not get lost.
253 #define SIGEV_THREAD_ID 4 /* deliver to thread */
295 copy_siginfo(struct siginfo *to, struct siginfo *from) argument
[all...]
/include/sound/
pcm_params.h
20 * along with this program; if not, write to the Free Software
119 unsigned int from, unsigned int to)
122 for (i = from; i <= to; i++)
127 unsigned int from, unsigned int to)
130 for (i = from; i <= to; i++)
118 snd_mask_set_range(struct snd_mask *mask, unsigned int from, unsigned int to) argument
126 snd_mask_reset_range(struct snd_mask *mask, unsigned int from, unsigned int to) argument
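
The two range helpers above just walk bits from..to inclusive; the per-bit operation they call is the usual word/bit split. A self-contained userspace sketch (the 256-bit mask width is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    struct snd_mask { uint32_t bits[256 / 32]; };

    static void mask_set(struct snd_mask *mask, unsigned int i)
    {
        mask->bits[i >> 5] |= 1u << (i & 31);   /* word i/32, bit i%32 */
    }

    static void mask_set_range(struct snd_mask *mask,
                               unsigned int from, unsigned int to)
    {
        unsigned int i;

        for (i = from; i <= to; i++)
            mask_set(mask, i);
    }

    int main(void)
    {
        struct snd_mask m = { { 0 } };

        mask_set_range(&m, 4, 35);      /* spans a word boundary */
        printf("bits[0]=0x%08x bits[1]=0x%08x\n", m.bits[0], m.bits[1]);
        return 0;       /* bits[0]=0xfffffff0 bits[1]=0x0000000f */
    }
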
/include/linux/mtd/
map.h
15 * along with this program; if not, write to the Free Software
78 * to zero, and ensure we'll never miss the end of an comparison (bjd) */
193 to a chip probe routine -- either JEDEC or CFI probe or both -- via
201 The mtd->priv field will point to the struct map_info, and any further
203 mtd->priv->fldrv_priv field. This allows the map driver to get at
220 in bytes, before you are talking to the first chip again.
231 want to enable XIP for non-linear mappings. Not yet though. */
233 /* It's possible for the map driver to use cached memory in its
236 it will signal it to the map driver through this routine to le
431 inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) argument
439 inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) argument
[all...]
mtd.h
15 * along with this program; if not, write to the Free Software
46 * or was not specific to any particular block.
74 * @len: number of data bytes to write/read
78 * @ooblen: number of oob bytes to write/read
85 * Note, it is allowed to read more than one OOB area at one go, but not write.
105 * for export to user-space via the ECCGETLAYOUT ioctl.
123 * to be the only erase size available, or may use the more detailed
130 * it is of ECC block size, etc. It is illegal to have writesize = 0.
139 * writing 4 * writesize bytes to a device with 2 * writesize bytes
140 * buffer the MTD driver can (but doesn't have to) d
268 mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) argument
[all...]
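
A hedged sketch of driving the mtd_write_oob() entry point from the hit at 268 (kernel context; the field names follow struct mtd_oob_ops as documented above, MTD_OPS_PLACE_OOB is assumed as the placement mode, and mtd/to/databuf/oobbuf are assumed to be set up by the caller):

    struct mtd_oob_ops ops = {
        .mode    = MTD_OPS_PLACE_OOB,   /* explicit OOB placement */
        .len     = mtd->writesize,      /* main-area bytes to write */
        .datbuf  = databuf,
        .ooblen  = mtd->oobsize,        /* OOB bytes to write */
        .oobbuf  = oobbuf,
        .ooboffs = 0,
    };
    int ret = mtd_write_oob(mtd, to, &ops);

    if (!ret && ops.retlen != mtd->writesize)
        ret = -EIO;                     /* short write */
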
/include/net/sctp/
command.h
22 * along with GNU CC; see the file COPYING. If not, write to
26 * Please send any bug reports or fixes you make to one of the
34 * Any bugs reported given to us we will try to fix... any fixes shared will
57 SCTP_CMD_CHUNK_ULP, /* Send a chunk to the sockets layer. */
58 SCTP_CMD_EVENT_ULP, /* Send a notification to the sockets layer. */
59 SCTP_CMD_REPLY, /* Send a chunk to our peer. */
60 SCTP_CMD_SEND_PKT, /* Send a full packet to our peer. */
90 SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
132 sctp_event_timeout_t to; member in union:__anon1603
[all...]

Completed in 276 milliseconds