Searched defs:to (Results 1 - 25 of 27) sorted by relevance


/include/linux/netfilter/ipset/
ip_set_bitmap.h
17 range_to_mask(u32 from, u32 to, u8 *bits) argument
22 while (--(*bits) > 0 && mask && (to & mask) != from)
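
Aside: range_to_mask() computes the widest CIDR mask under which `to` still falls in the same block as `from`, returning the prefix length in *bits. A minimal, runnable userspace re-implementation of the loop shown above (the main() and constants are illustrative, not part of ip_set_bitmap.h):

#include <stdio.h>
#include <stdint.h>

/* Userspace sketch of the range_to_mask() loop: widen the mask one bit
 * at a time until `to` masks back down to `from`. */
static uint32_t range_to_mask(uint32_t from, uint32_t to, uint8_t *bits)
{
	uint32_t mask = 0xFFFFFFFEu;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;
	return mask;
}

int main(void)
{
	uint8_t bits;
	/* 192.168.0.0 .. 192.168.0.255 collapses to a /24. */
	uint32_t mask = range_to_mask(0xC0A80000u, 0xC0A800FFu, &bits);

	printf("mask=%08x bits=%u\n", mask, bits);	/* mask=ffffff00 bits=24 */
	return 0;
}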
/include/linux/
parser.h
25 char *to; member in struct:__anon874
uaccess.h
9 * it will not take any locks and go straight to the fixup table.
11 * They have great resemblance to the preempt_disable/enable calls
13 * no other way to make the pagefault handlers do this. So we do
20 * make sure to have issued the store before a pagefault
30 * make sure to issue those last loads/stores before enabling
42 static inline unsigned long __copy_from_user_inatomic_nocache(void *to, argument
45 return __copy_from_user_inatomic(to, from, n);
48 static inline unsigned long __copy_from_user_nocache(void *to, argument
51 return __copy_from_user(to, from, n);
57 * probe_kernel_address(): safely attempt to rea
[all...]
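
Aside: as the fallbacks at lines 45 and 51 show, architectures without a non-temporal copy simply reuse __copy_from_user_inatomic()/__copy_from_user(). A hedged sketch of calling the nocache variant (kernel build context assumed; stream_in() is an illustrative name, not a kernel API):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Pull a large user buffer into the kernel while trying not to pollute
 * the CPU cache. As a double-underscore helper, it assumes the caller
 * has already validated the range with access_ok(). Like the plain
 * copy helpers, it returns the number of bytes NOT copied. */
static int stream_in(void *dst, const void __user *src, unsigned long len)
{
	if (__copy_from_user_nocache(dst, src, len))
		return -EFAULT;
	return 0;
}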
hwspinlock.h
38 * by remote processors that have no other way to achieve synchronization.
45 * hwspinlocks, and in this case, they can be trivially numbered 0 to
52 * This platform data structure should be used to provide the base id
78 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
79 * enabled. We prefer to silently succeed in this case, and let the
84 * we _do_ want users to fail (no point in registering hwspinlock instances if
106 int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, argument
131 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
132 * @hwlock: an hwspinlock which we want to trylock
133 * @flags: a pointer to wher
209 hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, unsigned int to, unsigned long *flags) argument
233 hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to) argument
258 hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to) argument
[all...]
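
Aside: a hedged usage sketch matching the timeout API above, following the request/lock/unlock/free pattern from Documentation/hwspinlock.txt (lock id 0 and the function name are illustrative):

#include <linux/hwspinlock.h>
#include <linux/errno.h>

static int demo_hwlock(void)
{
	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	hwlock = hwspin_lock_request_specific(0);	/* id 0: illustrative */
	if (!hwlock)
		return -EBUSY;

	/* Spin for at most 10 ms; local interrupts are disabled and saved
	 * in `flags` while the lock is held. */
	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
	if (ret)
		goto free;

	/* ... touch the resource shared with the remote processor ... */

	hwspin_unlock_irqrestore(hwlock, &flags);
free:
	hwspin_lock_free(hwlock);
	return ret;
}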
projid.h
11 * to detect when we overlook these differences.
54 extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid);
55 extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid);
69 static inline projid_t from_kprojid(struct user_namespace *to, kprojid_t kprojid) argument
74 static inline projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t kprojid) argument
76 projid_t projid = from_kprojid(to, kprojid);
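
Aside: from_kprojid() reports an unmapped kprojid as (projid_t)-1, while from_kprojid_munged() (whose CONFIG_USER_NS=n stub is shown above) substitutes the overflow id so userspace always gets something reportable. A hedged kernel-context sketch (the helper name is illustrative):

#include <linux/projid.h>
#include <linux/user_namespace.h>
#include <linux/cred.h>

/* Translate a quota project id into the calling process's own user
 * namespace before reporting it to userspace. */
static projid_t projid_for_current(kprojid_t kprojid)
{
	return from_kprojid_munged(current_user_ns(), kprojid);
}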
highmem.h
120 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
143 * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
144 * @vma: The VMA the page is to be allocated for
148 * to specify via movableflags whether the page will be movable in the
172 * @vma: The VMA the page is to be allocated for
176 * be able to migrate in the future using move_pages() or reclaimed
224 static inline void copy_user_highpage(struct page *to, struct page *from, argument
230 vto = kmap_atomic(to);
231 copy_user_page(vto, vfrom, vaddr, to);
238 copy_highpage(struct page *to, struct page *from) argument
[all...]
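
Aside: copy_user_highpage() above shows the canonical kmap_atomic() pairing: map, operate, unmap in reverse order. A hedged sketch of the same pattern for clearing one possibly-highmem page (kernel context; the function name is illustrative):

#include <linux/highmem.h>
#include <linux/string.h>

/* Note that kunmap_atomic() takes the mapped address, not the page --
 * the very mistake the comment at line 120 above guards against. */
static void demo_clear_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}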
assoc_array_priv.h
53 * The segments correspond to levels of the tree (the first segment is used at
64 * A shortcut through the index space out to where a collection of nodes/leaves
95 struct assoc_array_ptr *to; member in struct:assoc_array_edit::__anon71
99 u8 to; member in struct:assoc_array_edit::__anon72
105 * Internal tree member pointers are marked in the bottom one or two bits to
106 * indicate what type they are so that we don't have to look behind every
107 * pointer to see what it points to.
109 * We provide functions to test type annotations and to creat
[all...]
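
Aside: the "bottom one or two bits" trick mentioned above works because aligned pointers leave their low bits zero, so they are free to carry a type tag. A runnable userspace sketch of the technique (the names are illustrative, not the kernel's macros):

#include <stdint.h>
#include <stdio.h>

#define PTR_TAG_LEAF 0x0UL
#define PTR_TAG_NODE 0x1UL
#define PTR_TAG_MASK 0x3UL	/* two low bits free on >= 4-byte alignment */

static inline void *tag_ptr(void *p, uintptr_t tag)
{
	return (void *)((uintptr_t)p | tag);
}

static inline uintptr_t ptr_tag(const void *p)
{
	return (uintptr_t)p & PTR_TAG_MASK;
}

static inline void *untag_ptr(const void *p)
{
	return (void *)((uintptr_t)p & ~PTR_TAG_MASK);
}

int main(void)
{
	long node = 42;				/* stands in for a tree node */
	void *edge = tag_ptr(&node, PTR_TAG_NODE);

	/* Test the annotation without looking behind the pointer. */
	printf("tag=%lu value=%ld\n",
	       (unsigned long)ptr_tag(edge), *(long *)untag_ptr(edge));
	return 0;
}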
mempolicy.h
36 * mpol_put() decrements the reference count to zero.
40 * to the new storage. The reference count of the new object is initialized
41 * to 1, representing the caller of mpol_dup().
53 nodemask_t cpuset_mems_allowed; /* relative to these nodes */
165 const nodemask_t *to, int flags);
187 * do so then migration (at least from node to node) is not
272 const nodemask_t *to, int flags)
271 do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) argument
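
Aside: do_migrate_pages() backs the migrate_pages(2) syscall, moving a task's pages from one node set to another. A hedged userspace example via the libnuma wrapper (assumes a NUMA machine with nodes 0 and 1; link with -lnuma):

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	/* Node masks as raw bitmaps: move everything on node 0 to node 1. */
	unsigned long from = 1UL << 0;
	unsigned long to   = 1UL << 1;

	/* pid 0 means the calling process; maxnode is the number of bits
	 * in the masks, plus one per the syscall convention. */
	long left = migrate_pages(0, 8 * sizeof(from) + 1, &from, &to);
	if (left < 0)
		perror("migrate_pages");
	else
		printf("pages that could not be moved: %ld\n", left);
	return 0;
}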
migrate.h
23 MR_SYSCALL, /* also applies to cpusets */
40 const nodemask_t *from, const nodemask_t *to,
61 const nodemask_t *from, const nodemask_t *to,
60 migrate_vmas(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, unsigned long flags) argument
uidgid.h
11 * to detect when we overlook these differences.
113 extern uid_t from_kuid(struct user_namespace *to, kuid_t uid);
114 extern gid_t from_kgid(struct user_namespace *to, kgid_t gid);
115 extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid);
116 extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid);
140 static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid) argument
145 static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid) argument
150 static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid) argument
152 uid_t uid = from_kuid(to, kuid);
158 static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_ argument
[all...]
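
Aside: same pattern as projid.h above: from_kuid() yields -1 for a kuid with no mapping in the target namespace, and from_kuid_munged() falls back to the overflow uid. A hedged sketch of the common stat-style use (kernel context; the helper name is illustrative):

#include <linux/uidgid.h>
#include <linux/cred.h>

/* Translate a kernel-internal kuid into the caller's user namespace
 * for reporting to userspace. */
static uid_t uid_seen_by_current(kuid_t kuid)
{
	return from_kuid_munged(current_user_ns(), kuid);
}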
lockdep.h
88 * to every node we attach a list of "forward" and a list of
95 * to ensure that we check one node only once:
147 * Map the lock object (the lock instance) to the lock-class object.
160 static inline void lockdep_copy_map(struct lockdep_map *to, argument
165 *to = *from;
175 to->class_cache[i] = NULL;
189 * The parent field is used to implement breadth-first search, and the
190 * bit 0 is reused to indicate if the lock has been accessed in BFS.
209 * to make 0 mean no class. This avoids overflowing the class_idx
216 * One-way hash of the dependency chain up to thi
[all...]
genhd.h
106 * partition while IO is happening to it and update of nr_sects
180 /* Array of pointers to partitions indexed by partno.
216 static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) argument
220 *to++ = (hex_to_bin(*uuid_str) << 4) |
234 static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to) argument
236 part_pack_uuid(uuid_str, to);
295 * Macros to operate on percpu disk statistics:
468 __le32 v_sanity; /* to verify vtoc sanity */
514 __u32 d_trkseek; /* track-to-track seek, usec */
699 * to provid
727 blk_part_pack_uuid(const u8 *uuid_str, u8 *to) argument
[all...]
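
Aside: part_pack_uuid() above packs a canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx string into 16 raw bytes, skipping the dashes after output bytes 3, 5, 7 and 9. A runnable userspace sketch of the same packing (hex_to_bin() re-implemented; names illustrative):

#include <stdio.h>
#include <ctype.h>

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

static void pack_uuid(const char *uuid_str, unsigned char *to)
{
	int i;

	for (i = 0; i < 16; ++i) {
		*to++ = (hex_to_bin(uuid_str[0]) << 4) | hex_to_bin(uuid_str[1]);
		uuid_str += 2;
		if (i == 3 || i == 5 || i == 7 || i == 9)
			uuid_str++;	/* skip the dash */
	}
}

int main(void)
{
	unsigned char out[16];
	int i;

	pack_uuid("12345678-9abc-def0-1122-334455667788", out);
	for (i = 0; i < 16; i++)
		printf("%02x", out[i]);
	printf("\n");	/* 123456789abcdef01122334455667788 */
	return 0;
}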
security.h
15 * Due to this file being licensed under the GPL there is controversy over
16 * whether this permits you to write a module that #includes this file
144 /* forward declares to avoid warnings */
159 /* security_inode_init_security callback function to write xattrs */
209 * checking to see if @bprm->security is non-NULL. If so, then the hook
210 * may decide either to retain the security information saved earlier or
211 * to replace it.
224 * Prepare to install the new security attributes of a process being
226 * pointed to by @current->cred and the information set in @bprm->cred by
227 * the bprm_set_creds hook. @bprm points to th
1944 security_binder_transaction(struct task_struct *from, struct task_struct *to) argument
1949 security_binder_transfer_binder(struct task_struct *from, struct task_struct *to) argument
1954 security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file) argument
[all...]
/include/asm-generic/
siginfo.h
24 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) argument
27 memcpy(to, from, sizeof(*to));
30 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
35 extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
uaccess.h
56 * address of an instruction that is allowed to fault, and the second is
58 * modified, so it is entirely up to the continuation code to figure out
59 * what to do.
63 * we don't even have to jump over them. Further, they do not intrude
79 static inline __must_check long __copy_from_user(void *to, argument
85 *(u8 *)to = *(u8 __force *)from;
88 *(u16 *)to = *(u16 __force *)from;
91 *(u32 *)to = *(u32 __force *)from;
95 *(u64 *)to
109 __copy_to_user(void __user *to, const void *from, unsigned long n) argument
255 copy_from_user(void *to, const void __user * from, unsigned long n) argument
265 copy_to_user(void __user *to, const void *from, unsigned long n) argument
329 __clear_user(void __user *to, unsigned long n) argument
337 clear_user(void __user *to, unsigned long n) argument
[all...]
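
Aside: the copy helpers return the number of bytes left uncopied rather than an errno, so callers conventionally map any nonzero remainder to -EFAULT. A hedged sketch of the classic write() handler pattern (kernel context; names illustrative):

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char kbuf[64];

	if (count > sizeof(kbuf))
		count = sizeof(kbuf);

	/* copy_from_user() returns the number of bytes NOT copied. */
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	/* ... consume kbuf ... */
	return count;
}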
/include/uapi/linux/netfilter/
xt_connbytes.h
20 __aligned_u64 from; /* count to be matched */
21 __aligned_u64 to; /* count to be matched */ member in struct:xt_connbytes_info::__anon1999
/include/net/
udplite.h
19 static __inline__ int udplite_getfrag(void *from, char *to, int offset, argument
22 return memcpy_fromiovecend(to, (struct iovec *) from, offset, len);
94 * illegal, we fall back to the defaults here.
checksum.h
123 static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) argument
125 *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
141 __be32 from, __be32 to, int pseudohdr);
143 const __be32 *from, const __be32 *to,
147 __be16 from, __be16 to,
151 (__force __be32)to, pseudohdr);
146 inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, __be16 from, __be16 to, int pseudohdr) argument
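
Aside: csum_replace4() and the inet_proto_csum_replace*() helpers let NAT-style code patch a checksum incrementally instead of recomputing it over the whole packet. A hedged sketch of rewriting an IPv4 source address (kernel context; the function name is illustrative):

#include <net/checksum.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>

static void demo_rewrite_saddr(struct sk_buff *skb, struct iphdr *iph,
			       struct tcphdr *tcph, __be32 new_saddr)
{
	/* The IP header checksum does not cover a pseudo-header. */
	csum_replace4(&iph->check, iph->saddr, new_saddr);
	/* The TCP checksum does, so use the proto variant with pseudohdr=1. */
	inet_proto_csum_replace4(&tcph->check, skb, iph->saddr, new_saddr, 1);
	iph->saddr = new_saddr;
}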
inetpeer.h
31 /* group together avl_left,avl_right,v4daddr to speedup lookups */
46 * We can share memory with rcu_head to help keep inet_peer small.
110 static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from) argument
114 *to = val;
166 * temporary check to make sure we dont access rid, tcp_ts,
/include/linux/mtd/
map.h
15 * along with this program; if not, write to the Free Software
78 * to zero, and ensure we'll never miss the end of an comparison (bjd) */
193 to a chip probe routine -- either JEDEC or CFI probe or both -- via
201 The mtd->priv field will point to the struct map_info, and any further
203 mtd->priv->fldrv_priv field. This allows the map driver to get at
220 in bytes, before you are talking to the first chip again.
231 want to enable XIP for non-linear mappings. Not yet though. */
233 /* It's possible for the map driver to use cached memory in its
236 it will signal it to the map driver through this routine to le
436 inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) argument
444 inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) argument
[all...]
mtd.h
15 * along with this program; if not, write to the Free Software
43 * or was not specific to any particular block.
71 * @len: number of data bytes to write/read
75 * @ooblen: number of oob bytes to write/read
82 * Note, it is allowed to read more than one OOB area at one go, but not write.
102 * for export to user-space via the ECCGETLAYOUT ioctl.
120 * to be the only erase size available, or may use the more detailed
127 * it is of ECC block size, etc. It is illegal to have writesize = 0.
136 * writing 4 * writesize bytes to a device with 2 * writesize bytes
137 * buffer the MTD driver can (but doesn't have to) d
271 mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) argument
[all...]
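
Aside: mtd_write_oob() takes the target offset `to` plus a struct mtd_oob_ops describing the data and/or OOB buffers. A hedged sketch writing only OOB bytes with automatic free-area placement (kernel context; the buffer length and function name are illustrative):

#include <linux/mtd/mtd.h>

/* Write 8 OOB bytes for the page at `to`, letting the ECC layout
 * decide where the free OOB bytes live (MTD_OPS_AUTO_OOB). */
static int demo_write_oob(struct mtd_info *mtd, loff_t to, u8 *oobbuf)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,
		.ooblen	= 8,
		.oobbuf	= oobbuf,
		/* .datbuf left NULL: no main-area data in this call */
	};

	return mtd_write_oob(mtd, to, &ops);
}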
/include/sound/
pcm_params.h
20 * along with this program; if not, write to the Free Software
121 unsigned int from, unsigned int to)
124 for (i = from; i <= to; i++)
129 unsigned int from, unsigned int to)
132 for (i = from; i <= to; i++)
120 snd_mask_set_range(struct snd_mask *mask, unsigned int from, unsigned int to) argument
128 snd_mask_reset_range(struct snd_mask *mask, unsigned int from, unsigned int to) argument
/include/net/sctp/
command.h
25 * Please send any bug reports or fixes you make to the
54 SCTP_CMD_CHUNK_ULP, /* Send a chunk to the sockets layer. */
55 SCTP_CMD_EVENT_ULP, /* Send a notification to the sockets layer. */
56 SCTP_CMD_REPLY, /* Send a chunk to our peer. */
57 SCTP_CMD_SEND_PKT, /* Send a full packet to our peer. */
87 SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */
121 void *zero_all; /* Set to NULL to clear the entire union */
130 sctp_event_timeout_t to; member in union:__anon1320
171 SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to)
[all...]
/include/uapi/linux/
if_link.h
138 IFLA_GROUP, /* Group the device belongs to */
186 IFF_BROADCAST devices are able to use multicasts too.
192 can point to real physical interface (f.e. for bandwidth calculations),
194 for IPIP tunnels, when route to endpoint is allowed to change)
298 __u32 to; member in struct:ifla_vlan_qos_mapping
316 MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */
317 MACVLAN_MODE_VEPA = 2, /* talk to other ports through ext bridge */
318 MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
320 MACVLAN_MODE_SOURCE = 16,/* use source MAC address list to assig
[all...]
perf_event.h
118 * Bits that can be set in attr.sample_type to request information
145 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
149 * not have to match. Branch priv level is checked for permissions.
177 * Values to determine ABI of the registers dump.
243 * Hardware event_id to monitor via a performance monitoring event:
290 * 2 - SAMPLE_IP requested to have 0 skid
305 comm_exec : 1, /* flag comm events that are due to an exec */
325 * Defines set of user regs to dump on samples.
331 * Defines size of the user stack to dump on samples.
335 /* Align to u6
816 __u64 to; member in struct:perf_branch_entry
[all...]

Completed in 3765 milliseconds
