Searched refs:mask (Results 1 - 25 of 45) sorted by relevance

/kernel/time/
timekeeping_internal.h
16 static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) argument
18 cycle_t ret = (now - last) & mask;
23 static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) argument
25 return (now - last) & mask;
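A note on the pattern above: clocksource_delta() relies on unsigned two's-complement wraparound, so (now - last) & mask yields the elapsed ticks even when the hardware counter rolled over between the two readings. A minimal standalone sketch (plain C, not kernel code; the 24-bit counter width is an invented example):

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as clocksource_delta(): mask encodes the counter width. */
    static uint64_t counter_delta(uint64_t now, uint64_t last, uint64_t mask)
    {
        return (now - last) & mask;
    }

    int main(void)
    {
        uint64_t mask = (1ULL << 24) - 1; /* pretend 24-bit hardware counter */
        uint64_t last = mask - 5;         /* read just below the wrap point */
        uint64_t now  = 10;               /* read 10 ticks after the wrap */

        /* Prints 16: the unsigned subtraction underflows, the mask fixes it. */
        printf("delta = %llu\n", (unsigned long long)counter_delta(now, last, mask));
        return 0;
    }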
clocksource.c
67 cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
94 u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
102 if (cycle_delta > tc->cc->mask / 2) {
103 cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
286 delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
290 delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
547 * @mask: bitmask for two's complement subtraction of non 64 bit counters
549 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask) argument
571 * determined by the minimum of max_cycles and mask.
575 max_cycles = min(max_cycles, mask);
[all...]
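Lines 102-103 above show the companion trick for timestamps that may lie slightly in the past: a masked delta larger than half the counter range is taken to be a small negative interval, so the subtraction is redone in the other direction. Roughly (standalone C, assuming the true interval is under half the counter range, as the timecounter code does):

    #include <stdint.h>

    /* Signed distance between two masked counter readings a and b. */
    static int64_t signed_counter_delta(uint64_t a, uint64_t b, uint64_t mask)
    {
        uint64_t delta = (a - b) & mask;

        if (delta > mask / 2) /* "negative" wrap case, as in line 102 */
            return -(int64_t)((b - a) & mask);
        return (int64_t)delta;
    }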
jiffies.c
71 .mask = 0xffffffff, /*32bits*/
tick-broadcast.c
136 static void err_broadcast(const struct cpumask *mask) argument
200 * unconditionally clear the oneshot mask bit,
219 * If we kept the cpu in the broadcast mask,
254 * Broadcast the event to the cpus, which are set in the mask (mangled).
256 static void tick_do_broadcast(struct cpumask *mask) argument
262 * Check, if the current cpu is in the mask
264 if (cpumask_test_cpu(cpu, mask)) {
265 cpumask_clear_cpu(cpu, mask);
270 if (!cpumask_empty(mask)) {
277 td = &per_cpu(tick_cpu_device, cpumask_first(mask));
823 tick_broadcast_init_next_event(struct cpumask *mask, ktime_t expires) argument
[all...]
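tick_do_broadcast() (lines 256-277 above) shows a recurring cpumask idiom: the current CPU removes its own bit and handles the event directly, and only the leftover bits are handed to another CPU. The same shape with a 64-bit word standing in for struct cpumask (an illustrative sketch, not the kernel API):

    #include <stdint.h>

    static void handle_event_locally(int cpu) { (void)cpu; /* ... */ }
    static void delegate(int first_cpu, uint64_t mask)
    {
        (void)first_cpu; (void)mask; /* e.g. program that CPU's device */
    }

    static void do_broadcast(int this_cpu, uint64_t mask)
    {
        /* Handle our own bit directly rather than signalling ourselves. */
        if (mask & (1ULL << this_cpu)) {
            mask &= ~(1ULL << this_cpu);
            handle_event_locally(this_cpu);
        }

        /* Whatever remains goes to the first CPU still set in the mask. */
        if (mask)
            delegate(__builtin_ctzll(mask), mask); /* GCC/Clang builtin */
    }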
/kernel/irq/
generic-chip.c
32 * Chip has separate enable/disable registers instead of a single mask
39 u32 mask = d->mask; local
42 irq_reg_writel(mask, gc->reg_base + ct->regs.disable);
43 *ct->mask_cache &= ~mask;
48 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
51 * Chip has a single mask register. Values of this register are cached
58 u32 mask = d->mask; local
61 *ct->mask_cache |= mask;
78 u32 mask = d->mask; local
98 u32 mask = d->mask; local
114 u32 mask = d->mask; local
130 u32 mask = ~d->mask; local
145 u32 mask = d->mask; local
161 u32 mask = d->mask; local
180 u32 mask = d->mask; local
[all...]
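All the irq_gc_* helpers above share one mechanism: the hardware mask register is shadowed in *ct->mask_cache, so each operation computes the new value with plain bit arithmetic and never needs to read a slow or write-only register back. A stripped-down sketch of that shadow-register pattern (standalone C; the register itself is faked with a variable):

    #include <stdint.h>

    static uint32_t hw_mask_reg; /* stand-in for a write-only MMIO register */

    struct chip {
        uint32_t mask_cache; /* software shadow of hw_mask_reg */
    };

    /* Disable one line: set its bit in the cache, push the whole cache out. */
    static void chip_mask_irq(struct chip *gc, unsigned int hwirq)
    {
        gc->mask_cache |= 1U << hwirq;
        hw_mask_reg = gc->mask_cache;
    }

    static void chip_unmask_irq(struct chip *gc, unsigned int hwirq)
    {
        gc->mask_cache &= ~(1U << hwirq);
        hw_mask_reg = gc->mask_cache;
    }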
autoprobe.c
28 * and a mask of potential interrupt lines is returned.
34 unsigned long mask = 0; local
97 mask |= 1 << i;
102 return mask;
108 * @val: mask of interrupts to consider
120 unsigned int mask = 0; local
128 mask |= 1 << i;
137 return mask & val;
143 * @val: mask of potential interrupts (unused)
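The autoprobe code above communicates through a plain unsigned long: every candidate line contributes one bit (mask |= 1 << i) and the caller intersects the result with the lines it actually cares about (mask & val). The accumulate-then-filter idiom in miniature (illustrative values):

    #include <stdio.h>

    static int line_triggered(unsigned int i) { return (i & 1) == 0; } /* fake */

    int main(void)
    {
        unsigned long mask = 0, val = 0x0f; /* caller wants lines 0-3 only */
        unsigned int i;

        for (i = 0; i < 16; i++)
            if (line_triggered(i))
                mask |= 1UL << i;       /* accumulate candidate lines */

        printf("candidates=%#lx usable=%#lx\n", mask, mask & val);
        return 0;
    }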
internals.h
109 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
177 static inline void irqd_clear(struct irq_data *d, unsigned int mask) argument
179 d->state_use_accessors &= ~mask;
182 static inline void irqd_set(struct irq_data *d, unsigned int mask) argument
184 d->state_use_accessors |= mask;
187 static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) argument
189 return d->state_use_accessors & mask;
manage.c
158 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) argument
160 cpumask_copy(desc->pending_mask, mask);
163 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) argument
165 cpumask_copy(mask, desc->pending_mask);
171 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } argument
173 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } argument
176 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, argument
183 ret = chip->irq_set_affinity(data, mask, force);
186 cpumask_copy(data->affinity, mask);
195 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, argument
221 __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) argument
322 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) argument
356 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) argument
365 irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) argument
379 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) argument
742 cpumask_var_t mask; local
960 cpumask_var_t mask; local
[all...]
proc.c
25 const struct cpumask *mask = desc->irq_data.affinity; local
29 mask = desc->pending_mask;
32 seq_cpumask_list(m, mask);
34 seq_cpumask(m, mask);
43 cpumask_var_t mask; local
45 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
50 cpumask_copy(mask, desc->affinity_hint);
53 seq_cpumask(m, mask);
55 free_cpumask_var(mask);
settings.h
72 irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) argument
75 desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
/kernel/sched/
cpupri.c
57 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
84 * do a memory barrier, then read the mask.
89 * If a mask is not set, then the only thing wrong is that we
92 * If we read a zero count but the mask is set, because of the
106 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
110 cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
116 * second reads of vec->mask. If we hit this
162 cpumask_set_cpu(cpu, vec->mask);
164 * When adding a new vector, we update the mask first,
192 * do a memory barrier and then clear the mask
[all...]
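The cpupri comments above describe a lock-free publication order: on insertion the mask is updated first, then a barrier, then the count; removal reverses it. A reader that sees a non-zero count may still see a stale or empty mask, which is the benign race the comments call out, and it simply double-checks. A rough model in C11 atomics (the ordering idea only, not the kernel's actual implementation):

    #include <stdatomic.h>
    #include <stdint.h>

    struct vec {
        atomic_int       count; /* CPUs currently at this priority */
        _Atomic uint64_t mask;  /* which CPUs (bitmap stand-in for cpumask) */
    };

    static void vec_add_cpu(struct vec *v, int cpu)
    {
        /* Mask first, barrier, then publish through the count. */
        atomic_fetch_or_explicit(&v->mask, 1ULL << cpu, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        atomic_fetch_add_explicit(&v->count, 1, memory_order_relaxed);
    }

    static void vec_del_cpu(struct vec *v, int cpu)
    {
        /* Reverse on removal: retract the count before clearing the mask. */
        atomic_fetch_sub_explicit(&v->count, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        atomic_fetch_and_explicit(&v->mask, ~(1ULL << cpu), memory_order_relaxed);
    }

    /* Reader: zero count means skip; non-zero count with an empty-looking
     * mask is the transient case the cpupri comments describe. */
    static int vec_read(struct vec *v, uint64_t *out)
    {
        if (!atomic_load_explicit(&v->count, memory_order_relaxed))
            return 0;
        atomic_thread_fence(memory_order_acquire);
        *out = atomic_load_explicit(&v->mask, memory_order_relaxed);
        return *out != 0;
    }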
cpupri.h
15 cpumask_var_t mask; member in struct:cpupri_vec
/kernel/
up.c
48 * Note we still need to test the mask even for UP
49 * because we actually can get an empty mask from
51 * CPU in the mask.
53 void on_each_cpu_mask(const struct cpumask *mask, argument
58 if (cpumask_test_cpu(0, mask)) {
smp.c
333 * @mask: The mask of cpus it can run on.
341 * 1) current cpu if in @mask
342 * 2) any cpu of current node if in @mask
343 * 3) any other online cpu in @mask
345 int smp_call_function_any(const struct cpumask *mask, argument
354 if (cpumask_test_cpu(cpu, mask))
359 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
360 cpu = cpumask_next_and(cpu, nodemask, mask)) {
366 cpu = cpumask_any_and(mask, cpu_online_mask);
388 smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) argument
604 on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait) argument
[all...]
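smp_call_function_any() chooses its target in the three-step preference order spelled out at lines 341-343: the current CPU if it is in @mask, then a CPU on the same node, then any online CPU. The selection logic, with 64-bit bitmaps standing in for cpumasks (a sketch, not the kernel types):

    #include <stdint.h>

    /* Pick a CPU from @mask preferring locality; -1 if none is online.
     * @self: current CPU; @node: same-node CPUs; @online: online CPUs. */
    static int pick_target_cpu(int self, uint64_t mask,
                               uint64_t node, uint64_t online)
    {
        uint64_t local = mask & node & online;
        uint64_t any   = mask & online;

        if (mask & (1ULL << self))
            return self;                        /* 1) ourselves */
        if (local)
            return __builtin_ctzll(local);      /* 2) same node */
        return any ? __builtin_ctzll(any) : -1; /* 3) anything online */
    }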
taskstats.c
288 static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) argument
295 if (!cpumask_subset(mask, cpu_possible_mask))
305 for_each_cpu(cpu, mask) {
332 for_each_cpu(cpu, mask) {
347 static int parse(struct nlattr *na, struct cpumask *mask) argument
364 ret = cpulist_parse(data, mask);
477 cpumask_var_t mask; local
480 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
482 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
485 rc = add_del_listener(info->snd_portid, mask, REGISTER);
493 cpumask_var_t mask; local
[all...]
signal.c
165 /* Given the mask, find the first available signal that should be serviced. */
171 int next_signal(struct sigpending *pending, sigset_t *mask) argument
177 m = mask->sig;
234 * @mask: pending bits to set
236 * Clear @mask from @task->jobctl. @mask must be subset of
246 * %true if @mask is set, %false if made noop because @task was dying.
248 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask) argument
250 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
252 BUG_ON((mask
300 task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) argument
521 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) argument
583 __dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t *info) argument
610 dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) argument
710 flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) argument
2793 sigset_t mask = *which; local
3077 sigset_t mask; local
3094 sigset_t mask; local
3374 compat_sigset_t mask; local
3425 old_sigset_t mask; local
3459 compat_old_sigset_t mask; local
[all...]
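next_signal() (line 171 above) scans the pending set against the blocked mask and picks the lowest-numbered deliverable signal. The word-level core of that scan, with single 64-bit words standing in for sigset_t (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Lowest deliverable signal, or 0 if none - next_signal()'s contract. */
    static int first_deliverable(uint64_t pending, uint64_t blocked)
    {
        uint64_t x = pending & ~blocked;

        return x ? __builtin_ctzll(x) + 1 : 0; /* signal numbers are 1-based */
    }

    int main(void)
    {
        uint64_t pending = (1ULL << 1) | (1ULL << 8); /* signals 2 and 9 pending */
        uint64_t blocked = (1ULL << 1);               /* signal 2 blocked */

        printf("next signal: %d\n", first_deliverable(pending, blocked)); /* 9 */
        return 0;
    }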
auditfilter.c
207 static inline int audit_match_class_bits(int class, u32 *mask) argument
213 if (mask[i] & classes[class][i])
227 entry->rule.mask) &&
229 entry->rule.mask));
235 entry->rule.mask));
238 entry->rule.mask));
288 entry->rule.mask[i] = rule->mask[i];
292 __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];
302 entry->rule.mask[
[all...]
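audit_match_class_bits() (line 207 above) is a word-wise disjointness test: it walks two fixed-size arrays of u32 words and fails as soon as any word pair overlaps. Generalized over an arbitrary word count (standalone C):

    #include <stdint.h>
    #include <stddef.h>

    /* 1 if the two multi-word bitmaps share no set bits, 0 otherwise -
     * the same loop shape as audit_match_class_bits(). */
    static int bitmaps_disjoint(const uint32_t *a, const uint32_t *b, size_t words)
    {
        size_t i;

        for (i = 0; i < words; i++)
            if (a[i] & b[i])
                return 0;
        return 1;
    }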
audit_watch.c
160 parent->mark.mask = AUDIT_FS_WATCH;
473 u32 mask, void *data, int data_type,
496 if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
498 else if (mask & (FS_DELETE|FS_MOVED_FROM))
500 else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
469 audit_watch_handle_event(struct fsnotify_group *group, struct inode *to_tell, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, u32 mask, void *data, int data_type, const unsigned char *dname, u32 cookie) argument
padata.c
703 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
704 * The @mask may be any combination of the following flags:
709 int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask) argument
713 if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
719 if (mask & PADATA_CPU_SERIAL)
721 if (mask & PADATA_CPU_PARALLEL)
763 * @mask: bitmask specifying from which cpumask @cpu should be removed
764 * The @mask may be any combination of the following flags:
768 int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask) argument
772 if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
[all...]
compat.c
641 cpumask_var_t mask; local
648 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
651 ret = sched_getaffinity(pid, mask);
655 if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
660 free_cpumask_var(mask);
891 long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, argument
926 *mask++ = m;
932 long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, argument
949 m = *mask++;
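compat_get_bitmap()/compat_put_bitmap() (lines 891-949 above) translate bitmaps between 32-bit compat words and native unsigned longs, assembling each 64-bit word from two halves. The packing step without the copy_from_user() plumbing (plain-memory sketch):

    #include <stdint.h>
    #include <stddef.h>

    /* Pack 32-bit words into 64-bit words, low half first - the layout
     * compat_get_bitmap() reconstructs from user space. */
    static void pack_bitmap(uint64_t *mask, const uint32_t *umask, size_t nr_u32)
    {
        size_t i;

        for (i = 0; i < nr_u32; i += 2) {
            uint64_t m = umask[i];                 /* low 32 bits */

            if (i + 1 < nr_u32)
                m |= (uint64_t)umask[i + 1] << 32; /* high 32 bits */
            *mask++ = m;
        }
    }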
/kernel/debug/kdb/
kdb_bt.c
80 kdb_bt1(struct task_struct *p, unsigned long mask, argument
87 if (!kdb_task_state(p, mask))
120 unsigned long mask = kdb_task_state_string(argc ? argv[1] : local
127 if (kdb_bt1(p, mask, argcount, btaprompt))
136 if (kdb_bt1(p, mask, argcount, btaprompt))
kdb_private.h
207 unsigned long mask);
/kernel/rcu/
tree.c
1957 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1965 if (!(rnp->qsmask & mask)) {
1971 rnp->qsmask &= ~mask;
1973 mask, rnp->qsmask, rnp->level,
1982 mask = rnp->grpmask;
2018 unsigned long mask; local
2038 mask = rdp->grpmask;
2039 if ((rnp->qsmask & mask) == 0) {
2050 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
2199 RCU_TRACE(unsigned long mask);
2219 unsigned long mask; local
2445 unsigned long mask; local
3397 unsigned long mask; local
[all...]
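rcu_report_qs_rnp() (lines 1957-1982 above) clears the reporter's bit in a node's qsmask and, when the node empties, continues one level up using the node's own grpmask bit. The bare propagation walk over a toy tree, with none of the locking or grace-period bookkeeping (sketch only):

    #include <stdint.h>

    struct node {
        uint64_t     qsmask;  /* children still owing a quiescent state */
        uint64_t     grpmask; /* this node's single bit in its parent */
        struct node *parent;
    };

    static void report_qs(struct node *rnp, uint64_t mask)
    {
        for (;;) {
            if (!(rnp->qsmask & mask))
                return;              /* already reported earlier */
            rnp->qsmask &= ~mask;
            if (rnp->qsmask || !rnp->parent)
                return;              /* siblings pending, or at root */
            mask = rnp->grpmask;     /* become one bit in the parent */
            rnp = rnp->parent;
        }
    }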
/kernel/trace/
trace_irqsoff.c
569 static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) argument
573 if (mask & TRACE_ITER_FUNCTION)
576 return trace_keep_overwrite(tracer, mask, set);
trace_output.c
76 unsigned long mask; local
83 mask = flag_array[i].mask;
84 if ((flags & mask) != mask)
88 flags &= ~mask;
118 if (val != symbol_array[i].mask)
144 if (val != symbol_array[i].mask)
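The trace_output.c loop above decodes a flags word against a table: a name is printed only when every bit of its mask is present, and matched bits are cleared so leftovers can be reported raw. The same decoder in miniature (standalone C; the flag table is invented):

    #include <stdio.h>

    static const struct { unsigned long mask; const char *name; } flag_array[] = {
        { 0x3, "RDWR" }, { 0x1, "READ" }, { 0x2, "WRITE" }, /* made-up flags */
    };

    static void print_flags(unsigned long flags)
    {
        size_t i;

        for (i = 0; i < sizeof(flag_array) / sizeof(flag_array[0]); i++) {
            unsigned long mask = flag_array[i].mask;

            if ((flags & mask) != mask) /* every bit must be present */
                continue;
            printf("%s|", flag_array[i].name);
            flags &= ~mask;             /* consume the matched bits */
        }
        if (flags)
            printf("%#lx", flags);      /* anything left prints raw */
        printf("\n");
    }

    int main(void) { print_flags(0x7); return 0; } /* -> RDWR|0x4 */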

Completed in 206 milliseconds
