/kernel/trace/

trace_nop.c
    65: static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]
    71: if (bit == TRACE_NOP_OPT_ACCEPT) {
    78: if (bit == TRACE_NOP_OPT_REFUSE) {

trace_functions.c
    130: int bit;  [local]
    140: bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
    141: if (bit < 0)
    150: trace_clear_recursion(bit);
    223: func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]
    225: switch (bit) {

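The trace_functions.c hits (lines 140-150), together with the trace_recursion_set/clear macros in the trace.h entry further down, show the tracer's recursion guard: claim a context bit before recording an event, bail out if it is already taken, and clear it on the way out. A minimal sketch of that pattern, collapsed to a single guard bit (the real helpers pick one bit per context out of the START..MAX range, so names and constants here are stand-ins):

    #include <stdio.h>

    /* Stand-in for current->trace_recursion; per-task in the kernel. */
    static unsigned long trace_recursion;

    /* Claim the guard bit; return it on success, or -1 if we are recursing. */
    static int trace_test_and_set_recursion(int bit)
    {
        if (trace_recursion & (1UL << bit))
            return -1;
        trace_recursion |= 1UL << bit;
        return bit;
    }

    static void trace_clear_recursion(int bit)
    {
        trace_recursion &= ~(1UL << bit);
    }

    static void function_trace_call(void)
    {
        int bit = trace_test_and_set_recursion(0);

        if (bit < 0)                    /* already tracing in this context: bail out */
            return;

        printf("record one event\n");   /* the real callback writes to the ring buffer */
        trace_clear_recursion(bit);
    }

    int main(void)
    {
        function_trace_call();
        return 0;
    }
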
trace_sched_wakeup.c
    209: wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]
    212: if (!(bit & TRACE_DISPLAY_GRAPH))
    312: wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]

trace_irqsoff.c
    158: irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]
    162: if (!(bit & TRACE_DISPLAY_GRAPH))
    265: irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]

trace_functions_graph.c
    498: * to the right a bit when trace output is pasted into
    1516: func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]
    1518: if (bit == TRACE_GRAPH_PRINT_IRQS)

trace_output.c
    602: int bit = state ? __ffs(state) + 1 : 0;  [local]
    604: return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';

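The two trace_output.c hits map a task-state bitmask to a single status character by taking the lowest set bit as an index into a lookup string. A small sketch of the same idea, using POSIX ffs() in place of the kernel's __ffs() and an illustrative character table:

    #include <stdio.h>
    #include <strings.h>            /* ffs(): 1-based index of the lowest set bit */

    static const char state_to_char[] = "RSDTtXZ";   /* illustrative table */

    static char task_state_char(unsigned long state)
    {
        /* ffs() is already 1-based, so it matches the kernel's __ffs()+1;
         * state == 0 ("running") maps to index 0. */
        int bit = state ? ffs((int)state) : 0;

        return bit < (int)sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
    }

    int main(void)
    {
        printf("%c %c %c\n",
               task_state_char(0),        /* running          -> 'R' */
               task_state_char(1 << 0),   /* lowest state bit -> 'S' */
               task_state_char(1 << 6));  /* out of table     -> '?' */
        return 0;
    }
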
blktrace.c
    186: * Data direction bit lookup
    1433: blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]
    1436: if (bit == TRACE_BLK_OPT_CLASSIC) {

trace.h
    314: * The bit is the bit index that sets its value on the
    319: u32 bit; /* Mask assigned in val field in tracer_flags */  [member in struct tracer_opt]
    332: #define TRACER_OPT(s, b) .name = #s, .bit = b
    383: u32 old_flags, u32 bit, int set);
    421: * bit is set that is higher than the MAX bit of the current
    455: #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
    456: #define trace_recursion_clear(bit) d
    471: int bit;  [local]
    490: int bit;  [local]
    507: trace_clear_recursion(int bit)  [argument]
    [all...]

ring_buffer.c
    136: * In case of an anomaly, this global flag has a bit set that
    182: #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
    660: * checking if the ring buffer is empty. Once the waiters_pending bit
    749: * bit that points to the head page, move the head, and then set
    750: * the bit that points to the new head page.
    756: * head->list->prev->next bit 1 bit 0
    804: * rb_list_head - remove any bit
    2634: * a bit of overhead in something as critical as function tracing,
    2637: * bit
    2669: int bit;  [local]
    [all...]

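The ring_buffer.c comment hits (lines 749-804) describe tagging list pointers with flag bits: because list_head pointers are at least 4-byte aligned, the low bits of ->next are free to mark, for example, the head page, and rb_list_head() strips them before dereferencing. A self-contained sketch of that low-bit tagging, with illustrative flag names:

    #include <stdint.h>
    #include <stdio.h>

    #define RB_FLAG_MASK  3UL       /* the two low bits carry flags */
    #define RB_PAGE_HEAD  1UL       /* illustrative flag value      */

    struct list_head { struct list_head *next, *prev; };

    /* Strip any flag bits so the pointer can be dereferenced safely. */
    static struct list_head *rb_list_head(struct list_head *list)
    {
        return (struct list_head *)((uintptr_t)list & ~RB_FLAG_MASK);
    }

    static int rb_is_head(struct list_head *list)
    {
        return ((uintptr_t)list & RB_FLAG_MASK) == RB_PAGE_HEAD;
    }

    int main(void)
    {
        struct list_head page;
        /* Tag a pointer to "page" as the head page. */
        struct list_head *tagged =
            (struct list_head *)((uintptr_t)&page | RB_PAGE_HEAD);

        printf("head? %d  untagged ok? %d\n",
               rb_is_head(tagged), rb_list_head(tagged) == &page);
        return 0;
    }
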
ftrace.c
    302: * For static tracing, we need to be a bit more careful.
    4814: int bit;  [local]
    4816: bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
    4817: if (bit < 0)
    4836: trace_clear_recursion(bit);
    4873: int bit;  [local]
    4875: bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
    4876: if (bit < 0)
    4881: trace_clear_recursion(bit);

trace.c
    77: dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)  [argument]
    787: /* These must match the bit postions in trace_iterator_flags */
    3426: * about to flip a bit in the cpumask:
    3482: if (tracer_flags & trace_opts[i].bit)
    3499: ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
    3504: tracer_flags->val &= ~opts->bit;
    3506: tracer_flags->val |= opts->bit;
    6034: if (topt->flags->val & topt->opt->bit)
    6057: if (!!(topt->flags->val & topt->opt->bit) != val) {
    6759: /* use static because iter can be a bit bi
    [all...]

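The trace.c hits (3482-3506, 6034-6057) and the tracer_opt/TRACER_OPT lines in the trace.h entry show how tracer options work: each option owns one bit of a shared val word, and enabling or disabling an option toggles that bit once the tracer accepts the change. A sketch of the pattern with illustrative structures (not the kernel's own definitions):

    #include <stdio.h>
    #include <string.h>

    struct tracer_opt   { const char *name; unsigned int bit; };
    struct tracer_flags { unsigned int val; struct tracer_opt *opts; };

    /* Same stringify-the-name trick as the TRACER_OPT hit in trace.h. */
    #define TRACER_OPT(s, b)  { .name = #s, .bit = (b) }

    static struct tracer_opt demo_opts[] = {
        TRACER_OPT(funcgraph-irqs, 1 << 0),
        TRACER_OPT(display-graph,  1 << 1),
        { NULL, 0 },
    };
    static struct tracer_flags demo_flags = { .val = 1 << 0, .opts = demo_opts };

    static int set_tracer_option(struct tracer_flags *tf, const char *name, int neg)
    {
        for (struct tracer_opt *opt = tf->opts; opt->name; opt++) {
            if (strcmp(opt->name, name))
                continue;
            if (neg)
                tf->val &= ~opt->bit;   /* option disabled */
            else
                tf->val |= opt->bit;    /* option enabled  */
            return 0;
        }
        return -1;                      /* unknown option  */
    }

    int main(void)
    {
        set_tracer_option(&demo_flags, "display-graph", 0);
        printf("val = 0x%x\n", demo_flags.val);   /* 0x3 */
        return 0;
    }
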
/kernel/

panic.c
    210: u8 bit;  [member in struct tnt]
    265: *s++ = test_bit(t->bit, &tainted_mask) ?
    354: * This is a bit racy..
    384: * 64-bit random ID for oopses:

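The panic.c hits at 210 and 265 hint at the table-driven taint string: each table entry pairs a taint bit with the character to print when the bit is set or clear. A small sketch under that assumption, with illustrative table contents and field names:

    #include <stdio.h>

    struct tnt { unsigned char bit; char when_set; char when_clear; };

    static const struct tnt tnts[] = {
        { 0, 'P', 'G' },        /* proprietary vs. GPL-only modules */
        { 1, 'F', ' ' },        /* a module was force-loaded        */
        { 9, 'W', ' ' },        /* a WARN() has fired               */
    };

    static unsigned long tainted_mask;

    static void print_tainted(char *buf)
    {
        char *s = buf;

        for (unsigned int i = 0; i < sizeof(tnts) / sizeof(tnts[0]); i++) {
            const struct tnt *t = &tnts[i];
            *s++ = (tainted_mask >> t->bit) & 1 ? t->when_set : t->when_clear;
        }
        *s = '\0';
    }

    int main(void)
    {
        char buf[sizeof(tnts) / sizeof(tnts[0]) + 1];

        tainted_mask |= 1UL << 9;       /* pretend a WARN() fired */
        print_tainted(buf);
        printf("Tainted: %s\n", buf);   /* "G W" */
        return 0;
    }
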
auditfilter.c
    236: case 1: /* 32bit on biarch */
    291: int bit = AUDIT_BITMASK_SIZE * 32 - i - 1;  [local]
    292: __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];
    295: if (!(*p & AUDIT_BIT(bit)))
    297: *p &= ~AUDIT_BIT(bit);
    366: /* bit ops are only useful on syscall args */

auditsc.c
    148: case 1: /* 32bit on biarch */
    726: int word, bit;  [local]
    735: bit = AUDIT_BIT(val);
    737: return rule->mask[word] & bit;

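The auditfilter.c and auditsc.c hits use the same word/bit split: a syscall number selects a 32-bit word in the rule's mask array (AUDIT_WORD) and a bit inside that word (AUDIT_BIT). A sketch of that indexing, with macro bodies written from the pattern rather than copied from uapi/linux/audit.h:

    #include <stdint.h>
    #include <stdio.h>

    #define AUDIT_BITMASK_SIZE 64
    #define AUDIT_WORD(nr)     ((uint32_t)((nr) / 32))
    #define AUDIT_BIT(nr)      (1U << ((nr) % 32))

    static uint32_t mask[AUDIT_BITMASK_SIZE];

    static void set_syscall(int nr)   { mask[AUDIT_WORD(nr)] |=  AUDIT_BIT(nr); }
    static void clear_syscall(int nr) { mask[AUDIT_WORD(nr)] &= ~AUDIT_BIT(nr); }
    static int  test_syscall(int nr)  { return !!(mask[AUDIT_WORD(nr)] & AUDIT_BIT(nr)); }

    int main(void)
    {
        set_syscall(59);                        /* e.g. execve on x86_64 */
        printf("59 set? %d\n", test_syscall(59));
        clear_syscall(59);
        printf("59 set? %d\n", test_syscall(59));
        return 0;
    }
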
cpuset.c
    1264: * bit: the bit to update (see cpuset_flagbits_t)
    1271: static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,  [argument]
    1284: set_bit(bit, &trialcs->flags);
    1286: clear_bit(bit, &trialcs->flags);
    1328: * is 1 second. Arithmetic is done using 32-bit integers scaled to
    1344: * Given the simple 32 bit integer arithmetic used, this meter works
    2478: * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
    2485: * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
    2499: * variable 'wait' is not set, and the bit ALLOC_CPUSE
    [all...]

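update_flag() in the cpuset.c entry flips the requested bit on a trial copy, validates the result, and only then commits it, so an invalid combination never becomes visible. A sketch of that trial-copy update with illustrative types and a made-up validation rule:

    #include <stdio.h>

    enum cs_flagbits { CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, CS_NR_FLAGS };

    struct cpuset { unsigned long flags; };

    static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
    {
        (void)cur;
        /* Illustrative rule: the two exclusive flags may not both be set. */
        return (trial->flags & 1UL << CS_CPU_EXCLUSIVE) &&
               (trial->flags & 1UL << CS_MEM_EXCLUSIVE) ? -1 : 0;
    }

    static int update_flag(enum cs_flagbits bit, struct cpuset *cs, int turning_on)
    {
        struct cpuset trialcs = *cs;            /* work on a trial copy */

        if (turning_on)
            trialcs.flags |= 1UL << bit;
        else
            trialcs.flags &= ~(1UL << bit);

        if (validate_change(cs, &trialcs))
            return -1;                          /* reject; live cpuset untouched */

        cs->flags = trialcs.flags;              /* commit */
        return 0;
    }

    int main(void)
    {
        struct cpuset cs = { .flags = 1UL << CS_CPU_EXCLUSIVE };

        /* Rejected (-1): both exclusive bits would end up set. */
        printf("set mem_exclusive: %d\n", update_flag(CS_MEM_EXCLUSIVE, &cs, 1));
        return 0;
    }
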
/kernel/sched/

wait.c
    262: * @key: key to identify a wait bit queue or %NULL
    336: int __sched out_of_line_wait_on_bit(void *word, int bit,  [argument]
    339: wait_queue_head_t *wq = bit_waitqueue(word, bit);
    340: DEFINE_WAIT_BIT(wait, word, bit);
    347: void *word, int bit, wait_bit_action_f *action,
    350: wait_queue_head_t *wq = bit_waitqueue(word, bit);
    351: DEFINE_WAIT_BIT(wait, word, bit);
    379: int __sched out_of_line_wait_on_bit_lock(void *word, int bit,  [argument]
    382: wait_queue_head_t *wq = bit_waitqueue(word, bit);
    383: DEFINE_WAIT_BIT(wait, word, bit);
    346: out_of_line_wait_on_bit_timeout(void *word, int bit, wait_bit_action_f *action, unsigned mode, unsigned long timeout)  [argument]
    389: __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)  [argument]
    414: wake_up_bit(void *word, int bit)  [argument]
    416: __wake_up_bit(bit_waitqueue(word, bit), word, bit);  [local]
    420: bit_waitqueue(void *word, int bit)  [argument]
    [all...]

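The wait.c hits are the wait-on-bit API: a waiter sleeps until a given bit in a word clears, and whoever owns the bit clears it and then wakes the waiters queued on that (word, bit) pair. A userspace sketch of the protocol, with a mutex/condvar standing in for bit_waitqueue() and __wake_up_bit() (build with: cc sketch.c -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static unsigned long   word;
    static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;  /* ~ bit_waitqueue(&word, bit) */

    static void wait_on_bit(unsigned long *w, int bit)
    {
        pthread_mutex_lock(&lock);
        while (*w & (1UL << bit))               /* sleep until the bit clears */
            pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);
    }

    static void clear_and_wake_up_bit(unsigned long *w, int bit)
    {
        pthread_mutex_lock(&lock);
        *w &= ~(1UL << bit);                    /* clear_bit() ...        */
        pthread_mutex_unlock(&lock);
        pthread_cond_broadcast(&waitq);         /* ... then wake_up_bit() */
    }

    static void *waiter(void *arg)
    {
        (void)arg;
        wait_on_bit(&word, 0);
        printf("bit 0 cleared, waiter resumes\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        word |= 1UL << 0;                       /* mark the resource busy        */
        pthread_create(&t, NULL, waiter, NULL);
        clear_and_wake_up_bit(&word, 0);        /* release it and wake waiters   */
        pthread_join(t, NULL);
        return 0;
    }
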
/kernel/power/

snapshot.c
    310: struct bm_position cur; /* most recently used bit position */
    624: * memory_bm_find_bit - Find the bit for pfn in the memory
    627: * Find the bit in the bitmap @bm that corresponds to given pfn.
    630: * It walks the radix tree to find the page which contains the bit for
    631: * pfn and returns the bit position in **addr and *bit_nr.
    696: unsigned int bit;  [local]
    699: error = memory_bm_find_bit(bm, pfn, &addr, &bit);
    701: set_bit(bit, addr);
    707: unsigned int bit;  [local]
    710: error = memory_bm_find_bit(bm, pfn, &addr, &bit);
    720: unsigned int bit;  [local]
    730: int bit;  [local]
    739: unsigned int bit;  [local]
    750: unsigned int bit;  [local]
    804: int bit;  [local]
    [all...]

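memory_bm_find_bit(), per the comment hits, translates a pfn into the block that holds its bit plus the bit number inside that block; callers then set, clear or test that bit. The kernel walks a radix tree of bitmap pages; the sketch below flattens that to a single block to show the usage pattern:

    #include <stdio.h>

    #define BITS_PER_BLOCK 4096

    static unsigned long bitmap_block[BITS_PER_BLOCK / (8 * sizeof(unsigned long))];
    static unsigned long start_pfn = 0x1000;    /* first pfn covered by the block */

    static int memory_bm_find_bit(unsigned long pfn,
                                  unsigned long **addr, unsigned int *bit)
    {
        if (pfn < start_pfn || pfn >= start_pfn + BITS_PER_BLOCK)
            return -1;                          /* pfn not covered by this bitmap */
        *addr = bitmap_block;
        *bit  = pfn - start_pfn;
        return 0;
    }

    static void memory_bm_set_bit(unsigned long pfn)
    {
        unsigned long *addr;
        unsigned int bit;

        if (!memory_bm_find_bit(pfn, &addr, &bit))
            addr[bit / (8 * sizeof(unsigned long))] |=
                1UL << (bit % (8 * sizeof(unsigned long)));
    }

    static int memory_bm_test_bit(unsigned long pfn)
    {
        unsigned long *addr;
        unsigned int bit;

        if (memory_bm_find_bit(pfn, &addr, &bit))
            return 0;
        return !!(addr[bit / (8 * sizeof(unsigned long))] &
                  (1UL << (bit % (8 * sizeof(unsigned long)))));
    }

    int main(void)
    {
        memory_bm_set_bit(0x1010);
        printf("pfn 0x1010 marked? %d\n", memory_bm_test_bit(0x1010));
        return 0;
    }
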
/kernel/rcu/

tree.c
    982: * force-quiescent-state pass. So lost bit sets do not result
    1967: /* Our bit has already been cleared, so done. */
    2442: unsigned long bit;  [local]
    2462: bit = 1;
    2463: for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
    2464: if ((rnp->qsmask & bit) != 0) {
    2465: if ((rnp->qsmaskinit & bit) != 0)
    2468: mask |= bit;
    2898: * more than 2 billion grace periods (and way more on a 64-bit system!),
    2975: * counter wrap on a 32-bit syste
    [all...]

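Lines 2462-2468 walk a node-local CPU mask by shifting a single bit across the node's CPU range, collecting the bits that still owe a quiescent state but are no longer online. A sketch of that walk; the structure below only mirrors the fields visible in the snippet:

    #include <stdio.h>

    struct rcu_node {
        unsigned long qsmask;       /* CPUs still owing a quiescent state */
        unsigned long qsmaskinit;   /* CPUs that were online at GP start  */
        int grplo, grphi;           /* CPU range covered by this node     */
    };

    static unsigned long collect_offline_cpus(struct rcu_node *rnp)
    {
        unsigned long mask = 0, bit = 1;
        int cpu;

        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, bit <<= 1) {
            if ((rnp->qsmask & bit) && !(rnp->qsmaskinit & bit))
                mask |= bit;        /* owing a QS but no longer online */
        }
        return mask;
    }

    int main(void)
    {
        struct rcu_node rnp = { .qsmask = 0xf, .qsmaskinit = 0xd,
                                .grplo = 0, .grphi = 3 };

        printf("bits to clear: 0x%lx\n", collect_offline_cpus(&rnp));  /* 0x2 */
        return 0;
    }
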
/kernel/time/

timer.c
    383: /* If the timeout is larger than MAX_TVAL (on 64-bit
    831: * 2) calculate the highest bit where the expires and new max are different
    832: * 3) use this bit to make a mask
    840: int bit;  [local]
    856: bit = find_last_bit(&mask, BITS_PER_LONG);
    858: mask = (1UL << bit) - 1;
    1479: * Another bit of PARANOID. Note that the retval will be

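The timer.c comments at 831-832 describe the slack-rounding trick: find the highest bit where the requested expiry and the allowed maximum differ, build a mask of everything below it, and round so nearby timers expire together. A sketch built from that description (simplified, not a copy of the kernel's apply_slack()):

    #include <stdio.h>

    /* Index of the highest set bit (the snippet uses find_last_bit()). */
    static int find_last_bit_word(unsigned long word)
    {
        int bit = -1;

        while (word) {
            bit++;
            word >>= 1;
        }
        return bit;
    }

    static unsigned long round_expires(unsigned long expires, unsigned long slack)
    {
        unsigned long expires_limit = expires + slack;
        unsigned long mask = expires ^ expires_limit;   /* differing bits */
        int bit;

        if (!mask)
            return expires;

        bit = find_last_bit_word(mask);
        mask = (1UL << bit) - 1;        /* everything below the highest differing bit */

        return expires_limit & ~mask;   /* round within the allowed window */
    }

    int main(void)
    {
        /* Expiry 10000 with 200 units of slack rounds to a coarser value
         * that still falls inside [10000, 10200]. */
        printf("%lu\n", round_expires(10000, 200));
        return 0;
    }
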
/kernel/locking/

lockdep.c
    311: * It's a 64-bit hash, because it's important for the keys to be
    478: static inline unsigned long lock_flag(enum lock_usage_bit bit)  [argument]
    480: return 1UL << bit;
    483: static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)  [argument]
    487: if (class->usage_mask & lock_flag(bit + 2))
    489: if (class->usage_mask & lock_flag(bit)) {
    491: if (class->usage_mask & lock_flag(bit + 2))
    1316: static inline int usage_match(struct lock_list *entry, void *bit)  [argument]
    1318: return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
    1325: * at @root->class that matches @bit
    1334: find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, struct lock_list **target_entry)  [argument]
    1357: find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, struct lock_list **target_entry)  [argument]
    1371: int bit;  [local]
    1602: state_name(enum lock_usage_bit bit)  [argument]
    1629: check_irq_usage(struct task_struct *curr, struct held_lock *prev, struct held_lock *next, enum lock_usage_bit bit)  [argument]
    2347: check_usage_forwards(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit bit, const char *irqclass)  [argument]
    2371: check_usage_backwards(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit bit, const char *irqclass)  [argument]
    2436: state_verbose(enum lock_usage_bit bit, struct lock_class *class)  [argument]
    [all...]

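get_usage_char() (lines 483-491) folds a pair of usage bits, two positions apart, into one of '.', '+', '-' or '?' for the lockdep state dump. A sketch of that folding; the enum layout and the meaning attached to each character are illustrative:

    #include <stdio.h>

    enum lock_usage_bit {           /* illustrative layout: ENABLED = USED + 2 */
        LOCK_USED_IN_HARDIRQ = 0,
        LOCK_USED_IN_SOFTIRQ = 1,
        LOCK_ENABLED_HARDIRQ = 2,
        LOCK_ENABLED_SOFTIRQ = 3,
    };

    struct lock_class { unsigned long usage_mask; };

    static unsigned long lock_flag(enum lock_usage_bit bit)
    {
        return 1UL << bit;
    }

    static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
    {
        char c = '.';                   /* neither bit of the pair is set */

        if (class->usage_mask & lock_flag(bit + 2))
            c = '+';                    /* only the paired bit is set     */
        if (class->usage_mask & lock_flag(bit)) {
            c = '-';                    /* only the base bit is set       */
            if (class->usage_mask & lock_flag(bit + 2))
                c = '?';                /* both set: the interesting case */
        }
        return c;
    }

    int main(void)
    {
        struct lock_class class = {
            .usage_mask = lock_flag(LOCK_USED_IN_HARDIRQ) |
                          lock_flag(LOCK_ENABLED_HARDIRQ),
        };

        /* Prints '?' because both bits of the hardirq pair are set. */
        printf("hardirq usage: %c\n", get_usage_char(&class, LOCK_USED_IN_HARDIRQ));
        return 0;
    }
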
/kernel/events/

core.c
    1723: * But this is a bit hairy.
    1996: * First we add the event to the list with the hardware enable bit
    2450: * sets the disabled bit in the control field of event _before_
    2688: * sets the enabled bit in the control field of event _before_
    2742: * Reduce accuracy by one bit such that @a and @b converge
    3871: /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
    3944: /* Allow new userspace to detect that bit 0 is deprecated */
    4452: int bit;  [local]
    4454: for_each_set_bit(bit, (const unsigned long *) &mask,
    4458: val = perf_reg_value(regs, bit);
    [all...]

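The core.c hits at 4452-4458 iterate only the set bits of a user-supplied register mask and sample one register per bit. A sketch of that selective walk, with a plain loop in place of the kernel's for_each_set_bit() and a printf standing in for perf_reg_value():

    #include <stdio.h>

    #define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

    static void sample_regs(const unsigned long *regs, unsigned long mask)
    {
        int bit;

        /* Equivalent of for_each_set_bit(bit, &mask, BITS_PER_LONG):
         * visit only the registers selected by the caller's mask. */
        for (bit = 0; bit < BITS_PER_LONG; bit++) {
            if (!(mask & (1UL << bit)))
                continue;
            printf("reg %d = %#lx\n", bit, regs[bit]);  /* ~ perf_reg_value(regs, bit) */
        }
    }

    int main(void)
    {
        unsigned long regs[BITS_PER_LONG] = { [0] = 0xdead, [3] = 0xbeef };

        sample_regs(regs, (1UL << 0) | (1UL << 3));     /* sample regs 0 and 3 */
        return 0;
    }
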