Searched defs:range (Results 1 - 17 of 17) sorted by relevance

/arch/sparc/mm/
extable.c
13 /* Caller knows they are in a range if ret->fixup == 0 */
41 /* A range entry, skip both parts. */
54 /* 2. Try to find a range match. */
73 bool range; local
75 for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
76 range = m->extable[i].fixup == 0;
80 if (range)
83 if (range)
98 /* Inside range? Fix g2 and return correct fixup */
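
The extable.c matches above rely on a two-slot convention: an entry whose fixup field is zero opens a range that occupies two consecutive table slots, so the scan steps by 2 over range entries and by 1 otherwise. A minimal user-space sketch of that lookup; the struct layout and names below are illustrative assumptions, not the sparc definitions:

    #include <stdio.h>
    #include <stdbool.h>

    /* Simplified stand-in for an exception-table entry (assumed layout). */
    struct ex_entry {
        unsigned long insn;   /* faulting address, or start of a range     */
        unsigned long fixup;  /* fixup address; 0 marks a range-start slot */
    };

    /* Return the fixup for addr, or 0 if no entry covers it.
     * A range entry uses two slots: [start, 0] followed by [end, fixup]. */
    static unsigned long search_extable_sketch(const struct ex_entry *tbl,
                                               int num, unsigned long addr)
    {
        bool range = false;
        int i;

        for (i = 0; i < num; i += range ? 2 : 1) {
            range = tbl[i].fixup == 0;

            if (range) {
                /* Two-slot range entry: covered if start <= addr < end. */
                if (addr >= tbl[i].insn && addr < tbl[i + 1].insn)
                    return tbl[i + 1].fixup;
            } else if (tbl[i].insn == addr) {
                /* Exact single-address match. */
                return tbl[i].fixup;
            }
        }
        return 0;
    }

    int main(void)
    {
        const struct ex_entry tbl[] = {
            { 0x1000, 0x9000 },   /* single entry             */
            { 0x2000, 0 },        /* range start (fixup == 0) */
            { 0x2100, 0x9100 },   /* range end + shared fixup */
        };

        printf("%#lx\n", search_extable_sketch(tbl, 3, 0x2040)); /* 0x9100 */
        return 0;
    }
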
/arch/powerpc/kernel/
isa-bridge.c
65 const struct isa_range *range; local
71 range = of_get_property(isa_node, "ranges", &rlen);
72 if (range == NULL || (rlen < sizeof(struct isa_range)))
81 * cell 5: the size of the range
83 if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO) {
84 range++;
89 if ((range->isa_addr.a_hi & ISA_SPACE_MASK) != ISA_SPACE_IO)
92 isa_addr = range->isa_addr.a_lo;
93 pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
94 range
[all...]
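
The isa-bridge.c hit walks the ISA node's "ranges" property, laid out as cells: an ISA address with a space code in the high cell, a PCI address, and the size. A rough sketch of decoding one such entry; the cell layout, mask values, and field names here are illustrative assumptions rather than the powerpc definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed cell layout for one ISA "ranges" entry:
     * 2 ISA address cells, 3 PCI address cells, 1 size cell. */
    struct isa_range_sketch {
        uint32_t isa_hi, isa_lo;          /* ISA address: space code + offset */
        uint32_t pci_hi, pci_mid, pci_lo; /* PCI address (64-bit in mid/lo)   */
        uint32_t size;                    /* size of the range                */
    };

    #define SPACE_MASK 0x00000001u  /* assumed: low bit of isa_hi selects I/O */
    #define SPACE_IO   0x00000001u

    int main(void)
    {
        struct isa_range_sketch r = {
            .isa_hi = SPACE_IO, .isa_lo = 0x0,
            .pci_hi = 0, .pci_mid = 0x0, .pci_lo = 0xf4000000u,
            .size = 0x10000,
        };

        if ((r.isa_hi & SPACE_MASK) != SPACE_IO) {
            fprintf(stderr, "not an I/O range, skipping\n");
            return 1;
        }

        /* Combine the two low PCI cells into a 64-bit bus address, as the
         * snippet does with a_mid and a_lo. */
        uint64_t pci_addr = ((uint64_t)r.pci_mid << 32) | r.pci_lo;
        uint64_t isa_addr = r.isa_lo;

        printf("ISA %#llx -> PCI %#llx, size %#x\n",
               (unsigned long long)isa_addr, (unsigned long long)pci_addr,
               (unsigned)r.size);
        return 0;
    }
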
/arch/sparc/kernel/
of_device_common.c
91 /* Make sure the least significant 64-bits are in-range. Even
110 int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna) argument
120 if (of_out_of_range(addr, range, range + na + pna, na, ns))
123 /* Start with the parent range base. */
124 memcpy(result, range + na, pna * 4);
130 range[na - 1 - i]);
of_device_32.c
48 static int of_bus_pci_map(u32 *addr, const u32 *range, argument
55 if ((addr[0] ^ range[0]) & 0x03000000)
58 if (of_out_of_range(addr + 1, range + 1, range + na + pna,
62 /* Start with the parent range base. */
63 memcpy(result, range + na, pna * 4);
69 range[na - 1 - i]);
120 static int of_bus_ambapp_map(u32 *addr, const u32 *range, argument
123 return of_bus_default_map(addr, range, na, ns, pna);
of_device_64.c
86 static int of_bus_simba_map(u32 *addr, const u32 *range, argument
101 static int of_bus_pci_map(u32 *addr, const u32 *range, argument
108 if (!((addr[0] ^ range[0]) & 0x03000000))
112 * a 32-bit range.
115 (range[0] & 0x03000000) == 0x02000000)
121 if (of_out_of_range(addr + 1, range + 1, range + na + pna,
125 /* Start with the parent range base. */
126 memcpy(result, range + na, pna * 4);
132 range[n
[all...]
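
The three of_device files above share one translation pattern: verify the child address falls inside a "ranges" entry (the of_out_of_range check), seed the result with the parent address cells that follow the child cells, and carry the offset across. The real code works on variable-width na/ns/pna cell arrays; the sketch below flattens each address to a single 64-bit value, so every name in it is a simplification rather than the sparc interface:

    #include <stdint.h>
    #include <stdio.h>

    /* One flattened "ranges" entry: child base, parent base, size. */
    struct range_sketch {
        uint64_t child_base;
        uint64_t parent_base;
        uint64_t size;
    };

    /* Translate a child address into the parent address space, mirroring the
     * out-of-range check / copy-parent-base / add-offset steps above. */
    static int bus_map_sketch(uint64_t addr, const struct range_sketch *r,
                              uint64_t *result)
    {
        if (addr < r->child_base || addr - r->child_base >= r->size)
            return -1;                      /* of_out_of_range() equivalent */

        *result = r->parent_base + (addr - r->child_base);
        return 0;
    }

    int main(void)
    {
        struct range_sketch r = { 0x0, 0xfff0000000ull, 0x100000 };
        uint64_t out;

        if (bus_map_sketch(0x4000, &r, &out) == 0)
            printf("child 0x4000 -> parent %#llx\n", (unsigned long long)out);
        return 0;
    }

The PCI variants additionally refuse to translate when the address-space bits disagree, which is what the (addr[0] ^ range[0]) & 0x03000000 tests in the snippets do before the common steps run.
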
/arch/x86/kernel/
mmconf-fam10h_64.c
10 #include <linux/range.h>
36 const struct range *r1 = x1;
37 const struct range *r2 = x2;
65 struct range range[8]; local
114 * need to check if the range is in the high mmio range that is
133 range[hi_mmio_num].start = start;
134 range[hi_mmio_num].end = end;
141 /* sort the range */
[all...]
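
The mmconf-fam10h_64.c hit collects candidate high-MMIO windows into a small array of struct range and sorts them with a comparator on the start address. A user-space equivalent of that compare-and-sort step, assuming the minimal start/end layout of struct range:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct range { uint64_t start, end; };   /* assumed minimal layout */

    /* Comparator in the style of the snippet: order ranges by start address. */
    static int cmp_range(const void *x1, const void *x2)
    {
        const struct range *r1 = x1;
        const struct range *r2 = x2;

        if (r1->start < r2->start) return -1;
        if (r1->start > r2->start) return 1;
        return 0;
    }

    int main(void)
    {
        struct range range[3] = {
            { 0xc0000000, 0xdfffffff },
            { 0x80000000, 0x9fffffff },
            { 0xe0000000, 0xefffffff },
        };

        /* "sort the range" step from the snippet */
        qsort(range, 3, sizeof(range[0]), cmp_range);

        for (int i = 0; i < 3; i++)
            printf("[%llx, %llx]\n", (unsigned long long)range[i].start,
                   (unsigned long long)range[i].end);
        return 0;
    }
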
/arch/arm/mach-davinci/
devices-tnetv107x.c
268 u32 range; local
271 /* Figure out the resource range from the ale/cle masks */
272 range = max(data->mask_cle, data->mask_ale);
273 range = PAGE_ALIGN(range + 4) - 1;
275 if (range >= emif_window_sizes[chipsel])
289 res[0].end = res[0].start + range;
373 * This forces the clock down to a range that allows the ADC to
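
The devices-tnetv107x.c hit derives the size of the NAND resource window from the ALE/CLE address-line masks: take the larger mask, round up to a page boundary, and subtract one to get an inclusive end offset. A sketch of that arithmetic; PAGE_SIZE, the example masks, and the window base are assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE_SK     4096u
    #define PAGE_ALIGN_SK(x) (((x) + PAGE_SIZE_SK - 1) & ~(PAGE_SIZE_SK - 1))
    #define MAX_SK(a, b)     ((a) > (b) ? (a) : (b))

    int main(void)
    {
        uint32_t mask_cle = 0x10, mask_ale = 0x08;   /* example masks       */
        uint32_t res_start = 0x20000000;             /* example window base */

        /* Figure out the resource range from the ale/cle masks: the window
         * must cover the highest decoded address line, page aligned. */
        uint32_t range = MAX_SK(mask_cle, mask_ale);
        range = PAGE_ALIGN_SK(range + 4) - 1;

        printf("resource: %#x - %#x\n", res_start, res_start + range);
        return 0;
    }
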
/arch/x86/kernel/cpu/
amd.c
306 /* core id has to be in the [0 .. cores_per_node - 1] range */
548 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
764 u32 range; local
791 /* OSVW unavailable or ID unknown, match family-model-stepping range */
793 while ((range = *erratum++))
794 if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
795 (ms >= AMD_MODEL_RANGE_START(range)) &&
796 (ms <= AMD_MODEL_RANGE_END(range)))
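
The amd.c hit matches the running CPU against a zero-terminated list of packed family/model-stepping ranges when the OSVW MSRs cannot answer. A sketch of one plausible packing plus the matching loop; the bit layout behind these RANGE_* macros is an assumption here, not taken from the kernel's AMD_MODEL_RANGE_* definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed packing: family in bits 31..24, model/stepping start in 23..12,
     * model/stepping end in 11..0, where "ms" is (model << 4) | stepping. */
    #define RANGE_PACK(f, ms_start, ms_end) \
            (((uint32_t)(f) << 24) | ((uint32_t)(ms_start) << 12) | (uint32_t)(ms_end))
    #define RANGE_FAMILY(r)  (((r) >> 24) & 0xff)
    #define RANGE_START(r)   (((r) >> 12) & 0xfff)
    #define RANGE_END(r)     ((r) & 0xfff)

    static int erratum_applies(uint32_t family, uint32_t model,
                               uint32_t stepping, const uint32_t *erratum)
    {
        uint32_t ms = (model << 4) | stepping;
        uint32_t range;

        /* Zero-terminated list of ranges, as in the snippet's while loop. */
        while ((range = *erratum++)) {
            if (family == RANGE_FAMILY(range) &&
                ms >= RANGE_START(range) && ms <= RANGE_END(range))
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* Hypothetical erratum: family 0x10, model 2 step 1 .. model 4 step 2. */
        const uint32_t err[] = { RANGE_PACK(0x10, 0x21, 0x42), 0 };

        printf("%d\n", erratum_applies(0x10, 0x3, 0x0, err));  /* 1 */
        printf("%d\n", erratum_applies(0x0f, 0x3, 0x0, err));  /* 0 */
        return 0;
    }
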
/arch/x86/pci/
amd_bus.c
5 #include <linux/range.h>
56 struct range range[RANGE_NUM]; local
95 /* Check if that register is enabled for bus range */
123 memset(range, 0, sizeof(range));
124 add_range(range, RANGE_NUM, 0, 0, 0xffff + 1);
154 subtract_range(range, RANGE_NUM, start, end + 1);
156 /* add left over io port range to def node/link, [0, 0xffff] */
166 if (!range[
[all...]
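
The amd_bus.c hit drives the <linux/range.h> helpers: start from one full [0, 0xffff] I/O range, carve out every window already routed to a node/link with subtract_range(), and hand whatever is left over to the default node. A compact user-space imitation of add/subtract over a fixed-size array; the real helpers merge and clamp more carefully, so this only shows the shape of the idea:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define RANGE_NUM 8

    struct range { uint64_t start, end; };   /* end is exclusive in this sketch */

    /* Append [start, end) to the first free slot (a free slot has start == end). */
    static void add_range_sk(struct range *r, uint64_t start, uint64_t end)
    {
        for (int i = 0; i < RANGE_NUM; i++) {
            if (r[i].start == r[i].end) {
                r[i].start = start;
                r[i].end = end;
                return;
            }
        }
    }

    /* Remove [start, end) from every stored range, splitting when needed. */
    static void subtract_range_sk(struct range *r, uint64_t start, uint64_t end)
    {
        for (int i = 0; i < RANGE_NUM; i++) {
            if (r[i].start == r[i].end)
                continue;
            if (end <= r[i].start || start >= r[i].end)
                continue;                              /* no overlap     */
            if (start <= r[i].start && end >= r[i].end) {
                r[i].start = r[i].end = 0;             /* fully covered  */
            } else if (start <= r[i].start) {
                r[i].start = end;                      /* trim the front */
            } else if (end >= r[i].end) {
                r[i].end = start;                      /* trim the back  */
            } else {
                add_range_sk(r, end, r[i].end);        /* split in two   */
                r[i].end = start;
            }
        }
    }

    int main(void)
    {
        struct range range[RANGE_NUM];

        memset(range, 0, sizeof(range));
        add_range_sk(range, 0, 0xffff + 1);            /* whole I/O space */
        subtract_range_sk(range, 0x1000, 0x2000);      /* already routed  */

        for (int i = 0; i < RANGE_NUM; i++)
            if (range[i].start != range[i].end)
                printf("[%#llx, %#llx)\n",
                       (unsigned long long)range[i].start,
                       (unsigned long long)range[i].end);
        return 0;
    }
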
/arch/powerpc/boot/
devtree.c
179 * range, then the whole reg block fits.
181 static int compare_reg(u32 *reg, u32 *range, u32 *rangesize) argument
187 if (reg[i] < range[i])
189 if (reg[i] > range[i])
194 end = range[i] + rangesize[i];
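
The devtree.c hit compares a multi-cell reg address against a multi-cell range base plus size to decide whether the reg block sits inside the range. The sketch below fixes the width at two cells (a 64-bit value split into high/low words) and only checks the start address, so it is a simplification of the variable-width original:

    #include <stdio.h>
    #include <stdint.h>

    #define NCELLS 2   /* assume 2 address cells */

    /* Lexicographic compare of two big-endian cell arrays: <0, 0, >0. */
    static int cells_cmp(const uint32_t *a, const uint32_t *b)
    {
        for (int i = 0; i < NCELLS; i++) {
            if (a[i] < b[i]) return -1;
            if (a[i] > b[i]) return 1;
        }
        return 0;
    }

    /* Return 1 if the reg value lies inside [range, range + rangesize). */
    static int reg_in_range(const uint32_t *reg, const uint32_t *range,
                            const uint32_t *rangesize)
    {
        uint32_t end[NCELLS];
        uint64_t carry = 0;

        if (cells_cmp(reg, range) < 0)
            return 0;                         /* reg starts below the range */

        /* end = range + rangesize, carrying between cells (low cell last). */
        for (int i = NCELLS - 1; i >= 0; i--) {
            uint64_t sum = (uint64_t)range[i] + rangesize[i] + carry;
            end[i] = (uint32_t)sum;
            carry = sum >> 32;
        }

        return cells_cmp(reg, end) < 0;
    }

    int main(void)
    {
        uint32_t reg[]       = { 0x0, 0x90000000 };
        uint32_t range[]     = { 0x0, 0x80000000 };
        uint32_t rangesize[] = { 0x0, 0x20000000 };

        printf("%d\n", reg_in_range(reg, range, rangesize));  /* 1 */
        return 0;
    }
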
/arch/s390/mm/
extmem.c
52 struct qrange range[6]; member in struct:qout64
67 struct qrange_old range[6]; member in struct:qout64_old
90 struct qrange range[6]; member in struct:dcss_segment
304 qout->range[i].start =
305 (unsigned long) qout_old->range[i].start;
306 qout->range[i].end =
307 (unsigned long) qout_old->range[i].end;
318 seg->vm_segtype = qout->range[0].start & 0xff;
326 if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) &&
327 ((qout->range[
[all...]
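
The extmem.c hits show two parallel layouts of the segment query result (an old 32-bit qrange array and the current unsigned long one), plus the trick of keeping the segment type in the low byte of range[0].start. A sketch of the old-to-new copy and the type extraction; the field widths, SEG_TYPE value, and names are assumptions, not the s390 definitions:

    #include <stdio.h>

    #define NR_QRANGES     6
    #define SEG_TYPE_EW_SK 3   /* illustrative type code */

    struct qrange_old_sk { unsigned int start, end; };     /* legacy layout  */
    struct qrange_sk     { unsigned long start, end; };    /* current layout */

    struct qout_sk { struct qrange_sk range[NR_QRANGES]; int segcnt; };

    /* Widen each old range entry into the current structure, as the snippet
     * does before any other processing. */
    static void qout_convert_sk(struct qout_sk *qout,
                                const struct qrange_old_sk *old, int segcnt)
    {
        for (int i = 0; i < segcnt; i++) {
            qout->range[i].start = (unsigned long)old[i].start;
            qout->range[i].end   = (unsigned long)old[i].end;
        }
        qout->segcnt = segcnt;
    }

    int main(void)
    {
        struct qrange_old_sk old[1] = {
            { 0x20000000 | SEG_TYPE_EW_SK, 0x20ffffff },
        };
        struct qout_sk qout;

        qout_convert_sk(&qout, old, 1);

        /* The segment type rides in the low byte of the first range start. */
        unsigned int segtype = qout.range[0].start & 0xff;
        printf("type %u, range start %#lx\n", segtype, qout.range[0].start);
        return 0;
    }
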
/arch/x86/kernel/cpu/mtrr/
generic.c
66 /* Get the size of contiguous MTRR range */
108 * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
238 * MTRR range. Do repeated lookups for that case here.
252 /* Get the MSR pair relating to a var range */
260 /* Fill the MSR pair relating to a var range */
468 * set_fixed_range - checks & updates a fixed-range MTRR if it
529 /* Invalid (i.e. free) range */
556 * contiguous range:
567 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
569 * @frs: pointer to fixed-range MTR
575 int block = -1, range; local
[all...]
cleanup.c
28 #include <linux/range.h>
54 static struct range __initdata range[RANGE_NUM]; variable in typeref:struct:__initdata
66 x86_get_mtrr_mem_range(struct range *range, int nr_range, argument
80 nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
87 range[i].start, range[i].end);
109 subtract_range(range, RANGE_NUM, base, base + size);
112 subtract_range(range, RANGE_NU
139 sum_ranges(struct range *range, int nr_range) argument
474 x86_setup_var_mtrrs(struct range *range, int nr_range, u64 chunk_size, u64 gran_size) argument
[all...]
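
cleanup.c drives the same <linux/range.h> helpers: add_range_with_merge() builds the covered list, subtract_range() punches out the holes, and sum_ranges() totals what is left. The last one is simple enough to restate directly; the struct layout matches the sketch after the amd_bus.c entry and, as an assumption, treats end as exclusive:

    #include <stdio.h>
    #include <stdint.h>

    struct range { uint64_t start, end; };   /* end exclusive in this sketch */

    /* Total size of all populated ranges, in the spirit of sum_ranges(). */
    static uint64_t sum_ranges_sk(const struct range *range, int nr_range)
    {
        uint64_t sum = 0;

        for (int i = 0; i < nr_range; i++)
            sum += range[i].end - range[i].start;
        return sum;
    }

    int main(void)
    {
        struct range range[2] = { { 0, 0x1000 }, { 0x2000, 0x10000 } };

        printf("%#llx bytes covered\n",
               (unsigned long long)sum_ranges_sk(range, 2));
        return 0;
    }
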
/arch/mips/kernel/
perf_event_mipsxx.c
70 } range; member in struct:mips_perf_event
314 * We only need to care the counter mask. The range has been
657 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
661 return ((unsigned int)pev->range << 24) |
1173 if (pev->range > V) {
1228 * by the single CPU operates (the mode exclusion and the range).
1232 /* Calculate range bits and validate it. */
1424 * CPUs will not check and calculate the range.
1426 raw_event.range = P;
1437 raw_event.range
[all...]
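
The perf_event_mipsxx.c comment spells the packing out: the top 8 bits of the config word carry the range code, the next 16 bits the counter mask, and the lowest 8 bits the event number. A sketch of packing and unpacking that 32-bit word; the helper names and the example values are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* Top 8 bits: range; next 16 bits: counter mask; lowest 8 bits: event. */
    static uint32_t pack_config(uint8_t range, uint16_t cntr_mask, uint8_t event)
    {
        return ((uint32_t)range << 24) | ((uint32_t)cntr_mask << 8) | event;
    }

    static void unpack_config(uint32_t cfg, uint8_t *range,
                              uint16_t *cntr_mask, uint8_t *event)
    {
        *range     = cfg >> 24;
        *cntr_mask = (cfg >> 8) & 0xffff;
        *event     = cfg & 0xff;
    }

    int main(void)
    {
        uint32_t cfg = pack_config(0x01, 0x000f, 0x12);
        uint8_t r, e;
        uint16_t m;

        unpack_config(cfg, &r, &m, &e);
        printf("range %u mask %#x event %#x (config %#x)\n",
               (unsigned)r, (unsigned)m, (unsigned)e, cfg);
        return 0;
    }
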
/arch/parisc/mm/
init.c
274 * Initialize and free the full range of memory in each range.
763 int range; local
765 /* Map each physical memory range to its kernel vaddr */
767 for (range = 0; range < npmem_ranges; range++) {
772 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
773 end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
774 size = pmem_ranges[range]
[all...]
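
The init.c hit walks each physical memory range and converts page-frame numbers and page counts into byte addresses with PAGE_SHIFT. A sketch of that bookkeeping loop; the PAGE_SHIFT value, the pmem_ranges layout, and the example ranges are assumptions:

    #include <stdio.h>

    #define PAGE_SHIFT_SK 12   /* assumed 4 KiB pages */

    struct pmem_range_sk {
        unsigned long start_pfn;   /* first page frame number */
        unsigned long pages;       /* number of pages         */
    };

    int main(void)
    {
        struct pmem_range_sk pmem_ranges[] = {
            { 0x00000, 0x08000 },      /* 128 MiB at 0       */
            { 0x10000, 0x04000 },      /* 64 MiB at 256 MiB  */
        };
        int npmem_ranges = 2;

        /* Map each physical memory range to byte addresses, as in the snippet. */
        for (int range = 0; range < npmem_ranges; range++) {
            unsigned long start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT_SK;
            unsigned long size        = pmem_ranges[range].pages << PAGE_SHIFT_SK;
            unsigned long end_paddr   = start_paddr + size;

            printf("range %d: %#lx - %#lx (%lu MiB)\n",
                   range, start_paddr, end_paddr, size >> 20);
        }
        return 0;
    }
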
/arch/tile/kernel/
setup.c
318 HV_PhysAddrRange range = hv_inquire_physical(i); local
319 if (range.size == 0)
324 range.size, range.start + range.size);
329 if ((unsigned long)range.start) {
331 range.start, range.start + range.size);
335 if ((range
1057 HV_VirtAddrRange range = hv_inquire_virtual(i); local
[all...]
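
The setup.c hit asks the hypervisor for physical (and later virtual) address ranges by index until it gets back an empty one (size == 0). A sketch of that enumerate-until-empty pattern with a stubbed-out query; the real HV_PhysAddrRange fields are not shown in the snippet, so the start/size shape here is an assumption:

    #include <stdio.h>
    #include <stdint.h>

    struct phys_range_sk { uint64_t start, size; };   /* assumed minimal shape */

    /* Stub standing in for the hypervisor call: a fixed table, terminated by
     * an entry whose size is zero. */
    static struct phys_range_sk inquire_physical_sk(int i)
    {
        static const struct phys_range_sk table[] = {
            { 0x00000000, 0x40000000 },
            { 0x80000000, 0x40000000 },
            { 0, 0 },                      /* terminator */
        };
        return table[i];
    }

    int main(void)
    {
        for (int i = 0; ; i++) {
            struct phys_range_sk range = inquire_physical_sk(i);

            if (range.size == 0)           /* no more ranges to report */
                break;
            printf("range %d: %#llx - %#llx\n", i,
                   (unsigned long long)range.start,
                   (unsigned long long)(range.start + range.size));
        }
        return 0;
    }
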
/arch/x86/xen/
mmu.c
139 * if the PFN is in the linear mapped vaddr range, we can just use
1171 /* reserve the range used */
1175 printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
1405 * If the new pfn is within the range of the newly allocated
2105 * Update the pfn-to-mfn mappings for a virtual address range, either to
2335 unsigned long range; local
2348 range = (unsigned long)batch << PAGE_SHIFT;
2351 err = apply_to_page_range(vma->vm_mm, addr, range,
2361 addr += range;
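
The mmu.c hit processes a long virtual address range in page-sized batches: each pass converts a batch count into bytes with PAGE_SHIFT, applies the work to that sub-range, and advances the address. A sketch of the chunking arithmetic only; apply_to_page_range() is a kernel interface, so a dummy callback stands in for it here:

    #include <stdio.h>

    #define PAGE_SHIFT_SK 12
    #define BATCH_PAGES   16

    /* Stand-in for the per-chunk work (the real code remaps the pages). */
    static int apply_chunk_sk(unsigned long addr, unsigned long len)
    {
        printf("apply [%#lx, %#lx)\n", addr, addr + len);
        return 0;
    }

    int main(void)
    {
        unsigned long addr = 0x10000000;
        unsigned long nr_pages = 40;

        while (nr_pages) {
            unsigned long batch = nr_pages < BATCH_PAGES ? nr_pages : BATCH_PAGES;
            unsigned long range = batch << PAGE_SHIFT_SK;   /* bytes this pass */

            if (apply_chunk_sk(addr, range))
                break;

            nr_pages -= batch;
            addr += range;       /* advance into the next sub-range */
        }
        return 0;
    }
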

Completed in 304 milliseconds