/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _SCHED_H_
#define _SCHED_H_

#include <sys/cdefs.h>
#include <sys/time.h>

__BEGIN_DECLS

#define SCHED_NORMAL 0
#define SCHED_OTHER 0
#define SCHED_FIFO 1
#define SCHED_RR 2

struct sched_param {
    int sched_priority;
};

extern int sched_setscheduler(pid_t, int, const struct sched_param *);
extern int sched_getscheduler(pid_t);
extern int sched_yield(void);
extern int sched_get_priority_max(int policy);
extern int sched_get_priority_min(int policy);
extern int sched_setparam(pid_t, const struct sched_param *);
extern int sched_getparam(pid_t, struct sched_param *);
extern int sched_rr_get_interval(pid_t pid, struct timespec *tp);

#define CLONE_VM 0x00000100
#define CLONE_FS 0x00000200
#define CLONE_FILES 0x00000400
#define CLONE_SIGHAND 0x00000800
#define CLONE_PTRACE 0x00002000
#define CLONE_VFORK 0x00004000
#define CLONE_PARENT 0x00008000
#define CLONE_THREAD 0x00010000
#define CLONE_NEWNS 0x00020000
#define CLONE_SYSVSEM 0x00040000
#define CLONE_SETTLS 0x00080000
#define CLONE_PARENT_SETTID 0x00100000
#define CLONE_CHILD_CLEARTID 0x00200000
#define CLONE_DETACHED 0x00400000
#define CLONE_UNTRACED 0x00800000
#define CLONE_CHILD_SETTID 0x01000000
#define CLONE_STOPPED 0x02000000

#ifdef _GNU_SOURCE
extern int clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...);
extern int unshare(int);
#endif

/* Support for cpu thread affinity */
#ifdef _GNU_SOURCE

extern int sched_getcpu(void);

/* Our implementation supports up to 32 independent CPUs, which is also
 * the maximum supported by the kernel at the moment. GLibc uses 1024 by
 * default.
 *
 * If you want to use more than that, you should use CPU_ALLOC() / CPU_FREE()
 * and the CPU_XXX_S() macro variants.
 */
#define CPU_SETSIZE 32

#define __CPU_BITTYPE  unsigned long int  /* mandated by the kernel */
#define __CPU_BITSHIFT 5                  /* should be log2(BITTYPE) */
#define __CPU_BITS     (1 << __CPU_BITSHIFT)
#define __CPU_ELT(x)   ((x) >> __CPU_BITSHIFT)
#define __CPU_MASK(x)  ((__CPU_BITTYPE)1 << ((x) & (__CPU_BITS-1)))

typedef struct {
    __CPU_BITTYPE  __bits[ CPU_SETSIZE / __CPU_BITS ];
} cpu_set_t;

extern int sched_setaffinity(pid_t pid, size_t setsize, const cpu_set_t* set);

extern int sched_getaffinity(pid_t pid, size_t setsize, cpu_set_t* set);
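
/* A minimal usage sketch (error checking omitted): pin the calling process
 * to CPU 0 using the static cpu_set_t. A pid of 0 means "the calling
 * process/thread".
 *
 *     cpu_set_t set;
 *     CPU_ZERO(&set);
 *     CPU_SET(0, &set);
 *     sched_setaffinity(0, sizeof(set), &set);  // returns -1 and sets errno on failure
 *     int cpu = sched_getcpu();                 // should now report a CPU from the set
 */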

/* Provide optimized implementation for 32-bit cpu_set_t */
#if CPU_SETSIZE == __CPU_BITS

# define CPU_ZERO(set_) \
    do { \
        (set_)->__bits[0] = 0; \
    } while (0)

# define CPU_SET(cpu_, set_) \
    do { \
        size_t __cpu = (cpu_); \
        if (__cpu < CPU_SETSIZE) \
            (set_)->__bits[0] |= __CPU_MASK(__cpu); \
    } while (0)

# define CPU_CLR(cpu_, set_) \
    do { \
        size_t __cpu = (cpu_); \
        if (__cpu < CPU_SETSIZE) \
            (set_)->__bits[0] &= ~__CPU_MASK(__cpu); \
    } while (0)

# define CPU_ISSET(cpu_, set_) \
    (__extension__ ({ \
        size_t __cpu = (cpu_); \
        (__cpu < CPU_SETSIZE) \
            ? ((set_)->__bits[0] & __CPU_MASK(__cpu)) != 0 \
            : 0; \
    }))

# define CPU_EQUAL(set1_, set2_) \
    ((set1_)->__bits[0] == (set2_)->__bits[0])

# define __CPU_OP(dst_, set1_, set2_, op_) \
    do { \
        (dst_)->__bits[0] = (set1_)->__bits[0] op_ (set2_)->__bits[0]; \
    } while (0)

# define CPU_COUNT(set_)  __builtin_popcountl((set_)->__bits[0])

#else /* CPU_SETSIZE != __CPU_BITS */

# define CPU_ZERO(set_)           CPU_ZERO_S(sizeof(cpu_set_t), set_)
# define CPU_SET(cpu_, set_)      CPU_SET_S(cpu_, sizeof(cpu_set_t), set_)
# define CPU_CLR(cpu_, set_)      CPU_CLR_S(cpu_, sizeof(cpu_set_t), set_)
# define CPU_ISSET(cpu_, set_)    CPU_ISSET_S(cpu_, sizeof(cpu_set_t), set_)
# define CPU_COUNT(set_)          CPU_COUNT_S(sizeof(cpu_set_t), set_)
# define CPU_EQUAL(set1_, set2_)  CPU_EQUAL_S(sizeof(cpu_set_t), set1_, set2_)

# define __CPU_OP(dst_, set1_, set2_, op_)  __CPU_OP_S(sizeof(cpu_set_t), dst_, set1_, set2_, op_)

#endif /* CPU_SETSIZE != __CPU_BITS */

/* __CPU_OP() takes a destination set as its first argument, so the two-set
 * operations below use set1_ as both destination and first source. */
#define CPU_AND(set1_, set2_)  __CPU_OP(set1_, set1_, set2_, &)
#define CPU_OR(set1_, set2_)   __CPU_OP(set1_, set1_, set2_, |)
#define CPU_XOR(set1_, set2_)  __CPU_OP(set1_, set1_, set2_, ^)

/* Support for dynamically-allocated cpu_set_t */

#define CPU_ALLOC_SIZE(count) \
    __CPU_ELT((count) + (__CPU_BITS-1))*sizeof(__CPU_BITTYPE)

#define CPU_ALLOC(count)  __sched_cpualloc((count))
#define CPU_FREE(set)     __sched_cpufree((set))

extern cpu_set_t* __sched_cpualloc(size_t count);
extern void       __sched_cpufree(cpu_set_t* set);
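
/* A minimal usage sketch for more CPUs than CPU_SETSIZE (the count of 64 is
 * arbitrary; allocation failure and error checking are omitted):
 *
 *     size_t setsize = CPU_ALLOC_SIZE(64);
 *     cpu_set_t* set = CPU_ALLOC(64);
 *     CPU_ZERO_S(setsize, set);
 *     CPU_SET_S(63, setsize, set);
 *     int n = CPU_COUNT_S(setsize, set);   // n == 1
 *     sched_setaffinity(0, setsize, set);
 *     CPU_FREE(set);
 */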

#define CPU_ZERO_S(setsize_, set_) \
    do { \
        size_t __nn = 0; \
        size_t __nn_max = (setsize_)/sizeof(__CPU_BITTYPE); \
        for (; __nn < __nn_max; __nn++) \
            (set_)->__bits[__nn] = 0; \
    } while (0)

#define CPU_SET_S(cpu_, setsize_, set_) \
    do { \
        size_t __cpu = (cpu_); \
        if (__cpu < 8*(setsize_)) \
            (set_)->__bits[__CPU_ELT(__cpu)] |= __CPU_MASK(__cpu); \
    } while (0)

#define CPU_CLR_S(cpu_, setsize_, set_) \
    do { \
        size_t __cpu = (cpu_); \
        if (__cpu < 8*(setsize_)) \
            (set_)->__bits[__CPU_ELT(__cpu)] &= ~__CPU_MASK(__cpu); \
    } while (0)

#define CPU_ISSET_S(cpu_, setsize_, set_) \
    (__extension__ ({ \
        size_t __cpu = (cpu_); \
        (__cpu < 8*(setsize_)) \
            ? ((set_)->__bits[__CPU_ELT(__cpu)] & __CPU_MASK(__cpu)) != 0 \
            : 0; \
    }))

#define CPU_EQUAL_S(setsize_, set1_, set2_) \
    (__extension__ ({ \
        __const __CPU_BITTYPE* __src1 = (set1_)->__bits; \
        __const __CPU_BITTYPE* __src2 = (set2_)->__bits; \
        size_t __nn = 0, __nn_max = (setsize_)/sizeof(__CPU_BITTYPE); \
        for (; __nn < __nn_max; __nn++) { \
            if (__src1[__nn] != __src2[__nn]) \
                break; \
        } \
        __nn == __nn_max; \
    }))

#define __CPU_OP_S(setsize_, dstset_, srcset1_, srcset2_, op) \
    do { \
        cpu_set_t* __dst = (dstset_); \
        const __CPU_BITTYPE* __src1 = (srcset1_)->__bits; \
        const __CPU_BITTYPE* __src2 = (srcset2_)->__bits; \
        size_t __nn = 0, __nn_max = (setsize_)/sizeof(__CPU_BITTYPE); \
        for (; __nn < __nn_max; __nn++) \
            (__dst)->__bits[__nn] = __src1[__nn] op __src2[__nn]; \
    } while (0)

#define CPU_COUNT_S(setsize_, set_) \
    __sched_cpucount((setsize_), (set_))

extern int __sched_cpucount(size_t setsize, cpu_set_t* set);

#endif /* _GNU_SOURCE */

__END_DECLS

#endif /* _SCHED_H_ */