Lines Matching refs:thread

54 // This code is used both by each new pthread and the code that initializes the main thread.
55 void __init_tls(pthread_internal_t* thread) {
57 thread->tls[TLS_SLOT_SELF] = thread->tls;
58 thread->tls[TLS_SLOT_THREAD_ID] = thread;
60 thread->tls[TLS_SLOT_STACK_GUARD] = reinterpret_cast<void*>(__stack_chk_guard);
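The three stores above make the TLS block self-describing: one slot points back at the slot array itself and another points at the owning pthread_internal_t, so any code that can reach the TLS base can recover its own thread descriptor. A minimal standalone sketch of that pattern, using made-up slot indices and a toy descriptor type rather than bionic's private TLS_SLOT_* values:

    #include <cstdio>

    // Hypothetical slot numbers for illustration only; the real TLS_SLOT_*
    // values live in bionic's private headers.
    enum { SLOT_SELF = 0, SLOT_THREAD_ID = 1, SLOT_COUNT = 8 };

    struct toy_thread {
      void* tls[SLOT_COUNT];
    };

    // Mirrors the shape of __init_tls: the slot array points back at itself
    // and another slot points at the owning thread descriptor.
    void toy_init_tls(toy_thread* thread) {
      thread->tls[SLOT_SELF] = thread->tls;
      thread->tls[SLOT_THREAD_ID] = thread;
    }

    int main() {
      toy_thread t = {};
      toy_init_tls(&t);

      // Given only the TLS base, the descriptor is one slot load away.
      void** tls_base = static_cast<void**>(t.tls[SLOT_SELF]);
      toy_thread* self = static_cast<toy_thread*>(tls_base[SLOT_THREAD_ID]);
      printf("recovered %p, expected %p\n", static_cast<void*>(self),
             static_cast<void*>(&t));
      return 0;
    }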
63 void __init_alternate_signal_stack(pthread_internal_t* thread) {
78 thread->alternate_signal_stack = stack_base;
82 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ss.ss_sp, ss.ss_size, "thread signal stack");
83 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, stack_base, PAGE_SIZE, "thread signal stack guard page");
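The function above pairs sigaltstack() with PR_SET_VMA_ANON_NAME so the mapping carries a readable label in /proc/<pid>/maps. A simplified sketch of the same two steps, with no guard page, the libc default SIGSTKSZ size, and the naming treated as best-effort because it needs kernel support:

    #include <signal.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>
    #include <cstdio>

    int main() {
      // Map an anonymous region to serve as the alternate signal stack.
      size_t size = SIGSTKSZ;
      void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) { perror("mmap"); return 1; }

      // Register it so signal handlers marked SA_ONSTACK run on it.
      stack_t ss = {};
      ss.ss_sp = base;
      ss.ss_size = size;
      ss.ss_flags = 0;
      if (sigaltstack(&ss, nullptr) == -1) { perror("sigaltstack"); return 1; }

    #if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
      // Best-effort naming; ignored or rejected on kernels without support.
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, base, size, "demo signal stack");
    #endif

      printf("alternate signal stack at %p\n", base);
      return 0;
    }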
87 int __init_thread(pthread_internal_t* thread) {
90 if (__predict_true((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) == 0)) {
91 atomic_init(&thread->join_state, THREAD_NOT_JOINED);
93 atomic_init(&thread->join_state, THREAD_DETACHED);
96 // Set the scheduling policy/priority of the thread.
97 if (thread->attr.sched_policy != SCHED_NORMAL) {
99 param.sched_priority = thread->attr.sched_priority;
100 if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
110 thread->cleanup_stack = NULL;
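The sched_setscheduler() call above applies the requested policy to the new thread by tid. The sketch below shows the same call shape against the calling thread (a pid of 0 means 'this thread' on Linux); note that real-time policies such as SCHED_FIFO normally require CAP_SYS_NICE, so EPERM is the expected result for an unprivileged process:

    #include <sched.h>
    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Request SCHED_FIFO at its lowest priority for the current thread.
      sched_param param = {};
      param.sched_priority = sched_get_priority_min(SCHED_FIFO);
      if (sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
        printf("sched_setscheduler failed: %s\n", strerror(errno));
        return 0;
      }
      printf("now running SCHED_FIFO at priority %d\n", param.sched_priority);
      return 0;
    }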
129 // Set the stack guard region to PROT_NONE, so we can detect thread stack overflow.
137 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, space, stack_guard_size, "thread stack guard page");
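The guard region described above is the classic trick for catching stack overflow: part of the mapping is remapped PROT_NONE so a runaway stack faults instead of silently corrupting whatever sits below it. A self-contained sketch, assuming a single guard page at the low end of a hand-rolled 64 KiB stack:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      size_t stack_size = 64 * 1024;
      size_t total = stack_size + page;

      // Reserve the stack plus one extra page.
      void* mem = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED) { perror("mmap"); return 1; }
      char* base = static_cast<char*>(mem);

      // Make the lowest page inaccessible; touching it now raises SIGSEGV.
      if (mprotect(base, page, PROT_NONE) == -1) { perror("mprotect"); return 1; }

      printf("guard page at %p, usable stack %p..%p\n", mem,
             static_cast<void*>(base + page), static_cast<void*>(base + total));
      munmap(mem, total);
      return 0;
    }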
164 // thread stack (including guard page)
166 // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
170 pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);
172 // If thread was not allocated by mmap(), it may not have been cleared to zero.
174 memset(thread, 0, sizeof(pthread_internal_t));
178 thread->mmap_size = mmap_size;
179 thread->attr = *attr;
180 __init_tls(thread);
182 *threadp = thread;
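__allocate_thread places the pthread_internal_t at the top of the stack mapping itself, which is why the memset matters when the caller supplied its own, possibly dirty, stack. A toy version of the carve-and-align arithmetic, using malloc() in place of mmap() and a made-up descriptor type:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Stand-in for pthread_internal_t; the real layout is private to bionic.
    struct toy_descriptor {
      size_t mmap_size;
    };

    int main() {
      size_t mapping_size = 64 * 1024;
      char* mapping = static_cast<char*>(malloc(mapping_size));  // stands in for mmap()
      if (mapping == nullptr) return 1;

      // Start at the high end, make room for the descriptor, then round down
      // to a 16-byte boundary; everything below that address is usable stack.
      uintptr_t top = reinterpret_cast<uintptr_t>(mapping) + mapping_size;
      top -= sizeof(toy_descriptor);
      top &= ~uintptr_t{15};

      toy_descriptor* thread = reinterpret_cast<toy_descriptor*>(top);
      memset(thread, 0, sizeof(*thread));  // the memory was not zeroed for us
      thread->mmap_size = mapping_size;

      printf("descriptor at %p, aligned: %s\n", static_cast<void*>(thread),
             (top % 16 == 0) ? "yes" : "no");
      free(mapping);
      return 0;
    }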
188 pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(arg);
190 // Wait for our creating thread to release us. This lets it have time to
191 // notify gdb about this thread before we start doing anything.
193 // accesses previously made by the creating thread are visible to us.
194 thread->startup_handshake_lock.lock();
196 __init_alternate_signal_stack(thread);
198 void* result = thread->start_routine(thread->start_routine_arg);
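The handshake above is nothing more than a lock the creator holds until it has finished registering the new thread; the new thread's first action is to block on it, and taking the lock also gives the memory-visibility guarantee the comments mention. A minimal sketch of that pattern with an ordinary pthread_mutex_t standing in for bionic's internal startup_handshake_lock:

    #include <pthread.h>
    #include <cstdio>

    static pthread_mutex_t startup_handshake_lock = PTHREAD_MUTEX_INITIALIZER;

    static void* child_start(void*) {
      // Block until the creator releases us, then get out of the way.
      pthread_mutex_lock(&startup_handshake_lock);
      pthread_mutex_unlock(&startup_handshake_lock);
      printf("child: released, running user code now\n");
      return nullptr;
    }

    int main() {
      // Hold the child back until its bookkeeping is published.
      pthread_mutex_lock(&startup_handshake_lock);

      pthread_t t;
      if (pthread_create(&t, nullptr, child_start, nullptr) != 0) return 1;

      // ... creator-side setup would go here (e.g. publishing the pthread_t) ...
      printf("parent: setup done, releasing child\n");
      pthread_mutex_unlock(&startup_handshake_lock);

      pthread_join(t, nullptr);
      return 0;
    }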
204 // A dummy start routine for pthread_create failures where we've created a thread but aren't
206 // of the regular thread teardown to free up resources.
215 // Inform the rest of the C library that at least one thread was created.
226 pthread_internal_t* thread = NULL;
228 int result = __allocate_thread(&thread_attr, &thread, &child_stack);
233 // Create a lock for the thread to wait on once it starts so we can keep
237 // memory accesses previously performed by this thread are visible to
238 // the new thread.
239 thread->startup_handshake_lock.init(false);
240 thread->startup_handshake_lock.lock();
242 thread->start_routine = start_routine;
243 thread->start_routine_arg = arg;
245 thread->set_cached_pid(getpid());
249 void* tls = reinterpret_cast<void*>(thread->tls);
257 int rc = clone(__pthread_start, child_stack, flags, thread, &(thread->tid), tls, &(thread->tid));
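The clone() call above passes a child stack, a TLS block, and two tid output addresses, which is the shape the CLONE_SETTLS, CLONE_PARENT_SETTID and CLONE_CHILD_CLEARTID flags expect (the flag set itself is built earlier and not shown in this excerpt). The sketch below drives clone() directly with a much smaller flag set: CLONE_VM shares the address space, and SIGCHLD lets the parent reap the child with waitpid():

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE 1
    #endif
    #include <sched.h>
    #include <signal.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <cstdio>

    static int shared_value = 0;

    static int child_fn(void*) {
      shared_value = 42;  // visible to the parent because of CLONE_VM
      return 0;
    }

    int main() {
      // clone() needs a caller-provided stack; hand it the *top*, since the
      // stack grows down on the common architectures.
      size_t stack_size = 64 * 1024;
      void* stack = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
      if (stack == MAP_FAILED) { perror("mmap"); return 1; }
      void* stack_top = static_cast<char*>(stack) + stack_size;

      pid_t tid = clone(child_fn, stack_top, CLONE_VM | SIGCHLD, nullptr);
      if (tid == -1) { perror("clone"); return 1; }

      waitpid(tid, nullptr, 0);
      printf("child wrote shared_value = %d\n", shared_value);
      munmap(stack, stack_size);
      return 0;
    }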
263 thread->startup_handshake_lock.unlock();
264 if (thread->mmap_size != 0) {
265 munmap(thread->attr.stack_base, thread->mmap_size);
271 int init_errno = __init_thread(thread);
273 // Mark the thread detached and replace its start_routine with a no-op.
274 // Letting the thread run is the easiest way to clean up its resources.
275 atomic_store(&thread->join_state, THREAD_DETACHED);
276 __pthread_internal_add(thread);
277 thread->start_routine = __do_nothing;
278 thread->startup_handshake_lock.unlock();
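Once clone() has succeeded, a failed __init_thread cannot simply free the thread: a kernel task already exists and owns that stack. Marking it detached and swapping in the no-op start routine lets the ordinary exit path reclaim everything. A sketch of the same 'detached thread tears itself down' idea using only the public API:

    #include <pthread.h>
    #include <cstdio>
    #include <cstring>

    // A trivial start routine, similar in spirit to __do_nothing above.
    static void* do_nothing_demo(void*) {
      return nullptr;
    }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t t;
      int rc = pthread_create(&t, &attr, do_nothing_demo, nullptr);
      pthread_attr_destroy(&attr);
      if (rc != 0) {
        fprintf(stderr, "pthread_create: %s\n", strerror(rc));
        return 1;
      }

      printf("detached no-op thread started; its stack and bookkeeping are\n");
      printf("reclaimed automatically when it returns, with no join needed\n");

      // Exit only the main thread so the detached thread gets to finish.
      pthread_exit(nullptr);
    }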
282 // Publish the pthread_t and unlock the mutex to let the new thread start running.
283 *thread_out = __pthread_internal_add(thread);
284 thread->startup_handshake_lock.unlock();