Diffstat (limited to 'nptl/pthread_create.c')
-rw-r--r--  nptl/pthread_create.c  |  50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 9d7f52f57e..dd11a560f1 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -61,8 +61,13 @@ unsigned int __nptl_nthreads = 1;
struct pthread *
internal_function
+#ifndef PTHREAD_T_IS_TID
__find_in_stack_list (pd)
struct pthread *pd;
+#else
+__find_in_stack_list (tid)
+ pthread_t tid;
+#endif
{
list_t *entry;
struct pthread *result = NULL;
@@ -74,7 +79,11 @@ __find_in_stack_list (pd)
struct pthread *curp;
curp = list_entry (entry, struct pthread, list);
+#ifndef PTHREAD_T_IS_TID
if (curp == pd)
+#else
+ if (curp->tid == tid)
+#endif
{
result = curp;
break;
@@ -87,7 +96,11 @@ __find_in_stack_list (pd)
struct pthread *curp;
curp = list_entry (entry, struct pthread, list);
+#ifndef PTHREAD_T_IS_TID
if (curp == pd)
+#else
+ if (curp->tid == tid)
+#endif
{
result = curp;
break;
@@ -205,10 +218,12 @@ __free_tcb (struct pthread *pd)
TERMINATED_BIT) == 0, 1))
{
/* Remove the descriptor from the list. */
+#ifndef PTHREAD_T_IS_TID
if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
/* Something is really wrong. The descriptor for a still
running thread is gone. */
abort ();
+#endif
/* Free TPP data. */
if (__builtin_expect (pd->tpp != NULL, 0))
@@ -232,6 +247,14 @@ start_thread (void *arg)
{
struct pthread *pd = (struct pthread *) arg;
+#ifdef ATTR_FLAG_CREATE_FAILED
+ if ((pd->flags & ATTR_FLAG_CREATE_FAILED) && IS_DETACHED (pd))
+ {
+ __free_tcb (pd);
+ __exit_thread_inline (0);
+ }
+#endif
+
#if HP_TIMING_AVAIL
/* Remember the time when the thread was started. */
hp_timing_t now;
@@ -239,8 +262,10 @@ start_thread (void *arg)
THREAD_SETMEM (pd, cpuclock_offset, now);
#endif
+#if USE___THREAD
/* Initialize resolver state pointer. */
__resp = &pd->res;
+#endif
/* Initialize pointers to locale data. */
__ctype_init ();
@@ -249,6 +274,7 @@ start_thread (void *arg)
if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0) == -2, 0))
lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
+#ifndef NO_ROBUST_LIST_SUPPORT
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
if (__set_robust_list_avail >= 0)
@@ -261,6 +287,11 @@ start_thread (void *arg)
sizeof (struct robust_list_head));
}
#endif
+#endif
+
+#ifdef PLATFORM_THREAD_START
+PLATFORM_THREAD_START
+#endif
/* If the parent was running cancellation handlers while creating
the thread the new thread inherited the signal mask. Reset the
@@ -364,6 +395,7 @@ start_thread (void *arg)
the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
+#ifndef NO_ROBUST_LIST_SUPPORT
#ifndef __ASSUME_SET_ROBUST_LIST
/* If this thread has any robust mutexes locked, handle them now. */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
@@ -394,6 +426,7 @@ start_thread (void *arg)
while (robust != (void *) &pd->robust_head);
}
#endif
+#endif /* NO_ROBUST_LIST_SUPPORT */
/* Mark the memory of the stack as usable to the kernel. We free
everything except for the space used for the TCB itself. */
@@ -412,6 +445,7 @@ start_thread (void *arg)
if (IS_DETACHED (pd))
/* Free the TCB. */
__free_tcb (pd);
+#ifndef NO_SETXID_SUPPORT
else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
{
/* Some other thread might call any of the setXid functions and expect
@@ -423,6 +457,7 @@ start_thread (void *arg)
/* Reset the value so that the stack can be reused. */
pd->setxid_futex = 0;
}
+#endif
/* We cannot call '_exit' here. '_exit' will terminate the process.
@@ -550,7 +585,12 @@ __pthread_create_2_1 (newthread, attr, start_routine, arg)
pd->schedpolicy = iattr->schedpolicy;
else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
{
+#ifndef TPP_PTHREAD_SCHED
pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
+#else
+ struct sched_param _param;
+ pthread_getschedparam (pthread_self (), &pd->schedpolicy, &_param);
+#endif
pd->flags |= ATTR_FLAG_POLICY_SET;
}
@@ -559,7 +599,12 @@ __pthread_create_2_1 (newthread, attr, start_routine, arg)
sizeof (struct sched_param));
else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
{
+#ifndef TPP_PTHREAD_SCHED
INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
+#else
+ int _policy;
+ pthread_getschedparam (pthread_self (), &_policy, &pd->schedparam);
+#endif
pd->flags |= ATTR_FLAG_SCHED_SET;
}
@@ -584,8 +629,13 @@ __pthread_create_2_1 (newthread, attr, start_routine, arg)
}
}
+#ifndef PTHREAD_T_IS_TID
/* Pass the descriptor to the caller. */
*newthread = (pthread_t) pd;
+#else
+ /* Pass the tid to the caller. */
+ *newthread = (pthread_t) pd->tid;
+#endif
LIBC_PROBE (pthread_create, 4, newthread, attr, start_routine, arg);
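
The core change in __find_in_stack_list is that, with PTHREAD_T_IS_TID defined, a pthread_t carries the kernel tid rather than a pointer to the thread descriptor, so the descriptor has to be located by comparing stored tids instead of comparing pointers. Below is a minimal standalone sketch of that lookup idea; it uses a hypothetical simplified singly-linked list rather than glibc's list_t stack-used/stack-cache lists, and is only an illustration of the curp->tid == tid comparison the patch introduces.

#include <stddef.h>
#include <sys/types.h>

/* Hypothetical, simplified stand-in for a thread descriptor on the
   stack list; glibc's real struct pthread and list linkage differ.  */
struct thread_desc
{
  pid_t tid;                  /* kernel thread id stored in the descriptor */
  struct thread_desc *next;   /* stand-in for the stack list linkage */
};

/* Walk the list and return the descriptor whose tid matches, or NULL
   if no still-allocated descriptor carries that tid.  */
static struct thread_desc *
find_by_tid (struct thread_desc *head, pid_t tid)
{
  for (struct thread_desc *curp = head; curp != NULL; curp = curp->next)
    if (curp->tid == tid)
      return curp;
  return NULL;
}

The same reasoning explains the changes at the end of __pthread_create_2_1: instead of handing the caller the descriptor pointer (*newthread = (pthread_t) pd), the tid-based build stores pd->tid in *newthread, and every later operation that receives a pthread_t must translate it back to a descriptor with a lookup like the one sketched above.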