Diffstat (limited to 'devel/linuxthreads/files/patch-aa')
-rw-r--r--  devel/linuxthreads/files/patch-aa  1245
1 files changed, 1245 insertions, 0 deletions
diff --git a/devel/linuxthreads/files/patch-aa b/devel/linuxthreads/files/patch-aa
new file mode 100644
index 000000000000..a9bea47a4db8
--- /dev/null
+++ b/devel/linuxthreads/files/patch-aa
@@ -0,0 +1,1245 @@
+diff -ur ./Makefile ../linuxthreads-0.71.new/Makefile
+--- ./Makefile Wed Sep 17 02:17:23 1997
++++ ../linuxthreads-0.71.new/Makefile Wed Oct 27 18:13:29 1999
+@@ -1,107 +1,74 @@
+-#### Configuration section
++LIB=lthread
++SHLIB_MAJOR= 0
++SHLIB_MINOR= 7
++
++.if !defined(MACHINE_ARCH)
++MACHINE_ARCH!= /usr/bin/uname -m
++.endif
++
++.if !defined(LIBSRC_BASE)
++LIBSRC_BASE= /usr/src/lib
++.endif
++
++.if !defined(PREFIX)
++PREFIX= /usr/local
++.endif
++
++LIBDIR= ${PREFIX}/lib
++
++.PATH: ${.CURDIR}/libc_r
++
++#CFLAGS+=-Wall -O
++CFLAGS+=-g -O0 -Wall -DDEBUG
++CFLAGS+=-DCOMPILING_LINUXTHREADS
++
++#This option should not be enabled unless libc has been patched
++#CLAGS+= -DUSE_RECURSIVE_SPINLOCK
++
++# USETHR_FUNCTIONS will use the FreeBSD syscalls thr_sleep and thr_wakeup
++# instead of the default linux threads suspend/restart. I thought this
++# would be a lot faster, but in testing, it doesn't seem to be. Also,
++# there might be a thread exit problem with this code still.
++#CFLAGS+= -DUSETHR_FUNCTIONS
++
++CFLAGS+= -I${.CURDIR}
++CFLAGS+= -I${LIBSRC_BASE}/libc/stdtime
++CFLAGS+= -I${LIBSRC_BASE}/libc/${MACHINE_ARCH}
++CFLAGS+= -DLIBC_RCS
++CFLAGS+= -DLINUXTHREADS
++
++# Only use if libc has been patched to include the new thread safe
++# lib/libc/stdtime/localtime.c
++#CFLAGS+= -DNEWLIBC
+
+-# Where to install
+-
+-INCLUDEDIR=/usr/include
+-LIBDIR=/usr/lib
+-SHAREDLIBDIR=/lib
+-MANDIR=/usr/man/man3
+-
+-# Compilation options
+-
+-CC=gcc
+-
+-CFLAGS=-pipe -O2 -Wall
+-#CFLAGS+=-g -DDEBUG # for debugging
+-
+-PICCFLAGS=-fpic
+-PICLDFLAGS=-shared -Wl,-soname,$(shell echo $@ | sed 's/\.[^.]$$//')
+-
+-# Define this as "yes" if you're using H.J.Lu's libc 5.2.18, 5.3.12, or 5.4.x
+-# (standard on most Linux distributions for Intel processors).
+-# Define this as "no" if you're using a different C library,
+-# e.g. libc 6, also known as glibc
+-
+-LIBC_5_SUPPORT=yes
+-
+-#### End of configuration section
+-
+-# Determine architecture
+-
+-ARCH:=$(shell uname -m | sed -e 's/i.86/i386/')
+-
+-ifeq ($(ARCH),i386)
+-CFLAGS+=-m486
+-endif
+-
+-CFLAGS+=-D__BUILDING_LINUXTHREADS -Isysdeps/$(ARCH)
++AINC= -I${LIBSRC_BASE}/libc/${MACHINE_ARCH}
+
+ # Contents of the library
+-OBJS=pthread.o manager.o attr.o join.o mutex.o condvar.o specific.o cancel.o \
+-  signals.o lockfile.o errno.o fork.o sleep.o semaphore.o
++SRCS= attr.c join.c mutex.c condvar.c specific.c cancel.c
++SRCS+= signals.c fork.c errno.c manager.c pthread.c
++SRCS+= clone.S _atomic_lock.S sched.c uthread_file.c lclone.c
++SRCS+= getservby_r.c getpw_r.c getprotoby_r.c getnetby_r.c gethostby_r.c
++SRCS+= getgr_r.c libc_thread.c uthread_rwlock.c uthread_rwlockattr.c
++SRCS+= stack.c stack_attr.c
++#Note: spinlock.c appears redundant to the spinlock calls in linux threads.
++#However, this particular implementation is needed to make libc thread
++#safe. the _spinlock call overrides a stub function in libc.
++SRCS+= spinlock.c
++
++#libc is not ready for this
++#SRCS+= syscalls.c libc_calls.c
++
++beforeinstall:
++    ${INSTALL} -d -o ${BINOWN} -g ${BINGRP} -m 555 \
++        ${PREFIX}/include/pthread/linuxthreads
++    ${INSTALL} -C -o ${BINOWN} -g ${BINGRP} -m 444 ${.CURDIR}/pthread.h \
++        ${PREFIX}/include/pthread/linuxthreads/pthread.h
++    ${INSTALL} -C -o ${BINOWN} -g ${BINGRP} -m 444 ${.CURDIR}/pthread_rw.h \
++        ${PREFIX}/include/pthread/linuxthreads/pthread_rw.h
++    ${INSTALL} -C -o ${BINOWN} -g ${BINGRP} -m 444 ${.CURDIR}/pthread_stack.h \
++        ${PREFIX}/include/pthread/linuxthreads/pthread_stack.h
++
++.include <bsd.lib.mk>
+
+-ifneq ($(wildcard sysdeps/$(ARCH)/clone.[cS]),)
+-OBJS+=clone.o
+-endif
+-ifneq ($(wildcard sysdeps/$(ARCH)/syscalls.[cS]),)
+-OBJS+=syscalls.o
+-endif
+-
+-vpath %.c sysdeps/$(ARCH)
+-vpath %.S sysdeps/$(ARCH)
+-
+-# The reentrant libc code (taken from libc-5.3.9)
+-ifeq ($(LIBC_5_SUPPORT),yes)
+-vpath %.h libc_r
+-vpath %.c libc_r
+-CFLAGS+=-Ilibc_r -D_POSIX_THREADS
+-OBJS+=stdio.o getnetby_r.o getprotoby_r.o getservby_r.o \
+-  gethostby_r.o getpw_r.o malloc.o dirent.o
+-endif
+-
+-LIB=libpthread.a
+-SHOBJS=$(OBJS:%.o=%.pic)
+-SHLIB=libpthread.so.0.7
+-SHLIB0=libpthread.so
+-
+-all: $(LIB) $(SHLIB)
+-    cd man; $(MAKE) all
+-
+-$(LIB): $(OBJS)
+-    ar rc $(LIB) $(OBJS)
+-
+-$(SHLIB): $(SHOBJS)
+-    $(CC) $(PICLDFLAGS) -o $@ $(SHOBJS)
+-
+-clean:
+-    rm -f $(LIB) $(SHLIB) *.o *.pic *~ libc_r/*~ sysdeps/*/*~
+-    cd man; $(MAKE) clean
+-
+-install:
+-    install pthread.h $(INCLUDEDIR)/pthread.h
+-    install semaphore.h $(INCLUDEDIR)/semaphore.h
+-ifeq ($(LIBC_5_SUPPORT),yes)
+-    test -f /usr/include/sched.h || install sched.h $(INCLUDEDIR)/sched.h
+-endif
+-    install $(LIB) $(LIBDIR)/$(LIB)
+-    install $(SHLIB) $(SHAREDLIBDIR)/$(SHLIB)
+-    rm -f $(LIBDIR)/$(SHLIB0)
+-    ln -s $(SHAREDLIBDIR)/$(SHLIB) $(LIBDIR)/$(SHLIB0)
+-    ldconfig -n $(SHAREDLIBDIR)
+-    cd man; $(MAKE) MANDIR=$(MANDIR) install
+-
+-.SUFFIXES: .pic
+-
+-%.pic: %.c
+-    $(CC) $(CFLAGS) $(PICCFLAGS) -c $< -o $@
+-
+-%.pic: %.S
+-    $(CC) $(CFLAGS) $(PICCFLAGS) -c $< -o $@
+-
+-depend:
+-    $(CC) $(CFLAGS) -MM *.c libc_r/*.c | \
+-    sed -e 's/^\(.*\)\.o:/\1.o \1.pic:/' \
+-        -e 's/sysdeps\/$(ARCH)/sysdeps\/$$(ARCH)/' > .depend
+
+-include .depend
+
+Only in ../linuxthreads-0.71.new: README.FreeBSD
+Only in ../linuxthreads-0.71.new: _atomic_lock.S
+diff -ur ./attr.c ../linuxthreads-0.71.new/attr.c
+--- ./attr.c Sun Mar 9 10:14:32 1997
++++ ../linuxthreads-0.71.new/attr.c Wed Oct 27 18:13:29 1999
+@@ -22,10 +22,10 @@
+ {
+   attr->detachstate = PTHREAD_CREATE_JOINABLE;
+   attr->schedpolicy = SCHED_OTHER;
+-  attr->schedparam.sched_priority = 0;
++  attr->schedparam.sched_priority = DEFAULT_PRIORITY;
+   attr->inheritsched = PTHREAD_EXPLICIT_SCHED;
+   attr->scope = PTHREAD_SCOPE_SYSTEM;
+-  return 0;
++  return (_pthread_set_stack_defaults (attr));
+ }
+
+ int pthread_attr_destroy(pthread_attr_t *attr)
+diff -ur ./cancel.c ../linuxthreads-0.71.new/cancel.c
+--- ./cancel.c Sun Dec 29 08:12:09 1996
++++ ../linuxthreads-0.71.new/cancel.c Wed Oct 27 18:13:29 1999
+@@ -16,7 +16,6 @@
+
+ #include "pthread.h"
+ #include "internals.h"
+-#include "spinlock.h"
+ #include "restart.h"
+
+ int pthread_setcancelstate(int state, int * oldstate)
+Only in ../linuxthreads-0.71.new: clone.S
+Only in ../linuxthreads-0.71.new: clone.h
+diff -ur ./condvar.c ../linuxthreads-0.71.new/condvar.c
+--- ./condvar.c Sun Jun 15 03:26:04 1997
++++ ../linuxthreads-0.71.new/condvar.c Wed Oct 27 18:13:29 1999
+@@ -19,16 +19,13 @@
+ #include <sys/time.h>
+ #include "pthread.h"
+ #include "internals.h"
+-#include "spinlock.h"
+ #include "queue.h"
+ #include "restart.h"
+
+-static void remove_from_queue(pthread_queue * q, pthread_descr th);
+-
+ int pthread_cond_init(pthread_cond_t *cond,
+                       const pthread_condattr_t *cond_attr)
+ {
+-  cond->c_spinlock = 0;
++  _spin_init(&cond->c_spinlock);
+   queue_init(&cond->c_waiting);
+   return 0;
+ }
+@@ -52,6 +49,7 @@
+   release(&cond->c_spinlock);
+   pthread_mutex_unlock(mutex);
+   suspend_with_cancellation(self);
++  ASSERT(self->p_nextwaiting == NULL && cond->c_waiting.head != self);
+   pthread_mutex_lock(mutex);
+   /* This is a cancellation point */
+   if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
+@@ -70,8 +68,9 @@
+                           const struct timespec * reltime)
+ {
+   volatile pthread_descr self = thread_self();
+-  sigset_t unblock, initial_mask;
+   int retsleep;
++#ifndef USETHR_FUNCTIONS
++  sigset_t unblock, initial_mask;
+   sigjmp_buf jmpbuf;
+
+   /* Wait on the condition */
+@@ -107,24 +106,39 @@
+      or the timeout occurred (retsleep == 0)
+      or another interrupt occurred (retsleep == -1) */
+   /* Re-acquire the spinlock */
+-  acquire(&cond->c_spinlock);
+   /* This is a cancellation point */
+-  if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
+-    remove_from_queue(&cond->c_waiting, self);
+-    release(&cond->c_spinlock);
+-    pthread_mutex_lock(mutex);
++  acquire(&cond->c_spinlock);
++  remove_from_queue(&cond->c_waiting, self);
++  release(&cond->c_spinlock);
++  pthread_mutex_lock(mutex);
++  if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE)
+     pthread_exit(PTHREAD_CANCELED);
+-  }
+   /* If not signaled: also remove ourselves and return an error code */
+-  if (self->p_signal == 0) {
+-    remove_from_queue(&cond->c_waiting, self);
+-    release(&cond->c_spinlock);
+-    pthread_mutex_lock(mutex);
++  if (self->p_signal == 0)
+     return retsleep == 0 ? ETIMEDOUT : EINTR;
+-  }
+-  /* Otherwise, return normally */
++#else
++  acquire(&cond->c_spinlock);
++  enqueue(&cond->c_waiting, self);
++  release(&cond->c_spinlock);
++  pthread_mutex_unlock(mutex);
++  retsleep = 0;
++  if (!(self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE))
++    /* We really should make thr_sleep return EINTR too.  It just
++       returns EAGAIN if it timed out, or 0 if awakened (or
++       EINVAL if bad parameter.
++    */
++    retsleep = syscall(SYS_thr_sleep, reltime);
++
++  acquire(&cond->c_spinlock);
++  if (self->p_nextwaiting != NULL || cond->c_waiting.head == self)
++    remove_from_queue(&cond->c_waiting, self);
+   release(&cond->c_spinlock);
+   pthread_mutex_lock(mutex);
++  if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE)
++    pthread_exit(PTHREAD_CANCELED);
++  if (retsleep)
++    return retsleep == EAGAIN ? ETIMEDOUT : EINTR;
++#endif
+   return 0;
+ }
+
+@@ -181,25 +195,24 @@
+   return 0;
+ }
+
+-/* Auxiliary function on queues */
+-
+-static void remove_from_queue(pthread_queue * q, pthread_descr th)
++int remove_from_queue(pthread_queue * q, pthread_descr th)
+ {
+   pthread_descr t;
+
+-  if (q->head == NULL) return;
++  if (q->head == NULL) return 0;
+   if (q->head == th) {
+     q->head = th->p_nextwaiting;
+     if (q->head == NULL) q->tail = NULL;
+     th->p_nextwaiting = NULL;
+-    return;
++    return 1;
+   }
+   for (t = q->head; t->p_nextwaiting != NULL; t = t->p_nextwaiting) {
+     if (t->p_nextwaiting == th) {
+       t->p_nextwaiting = th->p_nextwaiting;
+       if (th->p_nextwaiting == NULL) q->tail = t;
+       th->p_nextwaiting = NULL;
+-      return;
++      return 1;
+     }
+   }
++  return 0;
+ }
+diff -ur ./errno.c ../linuxthreads-0.71.new/errno.c
+--- ./errno.c Sun Dec 29 06:05:37 1996
++++ ../linuxthreads-0.71.new/errno.c Wed Oct 27 18:13:29 1999
+@@ -19,15 +19,8 @@
+ #include "pthread.h"
+ #include "internals.h"
+
+-int * __errno_location()
++int * __error()
+ {
+   pthread_descr self = thread_self();
+   return &(self->p_errno);
+ }
+-
+-int * __h_errno_location()
+-{
+-  pthread_descr self = thread_self();
+-  return &(self->p_h_errno);
+-}
+-
+Only in ../linuxthreads-0.71.new: getgr_r.c
+diff -ur ./internals.h ../linuxthreads-0.71.new/internals.h
+--- ./internals.h Fri Dec 5 02:28:20 1997
++++ ../linuxthreads-0.71.new/internals.h Wed Oct 27 18:15:29 1999
+@@ -17,12 +17,37 @@
+ /* Includes */
+
+ #include <sys/types.h>
++#include <sys/queue.h>
++#include <sys/mman.h>
+ #include <setjmp.h>
+ #include <signal.h>
+-#include <gnu-stabs.h>  /* for weak_alias */
+-#include <linux/mm.h>
++#include "clone.h"
++#include "spinlock.h"
++#include "private.h"
++
++#define __getpid getpid
++#define __fork _fork
++#define __nanosleep _nanosleep
++#if 0
++#define __WCLONE WLINUXCLONE
++#else
++#define __WCLONE clone_flag
++#endif
++#ifndef MAP_STACK
++#define MAP_STACK 0
++#endif
++#define DEFAULT_PRIORITY 20
++#define THREAD_SELF \
++{ \
++  char *sp = CURRENT_STACK_FRAME; \
++  if (sp >= __pthread_initial_thread_bos) \
++    return &__pthread_initial_thread; \
++  else if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos) \
++    return &__pthread_manager_thread; \
++  else \
++    return GET_TLS_FROM_STACK(sp); \
++}
+
+-#include "pt-machine.h"
+
+ /* Arguments passed to thread creation routine */
+
+@@ -34,7 +59,7 @@
+   struct sched_param schedparam;  /* initial scheduling parameters (if any) */
+ };
+
+-#define PTHREAD_START_ARGS_INITIALIZER { NULL, NULL, 0, 0, { 0 } }
++#define PTHREAD_START_ARGS_INITIALIZER { NULL, NULL, { { 0 } }, 0, { 0 } }
+
+ /* We keep thread specific data in a special data structure, a two-level
+    array. The top-level array contains pointers to dynamically allocated
+@@ -61,7 +86,7 @@
+   pthread_t p_tid;  /* Thread identifier */
+   int p_pid;  /* PID of Unix process */
+   int p_priority;  /* Thread priority (== 0 if not realtime) */
+-  int * p_spinlock;  /* Spinlock for synchronized accesses */
++  spinlock_t * p_spinlock;  /* Spinlock for synchronized accesses */
+   int p_signal;  /* last signal received */
+   sigjmp_buf * p_signal_jmp;  /* where to siglongjmp on a signal or NULL */
+   sigjmp_buf * p_cancel_jmp;  /* where to siglongjmp on a cancel or NULL */
+@@ -77,16 +102,20 @@
+   char p_canceled;  /* cancellation request pending */
+   int p_errno;  /* error returned by last system call */
+   int p_h_errno;  /* error returned by last netdb function */
++  int stacksize;
++  pthread_mutex_t smutex;
+   struct pthread_start_args p_start_args;  /* arguments for thread creation */
+   void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE];  /* thread-specific data */
++  TAILQ_ENTRY(_pthread_descr_struct) qe;
++  char time_buf[3 * 2 + 5 * INT_STRLEN_MAXIMUM(int) + 3 + 2 + 1 + 1];
++  struct tm local_tm;
+ };
+
+ /* The type of thread handles. */
+
+ typedef struct pthread_handle_struct * pthread_handle;
+-
+ struct pthread_handle_struct {
+-  int h_spinlock;  /* Spinlock for sychronized access */
++  spinlock_t h_spinlock;  /* Spinlock for sychronized access */
+   pthread_descr h_descr;  /* Thread descriptor or NULL if invalid */
+ };
+
+@@ -255,12 +284,13 @@
+ void __pthread_reset_main_thread(void);
+ void __fresetlockfiles(void);
+
+-/* System calls not declared in libc 5 */
++int _sigsuspend __P((const sigset_t *));
++pid_t _fork __P((void));
++ssize_t _write __P((int, const void *, size_t));
++int _close __P((int));
++int _nanosleep __P((const struct timespec *, struct timespec *));
++int _sched_yield __P((void));
+
+-int __clone(int (*child_function)(void *), void ** child_stack, int flags,
+-            void * arg);
+-int __nanosleep(const struct timespec * rqtp, struct timespec * rmtp);
+-int __sched_yield(void);
+ int __sched_setparam(pid_t pid, const struct sched_param *param);
+ int __sched_getparam(pid_t pid, struct sched_param *param);
+ int __sched_setscheduler(pid_t pid, int policy,
+diff -ur ./join.c ../linuxthreads-0.71.new/join.c
+--- ./join.c Sun Dec 29 08:12:10 1996
++++ ../linuxthreads-0.71.new/join.c Wed Oct 27 18:13:29 1999
+@@ -17,7 +17,6 @@
+ #include <unistd.h>
+ #include "pthread.h"
+ #include "internals.h"
+-#include "spinlock.h"
+ #include "restart.h"
+
+ void pthread_exit(void * retval)
+@@ -48,7 +47,7 @@
+   if (self == __pthread_main_thread && __pthread_manager_request >= 0) {
+     request.req_thread = self;
+     request.req_kind = REQ_MAIN_THREAD_EXIT;
+-    write(__pthread_manager_request, (char *)&request, sizeof(request));
++    _write(__pthread_manager_request, (char *)&request, sizeof(request));
+     suspend(self);
+   }
+   /* Exit the process (but don't flush stdio streams, and don't run
+@@ -98,7 +97,7 @@
+     request.req_thread = self;
+     request.req_kind = REQ_FREE;
+     request.req_args.free.thread = th;
+-    write(__pthread_manager_request, (char *) &request, sizeof(request));
++    _write(__pthread_manager_request, (char *)&request, sizeof(request));
+   }
+   return 0;
+ }
+@@ -135,7 +134,7 @@
+     request.req_thread = thread_self();
+     request.req_kind = REQ_FREE;
+     request.req_args.free.thread = th;
+-    write(__pthread_manager_request, (char *) &request, sizeof(request));
++    _write(__pthread_manager_request, (char *)&request, sizeof(request));
+   }
+   return 0;
+ }
+Only in ../linuxthreads-0.71.new: lclone.c
+Only in ../linuxthreads-0.71.new: libc_calls.c
+Only in ../linuxthreads-0.71.new: libc_private.h
+diff -ur ./libc_r/getprotoby_r.c ../linuxthreads-0.71.new/libc_r/getprotoby_r.c
+--- ./libc_r/getprotoby_r.c Sat Nov 16 06:38:10 1996
++++ ../linuxthreads-0.71.new/libc_r/getprotoby_r.c Wed Oct 27 18:13:29 1999
+@@ -1,4 +1,4 @@
+-#include "../pthread.h"
++#include "pthread.h"
+ #include <netdb.h>
+ #include <string.h>
+
+Only in ../linuxthreads-0.71.new/libc_r: getprotoby_r.c.orig
+diff -ur ./libc_r/getpw_r.c ../linuxthreads-0.71.new/libc_r/getpw_r.c
+--- ./libc_r/getpw_r.c Sat Nov 2 08:01:49 1996
++++ ../linuxthreads-0.71.new/libc_r/getpw_r.c Wed Oct 27 18:13:29 1999
+@@ -2,7 +2,7 @@
+ #include <string.h>
+ #include <errno.h>
+ #include <pwd.h>
+-#include "../pthread.h"
++#include "pthread.h"
+
+ static pthread_mutex_t getpw_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+Only in ../linuxthreads-0.71.new/libc_r: getpw_r.c.orig
+diff -ur ./libc_r/getservby_r.c ../linuxthreads-0.71.new/libc_r/getservby_r.c
+--- ./libc_r/getservby_r.c Sat Nov 16 06:38:10 1996
++++ ../linuxthreads-0.71.new/libc_r/getservby_r.c Wed Oct 27 18:13:29 1999
+@@ -1,4 +1,4 @@
+-#include "../pthread.h"
++#include "pthread.h"
+ #include <netdb.h>
+ #include <string.h>
+
+Only in ../linuxthreads-0.71.new/libc_r: getservby_r.c.orig
+Only in ../linuxthreads-0.71.new: libc_spinlock.h
+Only in ../linuxthreads-0.71.new: libc_thread.c
+diff -ur ./manager.c ../linuxthreads-0.71.new/manager.c
+--- ./manager.c Mon Dec 1 02:48:51 1997
++++ ../linuxthreads-0.71.new/manager.c Wed Oct 27 18:13:29 1999
+@@ -22,20 +22,16 @@
+ #include <string.h>
+ #include <unistd.h>
+ #include <sys/time.h>  /* for select */
+-#include <sys/types.h>  /* for select */
+-#include <sys/mman.h>  /* for mmap */
+ #include <sys/wait.h>  /* for waitpid macros */
+-#include <linux/sched.h>
+
+ #include "pthread.h"
+ #include "internals.h"
+-#include "spinlock.h"
+ #include "restart.h"
+
+ /* Array of active threads. Entry 0 is reserved for the initial thread. */
+
+ struct pthread_handle_struct __pthread_handles[PTHREAD_THREADS_MAX] =
+-{ { 0, &__pthread_initial_thread}, /* All NULLs */ };
++{ { _SPINLOCK_INITIALIZER, &__pthread_initial_thread}, /* All NULLs */ };
+
+ /* Mapping from stack segment to thread descriptor. */
+ /* Stack segment numbers are also indices into the __pthread_handles array. */
+@@ -43,7 +39,7 @@
+
+ static inline pthread_descr thread_segment(int seg)
+ {
+-  return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
++  return (pthread_descr)(_thread_stack_start - (seg - 1) * _stackspacing)
+          - 1;
+ }
+
+@@ -71,6 +67,8 @@
+ static void pthread_reap_children();
+ static void pthread_kill_all_threads(int sig, int main_thread_also);
+
++extern int clone_flag;
++
+ /* The server thread managing requests for thread creation and termination */
+
+ int __pthread_manager(void * arg)
+@@ -147,6 +145,9 @@
+ {
+   pthread_descr self = (pthread_descr) arg;
+   void * outcome;
++
++  pthread_mutex_lock (&self->smutex);
++
+   /* Initialize special thread_self processing, if any. */
+ #ifdef INIT_THREAD_SELF
+   INIT_THREAD_SELF(self);
+@@ -157,9 +158,8 @@
+   /* Initial signal mask is that of the creating thread. (Otherwise,
+      we'd just inherit the mask of the thread manager.) */
+   sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
+-  /* Set the scheduling policy and priority for the new thread, if needed */
+-  if (self->p_start_args.schedpolicy != SCHED_OTHER)
+-    __sched_setscheduler(self->p_pid, self->p_start_args.schedpolicy,
++  /* Set the scheduling policy and priority for the new thread */
++  __sched_setscheduler(self->p_pid, self->p_start_args.schedpolicy,
+                          &self->p_start_args.schedparam);
+   /* Run the thread code */
+   outcome = self->p_start_args.start_routine(self->p_start_args.arg);
+@@ -176,27 +176,47 @@
+   int pid;
+   pthread_descr new_thread;
+   pthread_t new_thread_id;
++  pthread_attr_t *cattr, _cattr;
+   int i;
++  caddr_t newaddr;
++  int newsize;
++
++  cattr = &_cattr;
++  if (attr == NULL) {
++    pthread_attr_init (cattr);
++  } else {
++    _cattr = *attr;
++    if (_pthread_check_stackattr (cattr)){
++      return (EINVAL);
++    }
++  }
++  newsize = _tlspagesize + cattr->stack_size;
+
+   /* Find a free stack segment for the current stack */
+   for (sseg = 1; ; sseg++) {
+     if (sseg >= PTHREAD_THREADS_MAX) return EAGAIN;
++    /* XXXX do we need to acquire a lock on the handle here ? */
+     if (__pthread_handles[sseg].h_descr != NULL) continue;
+     new_thread = thread_segment(sseg);
++    if (cattr->stack_addr != NULL && cattr->stack_addr != new_thread)
++      continue;
+     /* Allocate space for stack and thread descriptor. */
+-    if (mmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
+-             INITIAL_STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
+-             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_GROWSDOWN, -1, 0)
+-        != (caddr_t) -1) break;
++    newaddr = (caddr_t)(new_thread+1) - newsize;
++    if (mmap(newaddr, newsize,
++             PROT_READ | PROT_WRITE | PROT_EXEC,
++             MAP_STACK | MAP_PRIVATE | MAP_ANON | MAP_FIXED,
++             -1, 0)
++        != MAP_FAILED) break;
+     /* It seems part of this segment is already mapped. Try the next. */
+   }
++
+   /* Allocate new thread identifier */
+   pthread_threads_counter += PTHREAD_THREADS_MAX;
+   new_thread_id = sseg + pthread_threads_counter;
+   /* Initialize the thread descriptor */
+   new_thread->p_nextwaiting = NULL;
+   new_thread->p_tid = new_thread_id;
+-  new_thread->p_priority = 0;
++  new_thread->p_priority = DEFAULT_PRIORITY;
+   new_thread->p_spinlock = &(__pthread_handles[sseg].h_spinlock);
+   new_thread->p_signal = 0;
+   new_thread->p_signal_jmp = NULL;
+@@ -212,14 +232,16 @@
+   new_thread->p_canceled = 0;
+   new_thread->p_errno = 0;
+   new_thread->p_h_errno = 0;
++  new_thread->stacksize = newsize;
+   for (i = 0; i < PTHREAD_KEY_1STLEVEL_SIZE; i++)
+     new_thread->p_specific[i] = NULL;
+   /* Initialize the thread handle */
+-  __pthread_handles[sseg].h_spinlock = 0; /* should already be 0 */
++  _spin_init (new_thread->p_spinlock);
+   __pthread_handles[sseg].h_descr = new_thread;
+   /* Determine scheduling parameters for the thread */
+   new_thread->p_start_args.schedpolicy = SCHED_OTHER;
+-  if (attr != NULL && attr->schedpolicy != SCHED_OTHER) {
++  new_thread->p_start_args.schedparam.sched_priority = new_thread->p_priority;
++  if (attr != NULL) {
+     switch(attr->inheritsched) {
+     case PTHREAD_EXPLICIT_SCHED:
+       new_thread->p_start_args.schedpolicy = attr->schedpolicy;
+@@ -237,6 +259,9 @@
+   new_thread->p_start_args.start_routine = start_routine;
+   new_thread->p_start_args.arg = arg;
+   new_thread->p_start_args.mask = *mask;
++
++  pthread_mutex_init (&new_thread->smutex, NULL);
++  pthread_mutex_lock (&new_thread->smutex);
+   /* Do the cloning */
+   pid = __clone(pthread_start_thread, (void **) new_thread,
+                 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
+@@ -245,11 +270,15 @@
+   /* Check if cloning succeeded */
+   if (pid == -1) {
+     /* Free the stack */
+-    munmap((caddr_t)((char *)(new_thread+1) - INITIAL_STACK_SIZE),
+-           INITIAL_STACK_SIZE);
++    munmap(newaddr, newsize);
+     __pthread_handles[sseg].h_descr = NULL;
+-    return errno;
++    return EAGAIN;
+   }
++  /* Shouldn't we have blocked pthread_start_thread at its inception
++     so we can complete the rest of the pthread_create routines
++     before it runs?  Otherwise, pthread_start_thread and its
++     user function can begin before we're done?
++  */
+   /* Insert new thread in doubly linked list of active threads */
+   new_thread->p_prevlive = __pthread_main_thread;
+   new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
+@@ -260,6 +289,7 @@
+   new_thread->p_pid = pid;
+   /* We're all set */
+   *thread = new_thread_id;
++  pthread_mutex_unlock (&new_thread->smutex);
+   return 0;
+ }
+
+@@ -277,7 +307,7 @@
+   /* If initial thread, nothing to free */
+   if (th == &__pthread_initial_thread) return;
+   /* Free the stack and thread descriptor area */
+-  munmap((caddr_t) ((char *)(th+1) - STACK_SIZE), STACK_SIZE);
++  munmap((caddr_t) ((char *)(th+1) - th->stacksize), th->stacksize);
+ }
+
+ /* Handle threads that have exited */
+diff -ur ./mutex.c ../linuxthreads-0.71.new/mutex.c
+--- ./mutex.c Thu Dec 4 06:33:42 1997
++++ ../linuxthreads-0.71.new/mutex.c Wed Oct 27 18:13:29 1999
+@@ -17,14 +17,13 @@
+ #include <stddef.h>
+ #include "pthread.h"
+ #include "internals.h"
+-#include "spinlock.h"
+ #include "queue.h"
+ #include "restart.h"
+
+ int pthread_mutex_init(pthread_mutex_t * mutex,
+                        const pthread_mutexattr_t * mutex_attr)
+ {
+-  mutex->m_spinlock = 0;
++  _spin_init (&mutex->m_spinlock);
+   mutex->m_count = 0;
+   mutex->m_owner = NULL;
+   mutex->m_kind =
+@@ -84,10 +83,11 @@
+
+ int pthread_mutex_lock(pthread_mutex_t * mutex)
+ {
+-  pthread_descr self;
++  pthread_descr self = thread_self();
+
+   while(1) {
+     acquire(&mutex->m_spinlock);
++    remove_from_queue(&mutex->m_waiting, self);
+     switch(mutex->m_kind) {
+     case PTHREAD_MUTEX_FAST_NP:
+       if (mutex->m_count == 0) {
+@@ -95,10 +95,8 @@
+         release(&mutex->m_spinlock);
+         return 0;
+       }
+-      self = thread_self();
+       break;
+     case PTHREAD_MUTEX_RECURSIVE_NP:
+-      self = thread_self();
+       if (mutex->m_count == 0 || mutex->m_owner == self) {
+         mutex->m_count++;
+         mutex->m_owner = self;
+@@ -107,7 +105,6 @@
+       }
+       break;
+     case PTHREAD_MUTEX_ERRORCHECK_NP:
+-      self = thread_self();
+       if (mutex->m_count == 0) {
+         mutex->m_count = 1;
+         mutex->m_owner = self;
+@@ -183,14 +180,14 @@
+   attr->mutexkind = kind;
+   return 0;
+ }
+-weak_alias(__pthread_mutexattr_setkind_np, pthread_mutexattr_setkind_np);
++#pragma weak pthread_mutexattr_setkind_np=__pthread_mutexattr_setkind_np
+
+ int __pthread_mutexattr_getkind_np(const pthread_mutexattr_t *attr, int *kind)
+ {
+   *kind = attr->mutexkind;
+   return 0;
+ }
+-weak_alias(__pthread_mutexattr_getkind_np, pthread_mutexattr_getkind_np);
++#pragma weak pthread_mutexattr_getkind_np=__pthread_mutexattr_getkind_np
+
+ /* Once-only execution */
+
+@@ -223,18 +220,3 @@
+   return 0;
+ }
+
+-/* Internal locks for libc 5.2.18 */
+-
+-static pthread_mutex_t libc_libio_lock = PTHREAD_MUTEX_INITIALIZER;
+-static pthread_mutex_t libc_localtime_lock = PTHREAD_MUTEX_INITIALIZER;
+-static pthread_mutex_t libc_gmtime_lock = PTHREAD_MUTEX_INITIALIZER;
+-
+-/* The variables below are defined as weak symbols in libc,
+-   initialized to NULL pointers, and with dummy pthread_mutex_*
+-   functions (weak symbols also) that do nothing. If we provide
+-   our implementations of pthread_mutex_*, we must also provide
+-   initialized pointers to mutexes for those variables. */
+-
+-pthread_mutex_t * __libc_libio_lock = &libc_libio_lock;
+-pthread_mutex_t * __libc_localtime_lock = &libc_localtime_lock;
+-pthread_mutex_t * __libc_gmtime_lock = &libc_gmtime_lock;
+diff -ur ./pthread.c ../linuxthreads-0.71.new/pthread.c
+--- ./pthread.c Sun Nov 23 09:58:49 1997
++++ ../linuxthreads-0.71.new/pthread.c Wed Oct 27 18:13:29 1999
+@@ -1,4 +1,4 @@
+-/* Linuxthreads - a simple clone()-based implementation of Posix        */
++/* Linuxthread - a simple clone()-based implementation of Posix         */
+ /* threads for Linux.                                                    */
+ /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)               */
+ /*                                                                       */
+@@ -24,7 +24,6 @@
+ #include <sys/wait.h>
+ #include "pthread.h"
+ #include "internals.h"
+-#include "spinlock.h"
+ #include "restart.h"
+
+ /* Descriptor of the initial thread */
+@@ -35,7 +34,7 @@
+   NULL,                       /* pthread_descr p_nextwaiting */
+   PTHREAD_THREADS_MAX,        /* pthread_t p_tid */
+   0,                          /* int p_pid */
+-  0,                          /* int p_priority */
++  DEFAULT_PRIORITY,           /* int p_priority */
+   &__pthread_handles[0].h_spinlock,  /* int * p_spinlock */
+   0,                          /* int p_signal */
+   NULL,                       /* sigjmp_buf * p_signal_buf */
+@@ -52,6 +51,8 @@
+   0,                          /* char p_canceled */
+   0,                          /* int p_errno */
+   0,                          /* int p_h_errno */
++  0,                          /* int stacksize */
++  PTHREAD_MUTEX_INITIALIZER,
+   PTHREAD_START_ARGS_INITIALIZER,  /* struct pthread_start_args p_start_args */
+   {NULL}                      /* void ** p_specific[PTHREAD_KEY_1STLEVEL] */
+ };
+@@ -65,7 +66,7 @@
+   NULL,                       /* pthread_descr p_nextwaiting */
+   0,                          /* int p_tid */
+   0,                          /* int p_pid */
+-  0,                          /* int p_priority */
++  DEFAULT_PRIORITY,           /* int p_priority */
+   NULL,                       /* int * p_spinlock */
+   0,                          /* int p_signal */
+   NULL,                       /* sigjmp_buf * p_signal_buf */
+@@ -82,6 +83,8 @@
+   0,                          /* char p_canceled */
+   0,                          /* int p_errno */
+   0,                          /* int p_h_errno */
++  0,                          /* int stacksize */
++  PTHREAD_MUTEX_INITIALIZER,
+   PTHREAD_START_ARGS_INITIALIZER,  /* struct pthread_start_args p_start_args */
+   {NULL}                      /* void ** p_specific[PTHREAD_KEY_1STLEVEL] */
+ };
+@@ -119,9 +122,12 @@
+ int __pthread_exit_requested = 0;
+ int __pthread_exit_code = 0;
+
++int clone_flag = 0;
++
+ /* Forward declarations */
+
+-static void pthread_exit_process(int retcode, void * arg);
++/* XXXX fix this */
++static void pthread_exit_process(void);
+ static void pthread_handle_sigcancel(int sig);
+
+ /* Initialize the pthread library.
+@@ -137,14 +143,15 @@
+ {
+   struct sigaction sa;
+   sigset_t mask;
++  int status;
+
+   /* If already done (e.g. by a constructor called earlier!), bail out */
+   if (__pthread_initial_thread_bos != NULL) return;
+   /* For the initial stack, reserve at least STACK_SIZE bytes of stack
+      below the current stack address, and align that on a
+      STACK_SIZE boundary. */
+-  __pthread_initial_thread_bos =
+-    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
++
++  __pthread_initial_thread_bos = (char *)STACK_START;
+   /* Update the descriptor for the initial thread. */
+   __pthread_initial_thread.p_pid = __getpid();
+   /* If we have special thread_self processing, initialize that for the
+@@ -168,10 +175,17 @@
+   sigemptyset(&mask);
+   sigaddset(&mask, PTHREAD_SIG_RESTART);
+   sigprocmask(SIG_BLOCK, &mask, NULL);
++
++  /* This is FreeBSD specific, and is designed to detect pre/post March 1
++   * kernels, and adjust wait processing accordingly.
++   */
++  if (waitpid(-1, &status, WNOHANG | WLINUXCLONE) >= 0 || errno != EINVAL)
++    clone_flag = WLINUXCLONE;
++
+   /* Register an exit function to kill all other threads. */
+   /* Do it early so that user-registered atexit functions are called
+      before pthread_exit_process. */
+-  on_exit(pthread_exit_process, NULL);
++  atexit(pthread_exit_process);
+ }
+
+ static int pthread_initialize_manager(void)
+@@ -196,7 +210,7 @@
+   /* Start the thread manager */
+   __pthread_manager_pid =
+     __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
+-            CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
++            CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | PTHREAD_SIG_RESTART,
+             (void *) manager_pipe[0]);
+   if (__pthread_manager_pid == -1) {
+     free(__pthread_manager_thread_bos);
+@@ -205,6 +219,7 @@
+     __pthread_manager_request = -1;
+     return -1;
+   }
++  _pthread_stack_init();
+   return 0;
+ }
+
+@@ -215,6 +230,7 @@
+ {
+   pthread_descr self = thread_self();
+   struct pthread_request request;
++
+   if (__pthread_manager_request < 0) {
+     if (pthread_initialize_manager() < 0) return EAGAIN;
+   }
+@@ -225,7 +241,7 @@
+   request.req_args.create.arg = arg;
+   sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
+               &request.req_args.create.mask);
+-  write(__pthread_manager_request, (char *) &request, sizeof(request));
++  _write(__pthread_manager_request, (char *) &request, sizeof(request));
+   suspend(self);
+   if (self->p_retcode == 0) *thread = (pthread_t) self->p_retval;
+   return self->p_retcode;
+@@ -262,7 +278,7 @@
+     release(&handle->h_spinlock);
+     return errno;
+   }
+-  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
++  th->p_priority = param->sched_priority;
+   release(&handle->h_spinlock);
+   return 0;
+ }
+@@ -289,8 +305,10 @@
+
+ /* Process-wide exit() request */
+
+-static void pthread_exit_process(int retcode, void * arg)
++static void pthread_exit_process(void)
+ {
++  int retcode = 0;
++
+   struct pthread_request request;
+   pthread_descr self = thread_self();
+
+@@ -298,7 +316,7 @@
+     request.req_thread = self;
+     request.req_kind = REQ_PROCESS_EXIT;
+     request.req_args.exit.code = retcode;
+-    write(__pthread_manager_request, (char *) &request, sizeof(request));
++    _write(__pthread_manager_request, (char *) &request, sizeof(request));
+     suspend(self);
+     /* Main thread should accumulate times for thread manager and its
+        children, so that timings for main thread account for all threads. */
+@@ -365,8 +383,8 @@
+     free(__pthread_manager_thread_bos);
+     __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
+     /* Close the two ends of the pipe */
+-    close(__pthread_manager_request);
+-    close(__pthread_manager_reader);
++    _close(__pthread_manager_request);
++    _close(__pthread_manager_reader);
+     __pthread_manager_request = __pthread_manager_reader = -1;
+   }
+   /* Update the pid of the main thread */
+@@ -382,12 +400,12 @@
+ void __pthread_kill_other_threads_np(void)
+ {
+   /* Terminate all other threads and thread manager */
+-  pthread_exit_process(0, NULL);
++  pthread_exit_process();
+   /* Make current thread the main thread in case the calling thread
+      changes its mind, does not exec(), and creates new threads instead. */
+   __pthread_reset_main_thread();
+ }
+-weak_alias(__pthread_kill_other_threads_np, pthread_kill_other_threads_np);
++#pragma weak pthread_kill_other_threads_np=__pthread_kill_other_threads_np
+
+ /* Debugging aid */
+
+@@ -398,7 +416,7 @@
+   char buffer[1024];
+   sprintf(buffer, "%05d : ", __getpid());
+   sprintf(buffer + 8, fmt, arg);
+-  write(2, buffer, strlen(buffer));
++  _write(2, buffer, strlen(buffer));
+ }
+
+ #endif
+diff -ur ./pthread.h ../linuxthreads-0.71.new/pthread.h
+--- ./pthread.h Thu Dec 4 06:33:41 1997
++++ ../linuxthreads-0.71.new/pthread.h Wed Oct 27 18:13:29 1999
+@@ -14,19 +14,13 @@
+
+ #ifndef _PTHREAD_H
+
+-#define _PTHREAD_H 1
+-#include <features.h>
+-
++#define _PTHREAD_H
+ #define __need_sigset_t
+ #include <signal.h>
+ #define __need_timespec
+ #include <time.h>
+
+-#ifdef __BUILDING_LINUXTHREADS
+-#include <linux/sched.h>
+-#else
+-#include <sched.h>
+-#endif
++#include <posix4/sched.h>
+
+ #ifndef _REENTRANT
+ #define _REENTRANT
+@@ -67,6 +61,17 @@
+ /* Thread descriptors */
+ typedef struct _pthread_descr_struct * _pthread_descr;
+
++#ifndef SPINLOCK_DEFINED
++typedef struct {
++  volatile long access_lock;
++  volatile long lock_owner;
++  volatile char *fname;
++  volatile int lineno;
++} spinlock_t;
++#define _SPINLOCK_INITIALIZER { 0, 0, 0, 0 }
++#define SPINLOCK_DEFINED
++#endif
++
+ /* Waiting queues (not abstract because mutexes and conditions aren't). */
+ struct _pthread_queue {
+   _pthread_descr head;  /* First element, or NULL if queue empty. */
+@@ -75,24 +80,24 @@
+
+ /* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER). */
+ typedef struct {
+-  int m_spinlock;  /* Spin lock to guarantee mutual exclusion. */
++  spinlock_t m_spinlock;  /* Spin lock to guarantee mutual exclusion. */
+   int m_count;  /* 0 if free, > 0 if taken. */
+   _pthread_descr m_owner;  /* Owner of mutex (for recursive mutexes) */
+   int m_kind;  /* Kind of mutex */
+   struct _pthread_queue m_waiting;  /* Threads waiting on this mutex. */
+ } pthread_mutex_t;
+
+-#define PTHREAD_MUTEX_INITIALIZER {0, 0, 0, PTHREAD_MUTEX_FAST_NP, {0, 0}}
+-#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, {0, 0}}
+-#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}}
++#define PTHREAD_MUTEX_INITIALIZER {_SPINLOCK_INITIALIZER, 0, 0, PTHREAD_MUTEX_FAST_NP, {0, 0}}
++#define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP {_SPINLOCK_INITIALIZER, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, {0, 0}}
++#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP {_SPINLOCK_INITIALIZER, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}}
+
+ /* Conditions (not abstract because of PTHREAD_COND_INITIALIZER */
+ typedef struct {
+-  int c_spinlock;  /* Spin lock to protect the queue. */
++  spinlock_t c_spinlock;  /* Spin lock to protect the queue. */
+   struct _pthread_queue c_waiting;  /* Threads waiting on this condition. */
+ } pthread_cond_t;
+
+-#define PTHREAD_COND_INITIALIZER {0, {0, 0}}
++#define PTHREAD_COND_INITIALIZER {_SPINLOCK_INITIALIZER, {0, 0}}
+
+ /* Attributes */
+
+@@ -117,6 +122,9 @@
+   struct sched_param schedparam;
+   int inheritsched;
+   int scope;
++  void * stack_addr;
++  int stack_size;
++  int guard_size;
+ } pthread_attr_t;
+
+ enum {
+@@ -464,6 +472,10 @@
+    Should be called just before invoking one of the exec*() functions. */
+
+ extern void pthread_kill_other_threads_np __P((void));
++
++#ifdef COMPILING_LINUXTHREADS
++#include "pthread_stack.h"
++#endif
+
+ #if defined(__cplusplus)
+ }
+Only in ../linuxthreads-0.71.new: pthread_private.h
+Only in ../linuxthreads-0.71.new: pthread_rw.h
+Only in ../linuxthreads-0.71.new: pthread_stack.h
+diff -ur ./queue.h ../linuxthreads-0.71.new/queue.h
+--- ./queue.h Fri Dec 5 02:28:21 1997
++++ ../linuxthreads-0.71.new/queue.h Wed Oct 27 18:13:29 1999
+@@ -60,3 +60,5 @@
+   }
+   return th;
+ }
++
++int remove_from_queue(pthread_queue * q, pthread_descr th);
+diff -ur ./restart.h ../linuxthreads-0.71.new/restart.h
+--- ./restart.h Fri Dec 5 02:28:21 1997
++++ ../linuxthreads-0.71.new/restart.h Wed Oct 27 18:13:29 1999
+@@ -14,6 +14,9 @@
+
+ /* Primitives for controlling thread execution */
+
++#include <stdio.h>
++
++#ifndef USETHR_FUNCTIONS
+ static inline void restart(pthread_descr th)
+ {
+   kill(th->p_pid, PTHREAD_SIG_RESTART);
+@@ -27,7 +30,7 @@
+   sigdelset(&mask, PTHREAD_SIG_RESTART);  /* Unblock the restart signal */
+   do {
+     self->p_signal = 0;
+-    sigsuspend(&mask);  /* Wait for signal */
++    _sigsuspend(&mask);  /* Wait for signal */
+   } while (self->p_signal != PTHREAD_SIG_RESTART);
+ }
+
+@@ -44,7 +47,7 @@
+   if (! (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE)) {
+     do {
+       self->p_signal = 0;
+-      sigsuspend(&mask);  /* Wait for a signal */
++      _sigsuspend(&mask);  /* Wait for signal */
+     } while (self->p_signal != PTHREAD_SIG_RESTART);
+   }
+   self->p_cancel_jmp = NULL;
+@@ -53,3 +56,29 @@
+     sigprocmask(SIG_SETMASK, &mask, NULL);
+   }
+ }
++#else
++
++#include <sys/syscall.h>
++#include <unistd.h>
++
++static inline void restart(pthread_descr th)
++{
++  syscall(SYS_thr_wakeup, th->p_pid);
++}
++
++static inline void suspend(pthread_descr self)
++{
++  syscall(SYS_thr_sleep, NULL);
++}
++
++static inline void suspend_with_cancellation(pthread_descr self)
++{
++  /* What we think happens here is that if a PTHREAD_SIG_CANCEL
++     is sent, thr_sleep will be awaken.  It should return
++     EINTR, but it will just return 0 unless we fix it.
++
++     So we shouldn't need any of the fancy jmpbuf stuff
++  */
++  syscall(SYS_thr_sleep, NULL);
++}
++#endif
+diff -ur ./signals.c ../linuxthreads-0.71.new/signals.c
+--- ./signals.c Fri Dec 12 09:21:47 1997
++++ ../linuxthreads-0.71.new/signals.c Wed Oct 27 18:13:29 1999
+@@ -18,7 +18,6 @@
+ #include <errno.h>
+ #include "pthread.h"
+ #include "internals.h"
+-#include "spinlock.h"
+
+ int pthread_sigmask(int how, const sigset_t * newmask, sigset_t * oldmask)
+ {
+@@ -36,9 +35,11 @@
+     case SIG_BLOCK:
+       sigdelset(&mask, PTHREAD_SIG_CANCEL);
+       break;
++
+     case SIG_UNBLOCK:
+       sigdelset(&mask, PTHREAD_SIG_RESTART);
+       break;
++
+     }
+     newmask = &mask;
+   }
+@@ -67,7 +68,7 @@
+ }
+
+ /* The set of signals on which some thread is doing a sigwait */
+-static sigset_t sigwaited = 0;
++static sigset_t sigwaited = { { 0 } };
+ static pthread_mutex_t sigwaited_mut = PTHREAD_MUTEX_INITIALIZER;
+ static pthread_cond_t sigwaited_changed = PTHREAD_COND_INITIALIZER;
+
+@@ -115,7 +116,7 @@
+       /* Reset the signal count */
+       self->p_signal = 0;
+       /* Unblock the signals and wait for them */
+-      sigsuspend(&mask);
++      _sigsuspend(&mask);
+     }
+   }
+   self->p_cancel_jmp = NULL;
+diff -ur ./spinlock.h ../linuxthreads-0.71.new/spinlock.h
+--- ./spinlock.h Fri Dec 5 02:28:22 1997
++++ ../linuxthreads-0.71.new/spinlock.h Wed Oct 27 18:13:29 1999
+@@ -15,17 +15,20 @@
+
+ /* Spin locks */
+
+-static inline void acquire(int * spinlock)
++#include "libc_spinlock.h"
++
++
++static inline void acquire(spinlock_t *lck)
+ {
+-  while (testandset(spinlock)) __sched_yield();
++  _spin_lock (lck);
+ }
+
+-static inline void release(int * spinlock)
++static inline void release(spinlock_t *lck)
+ {
+ #ifndef RELEASE
+-  *spinlock = 0;
++  _spin_unlock (lck);;
+ #else
+-  RELEASE(spinlock);
++  RELEASE(lck);
+ #endif
+ }
