author     Roland McGrath <roland@gnu.org>   2002-05-27 02:13:47 +0000
committer  Roland McGrath <roland@gnu.org>   2002-05-27 02:13:47 +0000
commit     9403905d538fbb6fd33fa8439978a868aef56d9e (patch)
tree       90e4e2a2e9680ec2384dfcb93e5c284302e0a87e /libthreads
parent     629724c3ff3aeddc26ec64104f2d05915c6b3d4a (diff)
2002-05-26 Roland McGrath <roland@frob.com>
* alpha/cthreads.h, alpha/thread.c, alpha/csw.S, alpha/lock.S: New files, verbatim from CMU release MK83a user/threads/alpha.
Diffstat (limited to 'libthreads')
-rw-r--r--   libthreads/alpha/csw.S       | 200
-rw-r--r--   libthreads/alpha/cthreads.h  |  61
-rw-r--r--   libthreads/alpha/lock.S      |  84
-rw-r--r--   libthreads/alpha/thread.c    |  90
4 files changed, 435 insertions, 0 deletions
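
These four files are the machine-dependent half of the cthreads coroutine
("cproc") layer for Alpha: csw.S does the actual context switch and cproc
startup, lock.S provides the interlocked spin-lock primitives, cthreads.h
holds the machine-dependent types and macros, and thread.c seeds the
register state of a new Mach thread.  Purely as orientation, a hypothetical
caller (not part of this commit; `toy_cproc', `runq_lock' and the helper
names are made up) would drive the switch primitives roughly like this:

    /* Externs as given in the interface comments in csw.S below;
     * `child' is a cproc_t in the real code. */
    typedef long spin_lock_t;

    extern void cproc_prepare(void *child, long *child_context, long *stack);
    extern void cproc_switch(long *cur, long *next, spin_lock_t *lock);

    struct toy_cproc {
            long context;               /* saved stack pointer */
    };

    static spin_lock_t runq_lock;

    /* Make a fresh coroutine start in cthread_body(child) the first
     * time someone switches to it. */
    static void
    toy_create(struct toy_cproc *child, long *stack_top)
    {
            cproc_prepare(child, &child->context, stack_top);
    }

    /* Hand the processor to `next': save this coroutine's callee-saved
     * registers and SP, load next's, and release runq_lock on the way out. */
    static void
    toy_yield_to(struct toy_cproc *cur, struct toy_cproc *next)
    {
            cproc_switch(&cur->context, &next->context, &runq_lock);
    }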
diff --git a/libthreads/alpha/csw.S b/libthreads/alpha/csw.S
new file mode 100644
index 00000000..8c6ae309
--- /dev/null
+++ b/libthreads/alpha/csw.S
@@ -0,0 +1,200 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: csw.s,v $
+ * Revision 2.3 93/01/19 08:55:56 danner
+ * Locks are longs now. Put MBs before and after releasing
+ * locks.
+ * [93/01/15 af]
+ *
+ * Revision 2.2 93/01/14 18:04:23 danner
+ * Fixed bug in cproc_prepare, it was not setting up
+ * the PV register properly. Safer GP usage too.
+ * ANSIfied comments.
+ * [92/12/22 03:00:50 af]
+ *
+ * Created.
+ * [92/05/31 af]
+ *
+ */
+/*
+ * alpha/csw.s
+ *
+ * Context switch and cproc startup for ALPHA COROUTINE implementation.
+ */
+#include <mach/alpha/asm.h>
+
+ .text
+ .align 4
+
+#define CSW_IMASK \
+ IM_S0|IM_S1|IM_S2|IM_S3|IM_S4|IM_S5|IM_S6|IM_RA|IM_GP
+
+#define ARG_SAVE (6*8)
+#define SAVED_S0 (6*8)
+#define SAVED_S1 (7*8)
+#define SAVED_S2 (8*8)
+#define SAVED_S3 (9*8)
+#define SAVED_S4 (10*8)
+#define SAVED_S5 (11*8)
+#define SAVED_S6 (12*8)
+#define SAVED_GP (13*8)
+#define SAVED_PC (14*8)
+#define SAVED_BYTES (15*8)
+
+/*
+ * Suspend the current thread and resume the next one.
+ *
+ * void
+ * cproc_switch(cur, next, lock)
+ * long *cur;
+ * long *next;
+ * simple_lock *lock;
+ */
+LEAF(cproc_switch,3)
+ subq sp,SAVED_BYTES,sp /* allocate space for registers */
+ /* Save them registers */
+ stq ra,SAVED_PC(sp)
+ stq gp,SAVED_GP(sp)
+ stq s0,SAVED_S0(sp)
+ stq s1,SAVED_S1(sp)
+ stq s2,SAVED_S2(sp)
+ stq s3,SAVED_S3(sp)
+ stq s4,SAVED_S4(sp)
+ stq s5,SAVED_S5(sp)
+ stq s6,SAVED_S6(sp)
+
+ stq sp,0(a0) /* save current sp */
+ ldq sp,0(a1) /* restore next sp */
+ /* Reload them registers */
+ ldq ra,SAVED_PC(sp)
+ ldq gp,SAVED_GP(sp)
+ ldq s0,SAVED_S0(sp)
+ ldq s1,SAVED_S1(sp)
+ ldq s2,SAVED_S2(sp)
+ ldq s3,SAVED_S3(sp)
+ ldq s4,SAVED_S4(sp)
+ ldq s5,SAVED_S5(sp)
+ ldq s6,SAVED_S6(sp)
+ /* return to next thread */
+ .set noreorder
+ mb
+ stq zero,0(a2) /* clear lock */
+ mb
+ .set reorder
+ addq sp,SAVED_BYTES,sp
+ RET
+ END(cproc_switch)
+
+/*
+ * void
+ * cproc_start_wait(parent_context, child, stackp, lock)
+ * long *parent_context;
+ * cproc_t child;
+ * long stackp;
+ * simple_lock *lock;
+ */
+NESTED(cproc_start_wait, 4, SAVED_BYTES, zero, CSW_IMASK, 0)
+ ldgp gp,0(pv)
+ subq sp,SAVED_BYTES,sp /* allocate space for registers */
+ /* Save parent registers */
+ stq ra,SAVED_PC(sp)
+ stq gp,SAVED_GP(sp)
+ stq s0,SAVED_S0(sp)
+ stq s1,SAVED_S1(sp)
+ stq s2,SAVED_S2(sp)
+ stq s3,SAVED_S3(sp)
+ stq s4,SAVED_S4(sp)
+ stq s5,SAVED_S5(sp)
+ stq s6,SAVED_S6(sp)
+
+ stq sp,0(a0) /* save parent sp */
+
+ .set noreorder
+ mb
+ stq zero,0(a3) /* release lock */
+ mb
+ .set reorder
+
+ mov a2,sp /* get child sp */
+ subq sp,ARG_SAVE,sp /* regsave (sanity) */
+ mov a1,a0
+ CALL(cproc_waiting) /* cproc_waiting(child) */
+ /*
+ * Control never returns here.
+ */
+ END(cproc_start_wait)
+
+/*
+ * void
+ * cproc_prepare(child, child_context, stack)
+ * cproc_t child;
+ * long *child_context, *stack;
+ */
+LEAF(cproc_prepare,3)
+ ldgp gp,0(pv)
+ subq a2,ARG_SAVE,a2 /* cthread_body's fake frame */
+ stq a0,0(a2) /* cthread_body(child) */
+ subq a2,SAVED_BYTES,a2 /* cproc_switch's ``frame'' */
+ stq s0,SAVED_S0(a2)
+ stq s1,SAVED_S1(a2)
+ stq s2,SAVED_S2(a2)
+ stq s3,SAVED_S3(a2)
+ stq s4,SAVED_S4(a2)
+ stq s5,SAVED_S5(a2)
+ stq s6,SAVED_S6(a2)
+ stq gp,SAVED_GP(a2)
+ stq a2,0(a1) /* child context */
+ lda v0,1f
+ stq v0,SAVED_PC(a2)
+ RET
+
+ /*
+ * The only reason we get here is to load the arguments
+ * into the registers where they are supposed to be.
+ * The code above only put the argument(s) on the stack;
+ * now we load them.
+ */
+1: ldgp gp,0(ra) /* we get here from a cswitch */
+ lda v0,cthread_body
+ ldq a0,0(sp)
+ mov v0,ra
+ mov ra,pv /* funcall or return, either way */
+ RET
+ END(cproc_prepare)
+
+/*
+ * unsigned long
+ * cthread_sp()
+ *
+ * Returns the current stack pointer.
+ */
+
+LEAF(cthread_sp,0)
+ mov sp, v0
+ RET
+ END(cthread_sp);
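
Reader's note (not part of the diff): the switch frame that cproc_switch
pushes, and that cproc_prepare builds by hand on a new child's stack, can
be pictured as the following C struct.  The struct exists nowhere in the
sources; it only mirrors the SAVED_* offsets above.

    struct alpha_switch_frame {                 /* offsets               */
            long arg_save[6];                   /*   0..40, ARG_SAVE     */
            long s0, s1, s2, s3, s4, s5, s6;    /*  48..96, SAVED_S0-S6  */
            long gp;                            /* 104, SAVED_GP         */
            long pc;                            /* 112, SAVED_PC (ra)    */
    };                                          /* sizeof == SAVED_BYTES */

For a prepared child, SAVED_PC points at the local label 1: in
cproc_prepare, so the first switch into the child falls into the small
trampoline that loads the cthread_body argument from the fake ARG_SAVE
slot and jumps to cthread_body.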
diff --git a/libthreads/alpha/cthreads.h b/libthreads/alpha/cthreads.h
new file mode 100644
index 00000000..f2218f40
--- /dev/null
+++ b/libthreads/alpha/cthreads.h
@@ -0,0 +1,61 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthreads.h,v $
+ * Revision 2.4 93/11/17 19:00:42 dbg
+ * When compiling with GCC, inline cthread_sp().
+ * [93/09/21 af]
+ *
+ * Revision 2.3 93/01/19 08:56:02 danner
+ * Locks are now 64bits.
+ * [92/12/30 af]
+ *
+ * Revision 2.2 93/01/14 18:04:28 danner
+ * Created.
+ * [92/05/31 af]
+ *
+ */
+
+#ifndef _MACHINE_CTHREADS_H_
+#define _MACHINE_CTHREADS_H_
+
+typedef long spin_lock_t;
+#define SPIN_LOCK_INITIALIZER 0
+#define spin_lock_init(s) *(s)=0
+#define spin_lock_locked(s) (*(s) != 0)
+
+#if defined(__GNUC__)
+
+#define cthread_sp() \
+ ({ register vm_offset_t _sp__; \
+ __asm__("or $31,$30,%0" \
+ : "=r" (_sp__) ); \
+ _sp__; })
+
+#endif /* __GNUC__ */
+
+#endif /* _MACHINE_CTHREADS_H_ */
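
Reader's note (not part of the diff): a hypothetical user of the macros
above, assuming the machine-independent <cthreads.h> pulls in this header
and that <mach.h> supplies vm_offset_t, might look like:

    #include <mach.h>
    #include <cthreads.h>

    static spin_lock_t list_lock = SPIN_LOCK_INITIALIZER;

    int
    list_lock_is_held(void)
    {
            /* spin_lock_locked() just reads the long; no interlock. */
            return spin_lock_locked(&list_lock);
    }

    vm_offset_t
    current_sp(void)
    {
            /* Under GCC this is a single "or $31,$30,%0", i.e. a move
             * of sp into the result register; without GCC the assembly
             * cthread_sp() in csw.S gets linked in instead. */
            return cthread_sp();
    }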
diff --git a/libthreads/alpha/lock.S b/libthreads/alpha/lock.S
new file mode 100644
index 00000000..657ac1f8
--- /dev/null
+++ b/libthreads/alpha/lock.S
@@ -0,0 +1,84 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: lock.s,v $
+ * Revision 2.3 93/03/09 10:59:04 danner
+ * Lost in previous merge:
+ * Added memory barriers where needed.
+ * Locks are longs now.
+ * [93/01/15 af]
+ *
+ * Revision 2.2 93/01/14 18:04:31 danner
+ * Mumble, "try_lock" really means "try_hard_once".
+ * That is, try hard until you either get it or lose it.
+ * [92/12/24 af]
+ * Created a while back.
+ * [92/12/10 af]
+ *
+ */
+
+#include <mach/alpha/asm.h>
+
+/*
+ The C interface for this function is
+
+ boolean_t
+ spin_try_lock(m)
+ long * m;
+
+ This function has slightly different semantics from a plain TAS: it
+ returns a boolean value that indicates whether the lock was acquired
+ or not.  If not, the caller is presumed to retry after some
+ appropriate delay.
+ */
+ .text
+ .align 4
+ .set noreorder
+
+LEAF(spin_try_lock,1)
+ mb
+ ldq_l t0,0(a0)
+ or zero,2,v0 /* build lock value */
+ bne t0,nope /* already set, forget it */
+ stq_c v0,0(a0) /* see if we still had the lock */
+ beq v0,yipe /* if we just took an interrupt.. */
+ RET /* if v0 != 0 then we got it */
+nope:
+ mov zero,v0 /* failed to acquire lock */
+ RET
+yipe: br zero,spin_try_lock /* I love branch predictions.. */
+
+ END(spin_try_lock)
+
+LEAF(spin_unlock,1)
+ mb /* but this might be needed.. */
+ stq zero,0(a0) /* no need for interlocks (sec 10.5.2) */
+ mb /* but this might be needed.. */
+ RET
+
+ END(spin_unlock)
+
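
Reader's note (not part of the diff): the comment above leaves the retry
policy to the caller.  A purely illustrative blocking acquire built on
these two entry points (the helper name and the use of cthread_yield()
are assumptions, not something this commit adds) could read:

    extern int  spin_try_lock(long *m);     /* above; nonzero if acquired  */
    extern void spin_unlock(long *m);       /* above; mb on either side    */
    extern void cthread_yield(void);        /* MI cthreads yield primitive */

    static void
    lock_with_backoff(long *m)
    {
            /* Try once; on failure give up the processor and retry
             * instead of hammering the interlocked sequence. */
            while (!spin_try_lock(m))
                    cthread_yield();
    }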
diff --git a/libthreads/alpha/thread.c b/libthreads/alpha/thread.c
new file mode 100644
index 00000000..25e65df1
--- /dev/null
+++ b/libthreads/alpha/thread.c
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: thread.c,v $
+ * Revision 2.3 93/02/01 09:56:49 danner
+ * mach/mach.h -> mach.h
+ * [93/01/28 danner]
+ *
+ * Revision 2.2 93/01/14 18:04:35 danner
+ * Created.
+ * [92/05/31 af]
+ *
+ */
+/*
+ * alpha/thread.c
+ *
+ * Cproc startup for ALPHA Cthreads implementation.
+ */
+
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+#include <mach.h>
+
+/*
+ * C library imports:
+ */
+extern bzero();
+
+/*
+ * Set up the initial state of a MACH thread
+ * so that it will invoke routine(child)
+ * when it is resumed.
+ */
+void
+cproc_setup(
+ register cproc_t child,
+ thread_t thread,
+ void (*routine)(cproc_t))
+{
+ register integer_t *top;
+ struct alpha_thread_state state;
+ register struct alpha_thread_state *ts;
+ kern_return_t r;
+
+ /*
+ * Set up ALPHA call frame and registers.
+ */
+ ts = &state;
+ bzero((char *) ts, sizeof(struct alpha_thread_state));
+
+ top = (integer_t *) (child->stack_base + child->stack_size);
+
+ /*
+ * Set pc & pv to procedure entry, pass one arg in register,
+ * allocate room for 6 regsave on the stack frame (sanity).
+ */
+ ts->pc = (natural_t) routine;
+ ts->r27 = (natural_t) routine;
+ ts->r16 = (integer_t) child;
+ ts->r30 = (integer_t) (top - 6); /* see ARG_SAVE in csw.s */
+
+
+ MACH_CALL(thread_set_state(thread, ALPHA_THREAD_STATE, (thread_state_t) &state, ALPHA_THREAD_STATE_COUNT), r);
+}
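
Reader's note (not part of the diff): cproc_setup() gets called from the
machine-independent cproc code when a fresh Mach thread is created for a
cthread.  A stripped-down, hypothetical call sequence (error handling and
the real cproc bookkeeping omitted; `start_child' is a made-up name):

    #include <cthreads.h>
    #include "cthread_internals.h"
    #include <mach.h>

    extern void cthread_body(cproc_t);          /* MI cthreads entry point */

    static kern_return_t
    start_child(cproc_t child)
    {
            thread_t      t;
            kern_return_t r;

            /* Create a suspended thread in this task ... */
            r = thread_create(mach_task_self(), &t);
            if (r != KERN_SUCCESS)
                    return r;

            /* ... aim its pc/pv at cthread_body, pass `child' in a0, and
             * hand it the top of child's stack minus the six-quadword
             * argument save area (cf. ARG_SAVE in csw.S) ... */
            cproc_setup(child, t, cthread_body);

            /* ... then let it run. */
            return thread_resume(t);
    }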