mirror of https://git.freebsd.org/ports.git (synced 2025-05-18 01:53:13 -04:00)
- Update emulators/qemu-devel to 1.4.0 with preliminary bsd-user patches. Thanks to sson, cognet, and others for much improved bsd-user support - it now runs at least quite a few mips64 and single-threaded arm binaries; see https://wiki.freebsd.org/QemuUserModeHowTo
2301 lines
60 KiB
Text
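The bulk of this commit replaces the no-op threading stubs in bsd-user/main.c with a cooperative "exclusive operation" scheme: threads running guest code register themselves, and a thread that needs to emulate an atomic guest operation waits until no CPU is executing guest code (see the main.c hunk below). The standalone sketch that follows illustrates that pattern with plain pthreads; all names and the demo driver are invented for illustration and are not QEMU's code - the real patch additionally kicks running CPUs out of the translation loop with cpu_exit() rather than waiting for them to finish on their own.

/*
 * Standalone sketch (not QEMU code) of the "exclusive operation" pattern
 * introduced in the bsd-user/main.c hunk below: worker threads mark
 * themselves as running guest code, and an exclusive operation waits
 * until none are running before it proceeds.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t excl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  all_stopped = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  excl_done   = PTHREAD_COND_INITIALIZER;
static int exclusive_pending;   /* an exclusive operation is in progress */
static int running;             /* threads currently "executing guest code" */

static void cpu_exec_start_sketch(void)
{
    pthread_mutex_lock(&excl_lock);
    while (exclusive_pending)                 /* wait out any exclusive op */
        pthread_cond_wait(&excl_done, &excl_lock);
    running++;
    pthread_mutex_unlock(&excl_lock);
}

static void cpu_exec_end_sketch(void)
{
    pthread_mutex_lock(&excl_lock);
    running--;
    if (exclusive_pending && running == 0)    /* last runner wakes the waiter */
        pthread_cond_signal(&all_stopped);
    pthread_mutex_unlock(&excl_lock);
}

static void do_exclusive_op_sketch(void)
{
    pthread_mutex_lock(&excl_lock);
    while (exclusive_pending)                 /* serialize exclusive ops */
        pthread_cond_wait(&excl_done, &excl_lock);
    exclusive_pending = 1;
    while (running)                           /* wait for guest code to drain */
        pthread_cond_wait(&all_stopped, &excl_lock);
    /* the emulated atomic operation would run here, under excl_lock */
    exclusive_pending = 0;
    pthread_cond_broadcast(&excl_done);
    pthread_mutex_unlock(&excl_lock);
}

static void *guest_thread(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        cpu_exec_start_sketch();
        /* a translation block would execute here */
        cpu_exec_end_sketch();
    }
    return NULL;
}

int main(void)
{
    pthread_t tid[4];

    for (int i = 0; i < 4; i++)
        pthread_create(&tid[i], NULL, guest_thread, NULL);
    for (int i = 0; i < 1000; i++)
        do_exclusive_op_sketch();             /* e.g. an emulated atomic op */
    for (int i = 0; i < 4; i++)
        pthread_join(tid[i], NULL);
    puts("done");
    return 0;
}

A sketch like this builds with something like "cc -std=c99 -pthread excl.c" (file name hypothetical).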
diff --git a/bsd-user/arm/target_signal.h b/bsd-user/arm/target_signal.h
index 19cc188..6b7bb67 100644
--- a/bsd-user/arm/target_signal.h
+++ b/bsd-user/arm/target_signal.h
@@ -11,4 +11,29 @@ static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
#define TARGET_MINSIGSTKSZ (1024 * 4)
#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)
+typedef target_ulong target_mcontext_t; /* dummy */
+
+typedef struct target_ucontext {
+ target_sigset_t uc_sigmask;
+ target_mcontext_t uc_mcontext;
+ abi_ulong uc_link;
+ target_stack_t uc_stack;
+ int32_t uc_flags;
+ int32_t __spare__[4];
+} target_ucontext_t;
+
+static inline int
+get_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
+{
+ fprintf(stderr, "ARM doesn't have support for get_mcontext()\n");
+ return (-TARGET_ENOSYS);
+}
+
+static inline int
+set_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
+{
+ fprintf(stderr, "ARM doesn't have support for set_mcontext()\n");
+ return (-TARGET_ENOSYS);
+}
+
#endif /* TARGET_SIGNAL_H */
diff --git a/bsd-user/freebsd/strace.list b/bsd-user/freebsd/strace.list
index 1edf412..b09f766 100644
--- a/bsd-user/freebsd/strace.list
+++ b/bsd-user/freebsd/strace.list
@@ -38,6 +38,7 @@
{ TARGET_FREEBSD_NR_fsync, "fsync", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_ftruncate, "ftruncate", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_futimes, "futimes", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_getcontext, "getcontext", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_getdirentries, "getdirentries", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_freebsd6_mmap, "freebsd6_mmap", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_getegid, "getegid", "%s()", NULL, NULL },
@@ -123,6 +124,7 @@
{ TARGET_FREEBSD_NR_semop, "semop", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_sendmsg, "sendmsg", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_sendto, "sendto", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_setcontext, "setcontext", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_setegid, "setegid", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_seteuid, "seteuid", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_setgid, "setgid", NULL, NULL, NULL },
@@ -160,6 +162,15 @@
{ TARGET_FREEBSD_NR_sync, "sync", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_sysarch, "sysarch", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_syscall, "syscall", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_create, "thr_create", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_exit, "thr_exit", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_kill, "thr_kill", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_kill2, "thr_kill2", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_new, "thr_new", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_self, "thr_self", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_set_name, "thr_set_name", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_suspend, "thr_suspend", NULL, NULL, NULL },
+{ TARGET_FREEBSD_NR_thr_wake, "thr_wake", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_truncate, "truncate", NULL, NULL, NULL },
{ TARGET_FREEBSD_NR_umask, "umask", "%s(%#o)", NULL, NULL },
{ TARGET_FREEBSD_NR_unlink, "unlink", "%s(\"%s\")", NULL, NULL },
diff --git a/bsd-user/i386/target_signal.h b/bsd-user/i386/target_signal.h
index 285e7f9..28481ce 100644
--- a/bsd-user/i386/target_signal.h
+++ b/bsd-user/i386/target_signal.h
@@ -11,4 +11,29 @@ static inline abi_ulong get_sp_from_cpustate(CPUX86State *state)
#define TARGET_MINSIGSTKSZ (512 * 4)
#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)
+typedef target_ulong target_mcontext_t; /* dummy */
+
+typedef struct target_ucontext {
+ target_sigset_t uc_sigmask;
+ target_mcontext_t uc_mcontext;
+ abi_ulong uc_link;
+ target_stack_t uc_stack;
+ int32_t uc_flags;
+ int32_t __spare__[4];
+} target_ucontext_t;
+
+static inline int
+get_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
+{
+ fprintf(stderr, "i386 doesn't have support for get_mcontext()\n");
+ return (-TARGET_ENOSYS);
+}
+
+static inline int
+set_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
+{
+ fprintf(stderr, "i386 doesn't have support for set_mcontext()\n");
+ return (-TARGET_ENOSYS);
+}
+
#endif /* TARGET_SIGNAL_H */
diff --git a/bsd-user/main.c b/bsd-user/main.c
index 146f022..7a99537 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -34,6 +34,10 @@
#include "qemu-timer.h"
#include "envlist.h"
+#if defined(CONFIG_USE_NPTL) && defined(__FreeBSD__)
+#include <sys/thr.h>
+#endif
+
#define DEBUG_LOGFILE "/tmp/qemu.log"
int singlestep;
@@ -70,42 +74,186 @@ int cpu_get_pic_interrupt(CPUX86State *env)
}
#endif
-/* These are no-ops because we are not threadsafe. */
|
|
-static inline void cpu_exec_start(CPUArchState *env)
|
|
+#if defined(CONFIG_USE_NPTL)
|
|
+/* Helper routines for implementing atomic operations. */
|
|
+
|
|
+/*
|
|
+ * To implement exclusive operations we force all cpus to synchronize.
|
|
+ * We don't require a full sync, only that no cpus are executing guest code.
|
|
+ * The alternative is to map target atomic ops onto host eqivalents,
|
|
+ * which requires quite a lot of per host/target work.
|
|
+ */
|
|
+static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
+static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
|
|
+static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
|
|
+static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
|
|
+static int pending_cpus;
|
|
+
|
|
+/* Make sure everything is in a consistent state for calling fork(). */
|
|
+void fork_start(void)
|
|
{
|
|
+ pthread_mutex_lock(&tb_lock);
|
|
+ pthread_mutex_lock(&exclusive_lock);
|
|
+ mmap_fork_start();
|
|
}
|
|
|
|
-static inline void cpu_exec_end(CPUArchState *env)
|
|
+void fork_end(int child)
|
|
{
|
|
+ mmap_fork_end(child);
|
|
+ if (child) {
|
|
+ /*
|
|
+ * Child processes created by fork() only have a single thread.
|
|
+ * Discard information about the parent threads.
|
|
+ */
|
|
+ first_cpu = thread_env;
|
|
+ thread_env->next_cpu = NULL;
|
|
+ pending_cpus = 0;
|
|
+ pthread_mutex_init(&exclusive_lock, NULL);
|
|
+ pthread_mutex_init(&cpu_list_mutex, NULL);
|
|
+ pthread_cond_init(&exclusive_cond, NULL);
|
|
+ pthread_cond_init(&exclusive_resume, NULL);
|
|
+ pthread_mutex_init(&tb_lock, NULL);
|
|
+ gdbserver_fork(thread_env);
|
|
+ } else {
|
|
+ pthread_mutex_unlock(&exclusive_lock);
|
|
+ pthread_mutex_unlock(&tb_lock);
|
|
+ }
|
|
}
|
|
|
|
-static inline void start_exclusive(void)
|
|
+/*
|
|
+ * Wait for pending exclusive operations to complete. The exclusive lock
|
|
+ * must be held.
|
|
+ */
|
|
+static inline void
|
|
+exclusive_idle(void)
|
|
{
|
|
+ while (pending_cpus) {
|
|
+ pthread_cond_wait(&exclusive_resume, &exclusive_lock);
|
|
+ }
|
|
}
|
|
|
|
-static inline void end_exclusive(void)
|
|
+/* Start an exclusive operation. Must only be called outside of cpu_exec. */
|
|
+static inline void
|
|
+start_exclusive(void)
|
|
{
|
|
+ CPUArchState *other;
|
|
+
|
|
+ pthread_mutex_lock(&exclusive_lock);
|
|
+ exclusive_idle();
|
|
+
|
|
+ pending_cpus = 1;
|
|
+ /* Make all other cpus stop executing. */
|
|
+ for (other = first_cpu; other; other = other->next_cpu) {
|
|
+ if (other->running) {
|
|
+ pending_cpus++;
|
|
+ cpu_exit(other);
|
|
+ }
|
|
+ }
|
|
+ if (pending_cpus > 1) {
|
|
+ pthread_cond_wait(&exclusive_cond, &exclusive_lock);
|
|
+ }
|
|
}
|
|
|
|
-void fork_start(void)
|
|
+/* Finish an exclusive operation. */
|
|
+static inline void
|
|
+end_exclusive(void)
|
|
{
|
|
+ pending_cpus = 0;
|
|
+ pthread_cond_broadcast(&exclusive_resume);
|
|
+ pthread_mutex_unlock(&exclusive_lock);
|
|
}
|
|
|
|
-void fork_end(int child)
|
|
+/* Wait for exclusive ops to finish, and begin cpu execution. */
|
|
+static inline void
|
|
+cpu_exec_start(CPUArchState *env)
|
|
+{
|
|
+ pthread_mutex_lock(&exclusive_lock);
|
|
+ exclusive_idle();
|
|
+ env->running = 1;
|
|
+ pthread_mutex_unlock(&exclusive_lock);
|
|
+}
|
|
+
|
|
+/* Mark cpu as not excuting, and release pending exclusive ops. */
|
|
+static inline void
|
|
+cpu_exec_end(CPUArchState *env)
|
|
+{
|
|
+ pthread_mutex_lock(&exclusive_lock);
|
|
+ env->running = 0;
|
|
+ if (pending_cpus > 1) {
|
|
+ pending_cpus--;
|
|
+ if (pending_cpus == 1) {
|
|
+ pthread_cond_signal(&exclusive_cond);
|
|
+ }
|
|
+ }
|
|
+ exclusive_idle();
|
|
+ pthread_mutex_unlock(&exclusive_lock);
|
|
+}
|
|
+
|
|
+void
|
|
+cpu_list_lock(void)
|
|
+{
|
|
+ pthread_mutex_lock(&cpu_list_mutex);
|
|
+}
|
|
+
|
|
+void
|
|
+cpu_list_unlock(void)
|
|
+{
|
|
+ pthread_mutex_unlock(&cpu_list_mutex);
|
|
+}
|
|
+
|
|
+#else /* ! CONFIG_USE_NPTL */
|
|
+
|
|
+/* These are no-ops because we are not threadsafe. */
|
|
+void
|
|
+fork_start(void)
|
|
+{
|
|
+}
|
|
+
|
|
+void
|
|
+fork_end(int child)
|
|
{
|
|
if (child) {
|
|
gdbserver_fork(thread_env);
|
|
}
|
|
}
|
|
|
|
-void cpu_list_lock(void)
|
|
+static inline void
|
|
+exclusive_idle(void)
|
|
+{
|
|
+}
|
|
+
|
|
+static inline void
|
|
+start_exclusive(void)
|
|
{
|
|
}
|
|
|
|
-void cpu_list_unlock(void)
|
|
+static inline void
|
|
+end_exclusive(void)
|
|
{
|
|
}
|
|
|
|
+static inline void
|
|
+cpu_exec_start(CPUArchState *env)
|
|
+{
|
|
+}
|
|
+
|
|
+
|
|
+static inline void
|
|
+cpu_exec_end(CPUArchState *env)
|
|
+{
|
|
+}
|
|
+
|
|
+void
|
|
+cpu_list_lock(void)
|
|
+{
|
|
+}
|
|
+
|
|
+void
|
|
+cpu_list_unlock(void)
|
|
+{
|
|
+}
|
|
+#endif /* CONFIG_USE_NPTL */
|
|
+
|
|
#ifdef TARGET_I386
|
|
/***********************************************************/
|
|
/* CPUX86 core interface */
|
|
@@ -740,7 +888,10 @@ void cpu_loop(CPUMIPSState *env)
|
|
|
|
for(;;) {
|
|
cpu_exec_start(env);
|
|
+ /* XXX there is a concurrency problem - giant lock for now */
|
|
+ pthread_mutex_lock(&exclusive_lock); /* XXX */
|
|
trapnr = cpu_mips_exec(env);
|
|
+ pthread_mutex_unlock(&exclusive_lock); /* XXX */
|
|
cpu_exec_end(env);
|
|
switch(trapnr) {
|
|
case EXCP_SYSCALL: /* syscall exception */
|
|
@@ -1206,6 +1357,18 @@ static void usage(void)
|
|
|
|
THREAD CPUArchState *thread_env;
|
|
|
|
+void task_settid(TaskState *ts)
|
|
+{
|
|
+ if (0 == ts->ts_tid) {
|
|
+#ifdef CONFIG_USE_NPTL
|
|
+ (void)thr_self(&ts->ts_tid);
|
|
+#else
|
|
+ /* When no threads then just use PID */
|
|
+ ts->ts_tid = getpid();
|
|
+#endif
|
|
+ }
|
|
+}
|
|
+
|
|
void stop_all_tasks(void)
|
|
{
|
|
/*
|
|
diff --git a/bsd-user/mips/target_signal.h b/bsd-user/mips/target_signal.h
|
|
index 28871c3..484cfd8 100644
|
|
--- a/bsd-user/mips/target_signal.h
|
|
+++ b/bsd-user/mips/target_signal.h
|
|
@@ -6,9 +6,187 @@
|
|
#define TARGET_MINSIGSTKSZ (512 * 4)
|
|
#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)
|
|
|
|
-static inline abi_ulong get_sp_from_cpustate(CPUMIPSState *state)
|
|
+struct target_sigcontext {
|
|
+ target_sigset_t sc_mask; /* signal mask to retstore */
|
|
+ int32_t sc_onstack; /* sigstack state to restore */
|
|
+ abi_long sc_pc; /* pc at time of signal */
|
|
+ abi_long sc_reg[32]; /* processor regs 0 to 31 */
|
|
+ abi_long mullo, mulhi; /* mullo and mulhi registers */
|
|
+ int32_t sc_fpused; /* fp has been used */
|
|
+ abi_long sc_fpregs[33]; /* fp regs 0 to 31 & csr */
|
|
+ abi_long sc_fpc_eir; /* fp exception instr reg */
|
|
+ /* int32_t reserved[8]; */
|
|
+};
|
|
+
|
|
+typedef struct target_mcontext {
|
|
+ int32_t mc_onstack; /* sigstack state to restore */
|
|
+ abi_long mc_pc; /* pc at time of signal */
|
|
+ abi_long mc_regs[32]; /* process regs 0 to 31 */
|
|
+ abi_long sr; /* status register */
|
|
+ abi_long mullo, mulhi;
|
|
+ int32_t mc_fpused; /* fp has been used */
|
|
+ abi_long mc_fpregs[33]; /* fp regs 0 to 32 & csr */
|
|
+ abi_long mc_fpc_eir; /* fp exception instr reg */
|
|
+ abi_ulong mc_tls; /* pointer to TLS area */
|
|
+} target_mcontext_t;
|
|
+
|
|
+typedef struct target_ucontext {
|
|
+ target_sigset_t uc_sigmask;
|
|
+ target_mcontext_t uc_mcontext;
|
|
+ abi_ulong uc_link;
|
|
+ target_stack_t uc_stack;
|
|
+ int32_t uc_flags;
|
|
+ int32_t __spare__[4];
|
|
+} target_ucontext_t;
|
|
+
|
|
+struct target_sigframe {
|
|
+ abi_ulong sf_signum;
|
|
+ abi_ulong sf_siginfo; /* code or pointer to sf_si */
|
|
+ abi_ulong sf_ucontext; /* points to sf_uc */
|
|
+ abi_ulong sf_addr; /* undocumented 4th arg */
|
|
+ target_ucontext_t sf_uc; /* = *sf_uncontext */
|
|
+ target_siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case)*/
|
|
+ uint32_t __spare__[2];
|
|
+};
|
|
+
|
|
+
|
|
+/* Get the stack pointer. */
|
|
+static inline abi_ulong
|
|
+get_sp_from_cpustate(CPUMIPSState *state)
|
|
{
|
|
return state->active_tc.gpr[29];
|
|
}
|
|
|
|
+/*
|
|
+ * Compare to mips/mips/pm_machdep.c sendsig()
|
|
+ * Assumes that "frame" memory is locked.
|
|
+ */
|
|
+static inline int
|
|
+set_sigtramp_args(CPUMIPSState *regs, int sig, struct target_sigframe *frame,
|
|
+ abi_ulong frame_addr, struct target_sigaction *ka)
|
|
+{
|
|
+
|
|
+ frame->sf_signum = sig;
|
|
+ frame->sf_siginfo = 0;
|
|
+ frame->sf_ucontext = 0;
|
|
+
|
|
+ frame->sf_si.si_signo = sig;
|
|
+ frame->sf_si.si_code = TARGET_SA_SIGINFO;
|
|
+ frame->sf_si.si_addr = regs->CP0_BadVAddr;
|
|
+
|
|
+ /*
|
|
+ * Arguments to signal handler:
|
|
+ * a0 ($4) = signal number
|
|
+ * a1 ($5) = siginfo pointer
|
|
+ * a2 ($6) = ucontext pointer
|
|
+ * PC = signal handler pointer
|
|
+ * t9 ($25) = signal handler pointer
|
|
+ * $29 = point to sigframe struct
|
|
+ * ra ($31) = sigtramp at base of user stack
|
|
+ */
|
|
+ regs->active_tc.gpr[ 4] = sig;
|
|
+ regs->active_tc.gpr[ 5] = frame_addr +
|
|
+ offsetof(struct target_sigframe, sf_si);
|
|
+ regs->active_tc.gpr[25] = regs->active_tc.PC = ka->_sa_handler;
|
|
+ regs->active_tc.gpr[29] = frame_addr;
|
|
+ regs->active_tc.gpr[31] = TARGET_PS_STRINGS - TARGET_SZSIGCODE;
|
|
+
|
|
+ return (0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Compare to mips/mips/pm_machdep.c get_mcontext()
|
|
+ * Assumes that the memory is locked if mcp points to user memory.
|
|
+ */
|
|
+static inline int
|
|
+get_mcontext(CPUMIPSState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+ int i, err = 0;
|
|
+
|
|
+ if (flags & TARGET_MC_ADD_MAGIC) {
|
|
+ mcp->mc_regs[0] = tswapal(TARGET_UCONTEXT_MAGIC);
|
|
+ } else {
|
|
+ mcp->mc_regs[0] = 0;
|
|
+ }
|
|
+
|
|
+ if (flags & TARGET_MC_SET_ONSTACK) {
|
|
+ mcp->mc_onstack = tswapal(1);
|
|
+ } else {
|
|
+ mcp->mc_onstack = 0;
|
|
+ }
|
|
+
|
|
+ for(i = 1; i < 32; i++)
|
|
+ mcp->mc_regs[i] = tswapal(regs->active_tc.gpr[i]);
|
|
+
|
|
+#if 0 /* XXX FP is not used right now. */
|
|
+ abi_ulong used_fp = used_math() ? TARGET_MDTD_FPUSED : 0;
|
|
+
|
|
+ mcp->mc_fpused = used_fp;
|
|
+ if (used_fp) {
|
|
+ preempt_disable();
|
|
+ if (!is_fpu_owner()) {
|
|
+ own_fpu();
|
|
+ for(i = 0; i < 33; i++)
|
|
+ mcp->mc_fpregs[i] = tswapal(regs->active_fpu.fpr[i]);
|
|
+ }
|
|
+ preempt_enable();
|
|
+ }
|
|
+#else
|
|
+ mcp->mc_fpused = 0;
|
|
+#endif
|
|
+
|
|
+ if (flags & TARGET_MC_GET_CLEAR_RET) {
|
|
+ mcp->mc_regs[2] = 0; /* v0 = 0 */
|
|
+ mcp->mc_regs[3] = 0; /* v1 = 0 */
|
|
+ mcp->mc_regs[7] = 0; /* a3 = 0 */
|
|
+ }
|
|
+
|
|
+ mcp->mc_pc = tswapal(regs->active_tc.PC);
|
|
+ mcp->mullo = tswapal(regs->active_tc.LO[0]);
|
|
+ mcp->mulhi = tswapal(regs->active_tc.HI[0]);
|
|
+ mcp->mc_tls = tswapal(regs->tls_value);
|
|
+
|
|
+ /* Don't do any of the status and cause registers. */
|
|
+
|
|
+ return (err);
|
|
+}
|
|
+
|
|
+/* Compare to mips/mips/pm_machdep.c set_mcontext() */
|
|
+static inline int
|
|
+set_mcontext(CPUMIPSState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+ int i, err = 0;
|
|
+
|
|
+ for(i = 1; i < 32; i++)
|
|
+ regs->active_tc.gpr[i] = tswapal(mcp->mc_regs[i]);
|
|
+
|
|
+#if 0 /* XXX FP is not used right now */
|
|
+ abi_ulong used_fp = 0;
|
|
+
|
|
+ used_fp = tswapal(mcp->mc_fpused)
|
|
+ conditional_used_math(used_fp);
|
|
+
|
|
+ preempt_disabled();
|
|
+ if (used_math()) {
|
|
+ /* restore fpu context if we have used it before */
|
|
+ own_fpu();
|
|
+ for (i = 0; i < 32; i++)
|
|
+ regs->active_fpu.fpr[i] = tswapal(mcp->mc_fpregs[i]);
|
|
+ } else {
|
|
+ /* Signal handler may have used FPU. Give it up. */
|
|
+ lose_fpu();
|
|
+ }
|
|
+ preempt_enable();
|
|
+#endif
|
|
+
|
|
+ regs->CP0_EPC = tswapal(mcp->mc_pc);
|
|
+ regs->active_tc.LO[0] = tswapal(mcp->mullo);
|
|
+ regs->active_tc.HI[0] = tswapal(mcp->mulhi);
|
|
+ regs->tls_value = tswapal(mcp->mc_tls);
|
|
+
|
|
+ /* Don't do any of the status and cause registers. */
|
|
+
|
|
+ return (err);
|
|
+}
|
|
+
|
|
#endif /* TARGET_SIGNAL_H */
|
|
diff --git a/bsd-user/mips64/target_signal.h b/bsd-user/mips64/target_signal.h
|
|
index d671f4e..e9c8a9f 100644
|
|
--- a/bsd-user/mips64/target_signal.h
|
|
+++ b/bsd-user/mips64/target_signal.h
|
|
@@ -7,11 +7,186 @@
|
|
#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)
|
|
#define TARGET_SZSIGCODE 16
|
|
|
|
-#define TARGET_UCONTEXT_MAGIC 0xACEDBADE
|
|
+struct target_sigcontext {
|
|
+ target_sigset_t sc_mask; /* signal mask to retstore */
|
|
+ int32_t sc_onstack; /* sigstack state to restore */
|
|
+ abi_long sc_pc; /* pc at time of signal */
|
|
+ abi_long sc_reg[32]; /* processor regs 0 to 31 */
|
|
+ abi_long mullo, mulhi; /* mullo and mulhi registers */
|
|
+ int32_t sc_fpused; /* fp has been used */
|
|
+ abi_long sc_fpregs[33]; /* fp regs 0 to 31 & csr */
|
|
+ abi_long sc_fpc_eir; /* fp exception instr reg */
|
|
+ /* int32_t reserved[8]; */
|
|
+};
|
|
|
|
-static inline abi_ulong get_sp_from_cpustate(CPUMIPSState *state)
|
|
+typedef struct target_mcontext {
|
|
+ int32_t mc_onstack; /* sigstack state to restore */
|
|
+ abi_long mc_pc; /* pc at time of signal */
|
|
+ abi_long mc_regs[32]; /* process regs 0 to 31 */
|
|
+ abi_long sr; /* status register */
|
|
+ abi_long mullo, mulhi;
|
|
+ int32_t mc_fpused; /* fp has been used */
|
|
+ abi_long mc_fpregs[33]; /* fp regs 0 to 32 & csr */
|
|
+ abi_long mc_fpc_eir; /* fp exception instr reg */
|
|
+ abi_ulong mc_tls; /* pointer to TLS area */
|
|
+} target_mcontext_t;
|
|
+
|
|
+typedef struct target_ucontext {
|
|
+ target_sigset_t uc_sigmask;
|
|
+ target_mcontext_t uc_mcontext;
|
|
+ abi_ulong uc_link;
|
|
+ target_stack_t uc_stack;
|
|
+ int32_t uc_flags;
|
|
+ int32_t __spare__[4];
|
|
+} target_ucontext_t;
|
|
+
|
|
+struct target_sigframe {
|
|
+ abi_ulong sf_signum;
|
|
+ abi_ulong sf_siginfo; /* code or pointer to sf_si */
|
|
+ abi_ulong sf_ucontext; /* points to sf_uc */
|
|
+ abi_ulong sf_addr; /* undocumented 4th arg */
|
|
+ target_ucontext_t sf_uc; /* = *sf_uncontext */
|
|
+ target_siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case)*/
|
|
+ uint32_t __spare__[2];
|
|
+};
|
|
+
|
|
+static inline abi_ulong
|
|
+get_sp_from_cpustate(CPUMIPSState *state)
|
|
{
|
|
return state->active_tc.gpr[29];
|
|
}
|
|
|
|
+/*
|
|
+ * Compare to mips/mips/pm_machdep.c sendsig()
|
|
+ * Assumes that "frame" memory is locked.
|
|
+ */
|
|
+static inline int
|
|
+set_sigtramp_args(CPUMIPSState *regs, int sig, struct target_sigframe *frame,
|
|
+ abi_ulong frame_addr, struct target_sigaction *ka)
|
|
+{
|
|
+
|
|
+ frame->sf_signum = sig;
|
|
+ frame->sf_siginfo = 0;
|
|
+ frame->sf_ucontext = 0;
|
|
+
|
|
+ frame->sf_si.si_signo = sig;
|
|
+ frame->sf_si.si_code = TARGET_SA_SIGINFO;
|
|
+ frame->sf_si.si_addr = regs->CP0_BadVAddr;
|
|
+
|
|
+ /*
|
|
+ * Arguments to signal handler:
|
|
+ * a0 ($4) = signal number
|
|
+ * a1 ($5) = siginfo pointer
|
|
+ * a2 ($6) = ucontext pointer
|
|
+ * PC = signal handler pointer
|
|
+ * t9 ($25) = signal handler pointer
|
|
+ * $29 = point to sigframe struct
|
|
+ * ra ($31) = sigtramp at base of user stack
|
|
+ */
|
|
+ regs->active_tc.gpr[ 4] = sig;
|
|
+ regs->active_tc.gpr[ 5] = frame_addr +
|
|
+ offsetof(struct target_sigframe, sf_si);
|
|
+ regs->active_tc.gpr[25] = regs->active_tc.PC = ka->_sa_handler;
|
|
+ regs->active_tc.gpr[29] = frame_addr;
|
|
+ regs->active_tc.gpr[31] = TARGET_PS_STRINGS - TARGET_SZSIGCODE;
|
|
+
|
|
+ return (0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Compare to mips/mips/pm_machdep.c get_mcontext()
|
|
+ * Assumes that the memory is locked if mcp points to user memory.
|
|
+ */
|
|
+static inline int
|
|
+get_mcontext(CPUMIPSState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+ int i, err = 0;
|
|
+
|
|
+ if (flags & TARGET_MC_ADD_MAGIC) {
|
|
+ mcp->mc_regs[0] = tswapal(TARGET_UCONTEXT_MAGIC);
|
|
+ } else {
|
|
+ mcp->mc_regs[0] = 0;
|
|
+ }
|
|
+
|
|
+ if (flags & TARGET_MC_SET_ONSTACK) {
|
|
+ mcp->mc_onstack = tswapal(1);
|
|
+ } else {
|
|
+ mcp->mc_onstack = 0;
|
|
+ }
|
|
+
|
|
+ for(i = 1; i < 32; i++)
|
|
+ mcp->mc_regs[i] = tswapal(regs->active_tc.gpr[i]);
|
|
+
|
|
+#if 0 /* XXX FP is not used right now */
|
|
+ abi_ulong used_fp = used_math() ? TARGET_MDTD_FPUSED : 0;
|
|
+
|
|
+ mcp->mc_fpused = used_fp;
|
|
+ if (used_fp) {
|
|
+ preempt_disable();
|
|
+ if (!is_fpu_owner()) {
|
|
+ own_fpu();
|
|
+ for(i = 0; i < 33; i++)
|
|
+ mcp->mc_fpregs[i] = tswapal(regs->active_fpu.fpr[i]);
|
|
+ }
|
|
+ preempt_enable();
|
|
+ }
|
|
+#else
|
|
+ mcp->mc_fpused = 0;
|
|
+#endif
|
|
+
|
|
+ if (flags & TARGET_MC_GET_CLEAR_RET) {
|
|
+ mcp->mc_regs[2] = 0; /* v0 = 0 */
|
|
+ mcp->mc_regs[3] = 0; /* v1 = 0 */
|
|
+ mcp->mc_regs[7] = 0; /* a3 = 0 */
|
|
+ }
|
|
+
|
|
+ mcp->mc_pc = tswapal(regs->active_tc.PC);
|
|
+ mcp->mullo = tswapal(regs->active_tc.LO[0]);
|
|
+ mcp->mulhi = tswapal(regs->active_tc.HI[0]);
|
|
+ mcp->mc_tls = tswapal(regs->tls_value);
|
|
+
|
|
+ /* Don't do any of the status and cause registers. */
|
|
+
|
|
+ return (err);
|
|
+}
|
|
+
|
|
+/* Compare to mips/mips/pm_machdep.c set_mcontext() */
|
|
+static inline int
|
|
+set_mcontext(CPUMIPSState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+ int i, err = 0;
|
|
+
|
|
+ for(i = 1; i < 32; i++)
|
|
+ regs->active_tc.gpr[i] = tswapal(mcp->mc_regs[i]);
|
|
+
|
|
+#if 0 /* XXX FP is not used right now */
|
|
+ abi_ulong used_fp = 0;
|
|
+
|
|
+ used_fp = tswapal(mcp->mc_fpused)
|
|
+ conditional_used_math(used_fp);
|
|
+
|
|
+ preempt_disabled();
|
|
+ if (used_math()) {
|
|
+ /* restore fpu context if we have used it before */
|
|
+ own_fpu();
|
|
+ for (i = 0; i < 32; i++)
|
|
+ regs->active_fpu.fpr[i] = tswapal(mcp->mc_fpregs[i]);
|
|
+ } else {
|
|
+ /* Signal handler may have used FPU. Give it up. */
|
|
+ lose_fpu();
|
|
+ }
|
|
+ preempt_enable();
|
|
+#endif
|
|
+
|
|
+ regs->CP0_EPC = tswapal(mcp->mc_pc);
|
|
+ regs->active_tc.LO[0] = tswapal(mcp->mullo);
|
|
+ regs->active_tc.HI[0] = tswapal(mcp->mulhi);
|
|
+ regs->tls_value = tswapal(mcp->mc_tls);
|
|
+
|
|
+ /* Don't do any of the status and cause registers. */
|
|
+
|
|
+ return (err);
|
|
+}
|
|
+
|
|
#endif /* TARGET_SIGNAL_H */
|
|
+
|
|
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
|
|
--- a/bsd-user/mmap.c
|
|
+++ b/bsd-user/mmap.c
|
|
@@ -74,6 +74,8 @@ void mmap_unlock(void)
|
|
}
|
|
#endif
|
|
|
|
+#if 0 /* XXX not sure why we need our own g_malloc() and friends.
|
|
+ g_strdup(), however, has serious problems with this g_malloc/g_free */
|
|
static void *bsd_vmalloc(size_t size)
|
|
{
|
|
void *p;
|
|
@@ -133,6 +135,7 @@ void *g_realloc(void *ptr, size_t size)
|
|
g_free(ptr);
|
|
return new_ptr;
|
|
}
|
|
+#endif
|
|
|
|
/* NOTE: all the constants are the HOST ones, but addresses are target. */
|
|
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
|
|
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
|
|
index ab7e18c..9d4edbf 100644
|
|
--- a/bsd-user/qemu.h
|
|
+++ b/bsd-user/qemu.h
|
|
@@ -80,7 +80,7 @@ struct emulated_sigtable {
|
|
typedef struct TaskState {
|
|
struct TaskState *next;
|
|
int used; /* non zero if used */
|
|
-#if 1
|
|
+ long ts_tid; /* tid (or pid) of this task */
|
|
#ifdef TARGET_ARM
|
|
int swi_errno;
|
|
#endif
|
|
@@ -90,7 +90,6 @@ typedef struct TaskState {
|
|
uint32_t heap_limit;
|
|
#endif
|
|
uint32_t stack_base;
|
|
-#endif
|
|
struct image_info *info;
|
|
struct bsd_binprm *bprm;
|
|
|
|
diff --git a/bsd-user/signal.c b/bsd-user/signal.c
|
|
index 52441c4..d56837b 100644
|
|
--- a/bsd-user/signal.c
|
|
+++ b/bsd-user/signal.c
|
|
@@ -31,7 +31,7 @@
|
|
#include "qemu.h"
|
|
#include "target_signal.h"
|
|
|
|
-//#define DEBUG_SIGNAL
|
|
+// #define DEBUG_SIGNAL
|
|
|
|
#ifndef _NSIG
|
|
#define _NSIG 128
|
|
@@ -606,101 +606,31 @@ do_sigaction(int sig, const struct target_sigaction *act,
|
|
return (ret);
|
|
}
|
|
|
|
-#if defined(TARGET_MIPS64)
|
|
-static inline int
|
|
-restore_sigmcontext(CPUMIPSState *regs, target_mcontext_t *mc)
|
|
-{
|
|
- int i, err = 0;
|
|
-
|
|
- for(i = 1; i < 32; i++)
|
|
- err |= __get_user(regs->active_tc.gpr[i],
|
|
- &mc->mc_regs[i]);
|
|
- err |= __get_user(regs->CP0_EPC, &mc->mc_pc);
|
|
- err |= __get_user(regs->active_tc.LO[0], &mc->mullo);
|
|
- err |= __get_user(regs->active_tc.HI[0], &mc->mulhi);
|
|
- err |= __get_user(regs->tls_value, &mc->mc_tls); /* XXX thread tls */
|
|
-
|
|
-#if 0 /* XXX */
|
|
- int used_fp = 0;
|
|
-
|
|
- err |= __get_user(used_fp, &mc->mc_fpused);
|
|
- conditional_used_math(used_fp);
|
|
-
|
|
- preempt_disabled();
|
|
- if (used_math()) {
|
|
- /* restore fpu context if we have used it before */
|
|
- own_fpu();
|
|
- err |= restore_fp_context(mc);
|
|
- } else {
|
|
- /* signal handler may have used FPU. Give it up. */
|
|
- lose_fpu();
|
|
- }
|
|
- preempt_enable();
|
|
-#endif
|
|
-
|
|
- return (err);
|
|
-}
|
|
-
|
|
-static inline int
|
|
-setup_sigmcontext(CPUMIPSState *regs, target_mcontext_t *mc, int32_t oonstack)
|
|
-{
|
|
- int i, err = 0;
|
|
- abi_long ucontext_magic = TARGET_UCONTEXT_MAGIC;
|
|
-
|
|
- err = __put_user(oonstack ? 1 : 0, &mc->mc_onstack);
|
|
- err |= __put_user(regs->active_tc.PC, &mc->mc_pc);
|
|
- err |= __put_user(regs->active_tc.LO[0], &mc->mullo);
|
|
- err |= __put_user(regs->active_tc.HI[0], &mc->mulhi);
|
|
- err |= __put_user(regs->tls_value, &mc->mc_tls); /* XXX thread tls */
|
|
-
|
|
- err |= __put_user(ucontext_magic, &mc->mc_regs[0]);
|
|
- for(i = 1; i < 32; i++)
|
|
- err |= __put_user(regs->active_tc.gpr[i], &mc->mc_regs[i]);
|
|
-
|
|
- err |= __put_user(0, &mc->mc_fpused);
|
|
-
|
|
-#if 0 /* XXX */
|
|
- err |= __put_user(used_math(), &mc->mc_fpused);
|
|
- if (used_math())
|
|
- goto out;
|
|
-
|
|
- /*
|
|
- * Save FPU state to signal context. Signal handler will "inherit"
|
|
- * current FPU state.
|
|
- */
|
|
- preempt_disable();
|
|
-
|
|
- if (!is_fpu_owner()) {
|
|
- own_fpu();
|
|
- for(i = 0; i < 33; i++)
|
|
- err |= __put_user(regs->active_tc.fpregs[i], &mc->mc_fpregs[i]);
|
|
- }
|
|
- err |= save_fp_context(fg);
|
|
-
|
|
- preempt_enable();
|
|
-out:
|
|
-#endif
|
|
- return (err);
|
|
-}
|
|
+#if defined(TARGET_MIPS) || defined(TARGET_SPARC64)
|
|
|
|
static inline abi_ulong
|
|
-get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
|
|
+get_sigframe(struct target_sigaction *ka, CPUArchState *regs, size_t frame_size)
|
|
{
|
|
abi_ulong sp;
|
|
|
|
/* Use default user stack */
|
|
- sp = regs->active_tc.gpr[29];
|
|
+ sp = get_sp_from_cpustate(regs);
|
|
|
|
if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
|
|
- sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
|
|
+ sp = target_sigaltstack_used.ss_sp +
|
|
+ target_sigaltstack_used.ss_size;
|
|
}
|
|
|
|
+#if defined(TARGET_MIPS)
|
|
return ((sp - frame_size) & ~7);
|
|
+#else
|
|
+ return (sp - frame_size);
|
|
+#endif
|
|
}
|
|
|
|
-/* compare to mips/mips/pm_machdep.c sendsig() */
|
|
+/* compare to mips/mips/pm_machdep.c and sparc64/sparc64/machdep.c sendsig() */
|
|
static void setup_frame(int sig, struct target_sigaction *ka,
|
|
- target_sigset_t *set, CPUMIPSState *regs)
|
|
+ target_sigset_t *set, CPUArchState *regs)
|
|
{
|
|
struct target_sigframe *frame;
|
|
abi_ulong frame_addr;
|
|
@@ -709,54 +639,36 @@ static void setup_frame(int sig, struct target_sigaction *ka,
|
|
#ifdef DEBUG_SIGNAL
|
|
fprintf(stderr, "setup_frame()\n");
|
|
#endif
|
|
+#if defined(TARGET_SPARC64)
|
|
+ if (!sparc_user_sigtramp) {
|
|
+ /* No signal trampoline... kill the process. */
|
|
+ fprintf(stderr, "setup_frame(): no sigtramp\n");
|
|
+ force_sig(TARGET_SIGKILL);
|
|
+ }
|
|
+#endif
|
|
|
|
frame_addr = get_sigframe(ka, regs, sizeof(*frame));
|
|
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
|
|
goto give_sigsegv;
|
|
|
|
- if (setup_sigmcontext(regs, &frame->sf_uc.uc_mcontext,
|
|
- ! on_sig_stack(frame_addr)))
|
|
+#if defined(TARGET_MIPS)
|
|
+ int mflags = on_sig_stack(frame_addr) ? TARGET_MC_ADD_MAGIC :
|
|
+ TARGET_MC_SET_ONSTACK | TARGET_MC_ADD_MAGIC;
|
|
+#else
|
|
+ int mflags = 0;
|
|
+#endif
|
|
+ if (get_mcontext(regs, &frame->sf_uc.uc_mcontext, mflags))
|
|
goto give_sigsegv;
|
|
|
|
for(i = 0; i < TARGET_NSIG_WORDS; i++) {
|
|
- if (__put_user(set->__bits[i], &frame->sf_uc.uc_sigmask.__bits[i]))
|
|
+ if (__put_user(set->__bits[i],
|
|
+ &frame->sf_uc.uc_sigmask.__bits[i]))
|
|
goto give_sigsegv;
|
|
}
|
|
|
|
- /* fill in sigframe structure */
|
|
- if (__put_user(sig, &frame->sf_signum))
|
|
- goto give_sigsegv;
|
|
- if (__put_user(0, &frame->sf_siginfo))
|
|
- goto give_sigsegv;
|
|
- if (__put_user(0, &frame->sf_ucontext))
|
|
+ if (set_sigtramp_args(regs, sig, frame, frame_addr, ka))
|
|
goto give_sigsegv;
|
|
|
|
- /* fill in siginfo structure */
|
|
- if (__put_user(sig, &frame->sf_si.si_signo))
|
|
- goto give_sigsegv;
|
|
- if (__put_user(TARGET_SA_SIGINFO, &frame->sf_si.si_code))
|
|
- goto give_sigsegv;
|
|
- if (__put_user(regs->CP0_BadVAddr, &frame->sf_si.si_addr))
|
|
- goto give_sigsegv;
|
|
-
|
|
- /*
|
|
- * Arguments to signal handler:
|
|
- * a0 ($4) = signal number
|
|
- * a1 ($5) = siginfo pointer
|
|
- * a2 ($6) = ucontext pointer
|
|
- * PC = signal handler pointer
|
|
- * t9 ($25) = signal handler pointer
|
|
- * $29 = point to sigframe struct
|
|
- * ra ($31) = sigtramp at base of user stack
|
|
- */
|
|
- regs->active_tc.gpr[ 4] = sig;
|
|
- regs->active_tc.gpr[ 5] = frame_addr +
|
|
- offsetof(struct target_sigframe, sf_si);
|
|
- regs->active_tc.gpr[ 6] = frame_addr +
|
|
- offsetof(struct target_sigframe, sf_uc);
|
|
- regs->active_tc.gpr[25] = regs->active_tc.PC = ka->_sa_handler;
|
|
- regs->active_tc.gpr[29] = frame_addr;
|
|
- regs->active_tc.gpr[31] = TARGET_PS_STRINGS - TARGET_SZSIGCODE;
|
|
unlock_user_struct(frame, frame_addr, 1);
|
|
return;
|
|
|
|
@@ -766,7 +678,7 @@ give_sigsegv:
|
|
}
|
|
|
|
long
|
|
-do_sigreturn(CPUMIPSState *regs, abi_ulong uc_addr)
|
|
+do_sigreturn(CPUArchState *regs, abi_ulong uc_addr)
|
|
{
|
|
target_ucontext_t *ucontext;
|
|
sigset_t blocked;
|
|
@@ -784,14 +696,17 @@ do_sigreturn(CPUMIPSState *regs, abi_ulong uc_addr)
|
|
goto badframe;
|
|
}
|
|
|
|
- if (restore_sigmcontext(regs, &ucontext->uc_mcontext))
|
|
+ if (set_mcontext(regs, &ucontext->uc_mcontext, 0))
|
|
goto badframe;
|
|
|
|
target_to_host_sigset_internal(&blocked, &target_set);
|
|
sigprocmask(SIG_SETMASK, &blocked, NULL);
|
|
|
|
- regs->active_tc.PC = regs->CP0_EPC;
|
|
- regs->CP0_EPC = 0; /* XXX for nested signals ? */
|
|
+#if defined(TARGET_MIPS)
|
|
+ CPUMIPSState *mips_regs = (CPUMIPSState *)regs;
|
|
+ mips_regs->active_tc.PC = mips_regs->CP0_EPC;
|
|
+ mips_regs->CP0_EPC = 0; /* XXX for nested signals ? */
|
|
+#endif
|
|
return (-TARGET_QEMU_ESIGRETURN);
|
|
|
|
badframe:
|
|
@@ -799,9 +714,10 @@ badframe:
|
|
return (0);
|
|
}
|
|
|
|
-#elif defined(TARGET_SPARC64)
|
|
|
|
-extern abi_ulong sparc_user_sigtramp;
|
|
+
|
|
+/* #elif defined(TARGET_SPARC64) */
|
|
+#if 0
|
|
|
|
#define mc_flags mc_global[0]
|
|
#define mc_sp mc_out[6]
|
|
@@ -1039,6 +955,7 @@ badframe:
|
|
force_sig(TARGET_SIGSEGV);
|
|
return (0);
|
|
}
|
|
+#endif
|
|
|
|
#else
|
|
|
|
diff --git a/bsd-user/sparc/target_signal.h b/bsd-user/sparc/target_signal.h
|
|
index 79dfc1e..e2fe79c 100644
|
|
--- a/bsd-user/sparc/target_signal.h
|
|
+++ b/bsd-user/sparc/target_signal.h
|
|
@@ -13,9 +13,34 @@
|
|
#define TARGET_MINSIGSTKSZ (512 * 4)
|
|
#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)
|
|
|
|
+typedef target_ulong target_mcontext_t; /* dummy */
|
|
+
|
|
+typedef struct target_ucontext {
|
|
+ target_sigset_t uc_sigmask;
|
|
+ target_mcontext_t uc_mcontext;
|
|
+ abi_ulong uc_link;
|
|
+ target_stack_t uc_stack;
|
|
+ int32_t uc_flags;
|
|
+ int32_t __spare__[4];
|
|
+} target_ucontext_t;
|
|
+
|
|
static inline abi_ulong get_sp_from_cpustate(CPUSPARCState *state)
|
|
{
|
|
return state->regwptr[UREG_FP];
|
|
}
|
|
|
|
+static inline int
|
|
+get_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+ fprintf(stderr, "SPARC doesn't have support for get_mcontext()\n");
|
|
+ return (-TARGET_ENOSYS);
|
|
+}
|
|
+
|
|
+static inline int
|
|
+set_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+ fprintf(stderr, "SPARC doesn't have support for set_mcontext()\n");
|
|
+ return (-TARGET_ENOSYS);
|
|
+}
|
|
+
|
|
#endif /* TARGET_SIGNAL_H */
|
|
diff --git a/bsd-user/sparc64/target_signal.h b/bsd-user/sparc64/target_signal.h
|
|
index d3e58bb..1bc7c96 100644
|
|
--- a/bsd-user/sparc64/target_signal.h
|
|
+++ b/bsd-user/sparc64/target_signal.h
|
|
@@ -10,12 +10,239 @@
|
|
#define UREG_FP UREG_I6
|
|
#endif
|
|
|
|
+#define mc_flags mc_global[0]
|
|
+#define mc_sp mc_out[6]
|
|
+#define mc_fprs mc_local[0]
|
|
+#define mc_fsr mc_local[1]
|
|
+#define mc_gsr mc_local[2]
|
|
+#define mc_tnpc mc_in[0]
|
|
+#define mc_tpc mc_in[1]
|
|
+#define mc_tstate mc_in[2]
|
|
+#define mc_y mc_in[4]
|
|
+#define mc_wstate mc_in[5]
|
|
+
|
|
+#define ureg_i0 regwptr[0 ]
|
|
+#define ureg_i1 regwptr[1 ]
|
|
+#define ureg_i2 regwptr[2 ]
|
|
+#define ureg_i3 regwptr[3 ]
|
|
+#define ureg_i4 regwptr[4 ]
|
|
+#define ureg_i5 regwptr[5 ]
|
|
+#define ureg_i6 regwptr[6 ]
|
|
+#define ureg_i7 regwptr[7 ]
|
|
+#define ureg_l0 regwptr[8 ]
|
|
+#define ureg_l1 regwptr[9 ]
|
|
+#define ureg_l2 regwptr[10]
|
|
+#define ureg_l3 regwptr[11]
|
|
+#define ureg_l4 regwptr[12]
|
|
+#define ureg_l5 regwptr[13]
|
|
+#define ureg_l6 regwptr[14]
|
|
+#define ureg_l7 regwptr[15]
|
|
+#define ureg_o0 regwptr[16]
|
|
+#define ureg_o1 regwptr[17]
|
|
+#define ureg_o2 regwptr[18]
|
|
+#define ureg_o3 regwptr[19]
|
|
+#define ureg_o4 regwptr[20]
|
|
+#define ureg_o5 regwptr[21]
|
|
+#define ureg_o6 regwptr[22]
|
|
+#define ureg_o7 regwptr[23]
|
|
+#define ureg_fp ureg_i6
|
|
+#define ureg_sp ureg_o6
|
|
+#define ureg_fprs fprs
|
|
+#define ureg_fsr fsr
|
|
+#define ureg_gsr gsr
|
|
+#define ureg_tnpc npc
|
|
+#define ureg_tpc pc
|
|
+#define ureg_y y
|
|
+
|
|
+#define TARGET_FPRS_FEF (1 << 2)
|
|
+#define TARGET_MC_VERSION 1L
|
|
+
|
|
#define TARGET_MINSIGSTKSZ (1024 * 4)
|
|
#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)
|
|
|
|
-static inline abi_ulong get_sp_from_cpustate(CPUSPARCState *state)
|
|
+#define TARGET_STACK_BIAS 2047 /* AKA. SPOFF */
|
|
+
|
|
+struct target_mcontext {
|
|
+ uint64_t mc_global[8];
|
|
+ uint64_t mc_out[8];
|
|
+ uint64_t mc_local[8];
|
|
+ uint64_t mc_in[8];
|
|
+ uint32_t mc_fp[64];
|
|
+} __aligned(64);
|
|
+
|
|
+typedef struct target_mcontext target_mcontext_t;
|
|
+
|
|
+typedef struct target_ucontext {
|
|
+ target_sigset_t uc_sigmask;
|
|
+ target_mcontext_t uc_mcontext;
|
|
+ abi_ulong uc_link;
|
|
+ target_stack_t uc_stack;
|
|
+ int32_t uc_flags;
|
|
+ int32_t __spare__[4];
|
|
+} target_ucontext_t;
|
|
+
|
|
+struct target_sigframe {
|
|
+ target_ucontext_t sf_uc;
|
|
+ target_siginfo_t sf_si;
|
|
+};
|
|
+
|
|
+extern abi_ulong sparc_user_sigtramp;
|
|
+
|
|
+static inline int
|
|
+set_sigtramp_args(CPUSPARCState *regs, int sig, struct target_sigframe *frame,
|
|
+ abi_ulong frame_addr, struct target_sigaction *ka)
|
|
{
|
|
+
|
|
+ frame->sf_si.si_signo = sig;
|
|
+ frame->sf_si.si_code = TARGET_SA_SIGINFO;
|
|
+
|
|
+ /* Arguments to signal handler:
|
|
+ *
|
|
+ * i0 = signal number
|
|
+ * i1 = pointer to siginfo struct
|
|
+ * i2 = pointer to ucontext struct
|
|
+ * i3 = (not used in new style)
|
|
+ * i4 = signal handler address (called by sigtramp)
|
|
+ */
|
|
+ regs->ureg_i0 = sig;
|
|
+ regs->ureg_i1 = frame_addr +
|
|
+ offsetof(struct target_sigframe, sf_si);
|
|
+ regs->ureg_i2 = frame_addr +
|
|
+ offsetof(struct target_sigframe, sf_uc);
|
|
+ /* env->ureg_o3 used in the Old FreeBSD-style arguments. */
|
|
+ regs->ureg_i4 = ka->_sa_handler;
|
|
+ regs->ureg_tpc = sparc_user_sigtramp;
|
|
+ regs->ureg_tnpc = (regs->ureg_tpc + 4);
|
|
+ regs->ureg_sp = frame_addr - TARGET_STACK_BIAS;
|
|
+
|
|
+ return (0);
|
|
+}
|
|
+
|
|
+static inline abi_ulong
|
|
+get_sp_from_cpustate(CPUSPARCState *state)
|
|
+{
|
|
+
|
|
return state->regwptr[UREG_FP];
|
|
}
|
|
|
|
+/* compare to sparc64/sparc64/machdep.c get_mcontext() */
|
|
+static inline int
|
|
+get_mcontext(CPUSPARCState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+
|
|
+ /* Skip over the trap instruction, first. */
|
|
+ regs->pc = regs->npc;
|
|
+ regs->npc += 4;
|
|
+
|
|
+ mcp->mc_flags = TARGET_MC_VERSION; /* mc_global[0] */
|
|
+ mcp->mc_global[1] = tswapal(regs->gregs[1]);
|
|
+ mcp->mc_global[2] = tswapal(regs->gregs[2]);
|
|
+ mcp->mc_global[3] = tswapal(regs->gregs[3]);
|
|
+ mcp->mc_global[4] = tswapal(regs->gregs[4]);
|
|
+ mcp->mc_global[5] = tswapal(regs->gregs[5]);
|
|
+ mcp->mc_global[6] = tswapal(regs->gregs[6]);
|
|
+ /* skip %g7 since it is used as the userland TLS register */
|
|
+
|
|
+ if (flags & TARGET_MC_GET_CLEAR_RET) {
|
|
+ mcp->mc_out[0] = 0;
|
|
+ mcp->mc_out[1] = 0;
|
|
+ } else {
|
|
+ mcp->mc_out[0] = tswapal(regs->ureg_i0);
|
|
+ mcp->mc_out[1] = tswapal(regs->ureg_i1);
|
|
+ }
|
|
+ mcp->mc_out[2] = tswapal(regs->ureg_i2);
|
|
+ mcp->mc_out[3] = tswapal(regs->ureg_i3);
|
|
+ mcp->mc_out[4] = tswapal(regs->ureg_i4);
|
|
+ mcp->mc_out[5] = tswapal(regs->ureg_i5);
|
|
+ mcp->mc_out[6] = tswapal(regs->ureg_i6);
|
|
+ mcp->mc_out[7] = tswapal(regs->ureg_i7);
|
|
+
|
|
+ mcp->mc_fprs = tswapal(regs->fprs); /* mc_local[0] */
|
|
+ mcp->mc_fsr = tswapal(regs->fsr); /* mc_local[1] */
|
|
+ mcp->mc_gsr = tswapal(regs->gsr); /* mc_local[2] */
|
|
+
|
|
+ mcp->mc_tnpc = tswapal(regs->npc); /* mc_in[0] */
|
|
+ mcp->mc_tpc = tswapal(regs->pc); /* mc_in[1] */
|
|
+#if 0
|
|
+ mcp->mc_tstate = tswapal(regs->ureg_tstate); /* mc_in[2] */
|
|
+#else
|
|
+ abi_ulong cwp64 = cpu_get_cwp64(regs);
|
|
+ abi_ulong ccr = cpu_get_ccr(regs) << 32;
|
|
+ abi_ulong asi = (regs->asi & 0xff) << 24;
|
|
+ mcp->mc_tstate = tswapal(ccr | asi | cwp64);
|
|
+#endif
|
|
+
|
|
+ mcp->mc_y = tswapal(regs->y); /* mc_in[4] */
|
|
+
|
|
+ /* XXX
|
|
+ if ((regs->ureg_l0 & TARGET_FPRS_FEF) != 0) {
|
|
+ int i;
|
|
+
|
|
+ for(i = 0; i < 64; i++)
|
|
+ mcp->mc_fp[i] = tswapal(regs->fpr[i]);
|
|
+ }
|
|
+ */
|
|
+
|
|
+ return (0);
|
|
+}
|
|
+
|
|
+extern void helper_flushw(CPUSPARCState *env);
|
|
+
|
|
+/* compare to sparc64/sparc64/machdep.c set_mcontext() */
|
|
+static inline int
|
|
+set_mcontext(CPUSPARCState *regs, target_mcontext_t *mcp, int flags)
|
|
+{
|
|
+ /* XXX need to add version check here. */
|
|
+
|
|
+ /* Make sure the windows are spilled first. */
|
|
+ helper_flushw(regs);
|
|
+
|
|
+ regs->gregs[1] = tswapal(mcp->mc_global[1]);
|
|
+ regs->gregs[2] = tswapal(mcp->mc_global[2]);
|
|
+ regs->gregs[3] = tswapal(mcp->mc_global[3]);
|
|
+ regs->gregs[4] = tswapal(mcp->mc_global[4]);
|
|
+ regs->gregs[5] = tswapal(mcp->mc_global[5]);
|
|
+ regs->gregs[6] = tswapal(mcp->mc_global[6]);
|
|
+
|
|
+ regs->ureg_i0 = tswapal(mcp->mc_out[0]);
|
|
+ regs->ureg_i1 = tswapal(mcp->mc_out[1]);
|
|
+ regs->ureg_i2 = tswapal(mcp->mc_out[2]);
|
|
+ regs->ureg_i3 = tswapal(mcp->mc_out[3]);
|
|
+ regs->ureg_i4 = tswapal(mcp->mc_out[4]);
|
|
+ regs->ureg_i5 = tswapal(mcp->mc_out[5]);
|
|
+ regs->ureg_i6 = tswapal(mcp->mc_out[6]);
|
|
+ regs->ureg_i7 = tswapal(mcp->mc_out[7]);
|
|
+
|
|
+ regs->fprs = tswapal(mcp->mc_fprs); /* mc_local[0] */
|
|
+ regs->fsr = tswapal(mcp->mc_fsr); /* mc_local[1] */
|
|
+ regs->gsr = tswapal(mcp->mc_gsr); /* mc_local[2] */
|
|
+
|
|
+ regs->npc = tswapal(mcp->mc_tnpc); /* mc_in[0] */
|
|
+ regs->pc = tswapal(mcp->mc_tpc); /* mc_in[1] */
|
|
+
|
|
+#if 0
|
|
+ regs->ureg_tstate = tswapal(mcp->mc_tstate); /* mc_in[2] */
|
|
+#else
|
|
+ abi_ulong tstate = tswapal(mcp->mc_tstate); /* mc_in[2] */
|
|
+
|
|
+ regs->asi = (tstate >> 24) & 0xff;
|
|
+ cpu_put_ccr(regs, tstate >> 32);
|
|
+ cpu_put_cwp64(regs, tstate & 0x1f);
|
|
+
|
|
+#endif
|
|
+ regs->ureg_y = tswapal(mcp->mc_y); /* mc_in[4] */
|
|
+
|
|
+ /* XXX
|
|
+ if ((regs->ureg_fprs & TARGET_FPRS_FEF) != 0) {
|
|
+ int i;
|
|
+
|
|
+ regs->ureg_l0 = 0;
|
|
+ for(i = 0; i < 64; i++)
|
|
+ regs->fpr[i] = tswapal(mcp->mc_fp[i]);
|
|
+ }
|
|
+ */
|
|
+
|
|
+ return (0);
|
|
+}
|
|
+
|
|
#endif /* TARGET_SIGNAL_H */
|
|
diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c
|
|
index 625c3cf..4deb0db 100644
|
|
--- a/bsd-user/syscall.c
|
|
+++ b/bsd-user/syscall.c
|
|
@@ -43,6 +43,12 @@
|
|
#ifdef __FreeBSD__
|
|
#include <sys/regression.h>
|
|
#include <sys/procdesc.h>
|
|
+#include <sys/ucontext.h>
|
|
+#include <sys/thr.h>
|
|
+#include <sys/rtprio.h>
|
|
+#include <sys/umtx.h>
|
|
+#include <pthread.h>
|
|
+#include <machine/atomic.h>
|
|
#endif
|
|
#include <sys/un.h>
|
|
#include <sys/ipc.h>
|
|
@@ -251,7 +257,24 @@ static abi_long do_freebsd_sysarch(void *env, int op, abi_ulong parms)
|
|
#ifdef TARGET_MIPS
|
|
static abi_long do_freebsd_sysarch(void *env, int op, abi_ulong parms)
|
|
{
|
|
- return -TARGET_EINVAL;
|
|
+ int ret = 0;
|
|
+ CPUMIPSState *mips_env = (CPUMIPSState *)env;
|
|
+
|
|
+ switch(op) {
|
|
+ case TARGET_MIPS_SET_TLS:
|
|
+ if (get_user(mips_env->tls_value, parms, abi_ulong))
|
|
+ ret = -TARGET_EFAULT;
|
|
+ break;
|
|
+ case TARGET_MIPS_GET_TLS:
|
|
+ if (put_user(mips_env->tls_value, parms, abi_ulong))
|
|
+ ret = -TARGET_EFAULT;
|
|
+ break;
|
|
+ default:
|
|
+ ret = -TARGET_EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return (ret);
|
|
}
|
|
#endif
|
|
|
|
@@ -2119,6 +2142,383 @@ do_fork(CPUArchState *env, int num, int flags, int *fdp)
|
|
return (ret);
|
|
}
|
|
|
|
+#if defined(CONFIG_USE_NPTL)
|
|
+
|
|
+#define NEW_STACK_SIZE (0x40000)
|
|
+
|
|
+static pthread_mutex_t new_thread_lock = PTHREAD_MUTEX_INITIALIZER;
|
|
+typedef struct {
|
|
+ CPUArchState *env;
|
|
+ long tid;
|
|
+ pthread_mutex_t mutex;
|
|
+ pthread_cond_t cond;
|
|
+ pthread_t thread;
|
|
+ sigset_t sigmask;
|
|
+ struct target_thr_param param;
|
|
+} new_thread_info_t;
|
|
+
|
|
+static void *
|
|
+new_thread_start(void *arg)
|
|
+{
|
|
+ new_thread_info_t *info = arg;
|
|
+ CPUArchState *env;
|
|
+ TaskState *ts;
|
|
+ long tid;
|
|
+
|
|
+ env = info->env;
|
|
+ thread_env = env;
|
|
+ ts = (TaskState *)thread_env->opaque;
|
|
+ (void)thr_self(&tid);
|
|
+ info->tid = tid;
|
|
+ task_settid(ts);
|
|
+
|
|
+ /* copy out the TID info */
|
|
+ if (info->param.child_tid)
|
|
+ put_user(tid, info->param.child_tid, abi_long);
|
|
+ if (info->param.parent_tid)
|
|
+ put_user(tid, info->param.parent_tid, abi_long);
|
|
+
|
|
+#ifdef TARGET_MIPS64
|
|
+ CPUMIPSState *regs = env;
|
|
+ regs->active_tc.gpr[25] = regs->active_tc.PC = info->param.start_func;
|
|
+ regs->active_tc.gpr[ 4] = info->param.arg;
|
|
+ regs->active_tc.gpr[29] = info->param.stack_base;
|
|
+#endif
|
|
+ /* Eenable signals */
|
|
+ sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
|
|
+ /* Signal to the parent that we're ready. */
|
|
+ pthread_mutex_lock(&info->mutex);
|
|
+ pthread_cond_broadcast(&info->cond);
|
|
+ pthread_mutex_unlock(&info->mutex);
|
|
+ /* Wait until the parent has finished initializing the TLS state. */
|
|
+ pthread_mutex_lock(&new_thread_lock);
|
|
+ pthread_mutex_unlock(&new_thread_lock);
|
|
+
|
|
+ cpu_loop(env);
|
|
+ /* never exits */
|
|
+
|
|
+ return (NULL);
|
|
+}
|
|
+
|
|
+static void
|
|
+rtp_to_schedparam(const struct rtprio *rtp, int *policy, struct sched_param *param)
|
|
+{
|
|
+
|
|
+ switch(rtp->type) {
|
|
+ case RTP_PRIO_REALTIME:
|
|
+ *policy = SCHED_RR;
|
|
+ param->sched_priority = RTP_PRIO_MAX - rtp->prio;
|
|
+ break;
|
|
+
|
|
+ case RTP_PRIO_FIFO:
|
|
+ *policy = SCHED_FIFO;
|
|
+ param->sched_priority = RTP_PRIO_MAX - rtp->prio;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ *policy = SCHED_OTHER;
|
|
+ param->sched_priority = 0;
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_create(CPUArchState *env, ucontext_t *ctx, long *id, int flags)
|
|
+{
|
|
+
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_create));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_new(CPUArchState *env, abi_ulong target_param_addr, int32_t param_size)
|
|
+{
|
|
+ new_thread_info_t info;
|
|
+ pthread_attr_t attr;
|
|
+ TaskState *ts;
|
|
+ CPUArchState *new_env;
|
|
+ struct target_thr_param *target_param;
|
|
+ abi_ulong target_rtp_addr;
|
|
+ struct target_rtprio *target_rtp;
|
|
+ struct rtprio *rtp_ptr, rtp;
|
|
+ TaskState *parent_ts = (TaskState *)env->opaque;
|
|
+ sigset_t sigmask;
|
|
+ struct sched_param sched_param;
|
|
+ int sched_policy;
|
|
+ int ret = 0;
|
|
+
|
|
+ memset(&info, 0, sizeof(info));
|
|
+
|
|
+ if (!lock_user_struct(VERIFY_READ, target_param, target_param_addr, 1))
|
|
+ return (-TARGET_EFAULT);
|
|
+ info.param.start_func = tswapal(target_param->start_func);
|
|
+ info.param.arg = tswapal(target_param->arg);
|
|
+ info.param.stack_base = tswapal(target_param->stack_base);
|
|
+ info.param.stack_size = tswapal(target_param->stack_size);
|
|
+ info.param.tls_base = tswapal(target_param->tls_base);
|
|
+ info.param.tls_size = tswapal(target_param->tls_size);
|
|
+ info.param.child_tid = tswapal(target_param->child_tid);
|
|
+ info.param.parent_tid = tswapal(target_param->parent_tid);
|
|
+ target_rtp_addr = info.param.rtp = tswapal(target_param->rtp);
|
|
+ unlock_user(target_param, target_param_addr, 0);
|
|
+
|
|
+ if (target_rtp_addr) {
|
|
+ if (!lock_user_struct(VERIFY_READ, target_rtp, target_rtp_addr,
|
|
+ 1))
|
|
+ return (-TARGET_EFAULT);
|
|
+ rtp.type = tswap16(target_rtp->type);
|
|
+ rtp.prio = tswap16(target_rtp->prio);
|
|
+ unlock_user(target_rtp, target_rtp_addr, 0);
|
|
+ rtp_ptr = &rtp;
|
|
+ } else {
|
|
+ rtp_ptr = NULL;
|
|
+ }
|
|
+
|
|
+ /* Create a new CPU instance. */
|
|
+ ts = g_malloc0(sizeof(TaskState));
|
|
+ init_task_state(ts);
|
|
+ new_env = cpu_copy(env);
|
|
+#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
|
|
+ cpu_reset(ENV_GET_CPU(new_env));
|
|
+#endif
|
|
+
|
|
+ /* init regs that differ from the parent thread. */
|
|
+ cpu_clone_regs(new_env, info.param.stack_base);
|
|
+ new_env->opaque = ts;
|
|
+ ts->bprm = parent_ts->bprm;
|
|
+ ts->info = parent_ts->info;
|
|
+
|
|
+#if defined(TARGET_MIPS)
|
|
+ env->tls_value = info.param.tls_base;
|
|
+ /* cpu_set_tls(new_env, info.param.tls_base); */
|
|
+#endif
|
|
+
|
|
+ /* Grab a mutex so that thread setup appears atomic. */
|
|
+ pthread_mutex_lock(&new_thread_lock);
|
|
+
|
|
+ pthread_mutex_init(&info.mutex, NULL);
|
|
+ pthread_mutex_lock(&info.mutex);
|
|
+ pthread_cond_init(&info.cond, NULL);
|
|
+ info.env = new_env;
|
|
+
|
|
+ /* XXX return value needs to be checked... */
|
|
+ pthread_attr_init(&attr);
|
|
+ pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
|
|
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
|
|
+ if (rtp_ptr) {
|
|
+ rtp_to_schedparam(&rtp, &sched_policy, &sched_param);
|
|
+ pthread_attr_setschedpolicy(&attr, sched_policy);
|
|
+ pthread_attr_setschedparam(&attr, &sched_param);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * It is not safe to deliver signals until the child has finished
|
|
+ * initializing, so temporarily block all signals.
|
|
+ */
|
|
+ sigfillset(&sigmask);
|
|
+ sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
|
|
+
|
|
+ /* XXX return value needs to be checked... */
|
|
+ ret = pthread_create(&info.thread, &attr, new_thread_start, &info);
|
|
+ /* XXX Free new CPU state if thread creation fails. */
|
|
+
|
|
+ sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
|
|
+ pthread_attr_destroy(&attr);
|
|
+ if (0 == ret) {
|
|
+ /* Wait for the child to initialize. */
|
|
+ pthread_cond_wait(&info.cond, &info.mutex);
|
|
+ } else {
|
|
+ /* pthread_create failed. */
|
|
+ }
|
|
+
|
|
+ pthread_mutex_unlock(&info.mutex);
|
|
+ pthread_cond_destroy(&info.cond);
|
|
+ pthread_mutex_destroy(&info.mutex);
|
|
+ pthread_mutex_unlock(&new_thread_lock);
|
|
+
|
|
+ return (ret);
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_self(long *id)
|
|
+{
|
|
+
|
|
+ return (get_errno(thr_self(id)));
|
|
+}
|
|
+
|
|
+static void
|
|
+do_thr_exit(CPUArchState *cpu_env, abi_ulong tid_addr)
|
|
+{
|
|
+
|
|
+ if (first_cpu->next_cpu) {
|
|
+ TaskState *ts;
|
|
+ CPUArchState **lastp, *p;
|
|
+
|
|
+ /*
|
|
+ * *XXX This probably breaks if a signal arrives.
|
|
+ * We should disable signals.
|
|
+ */
|
|
+ cpu_list_lock();
|
|
+ lastp = &first_cpu;
|
|
+ p = first_cpu;
|
|
+ while (p && p != (CPUArchState *)cpu_env) {
|
|
+ lastp = &p->next_cpu;
|
|
+ p = p->next_cpu;
|
|
+ }
|
|
+ /*
|
|
+ * if we didn't find the CPU for this thread then something
|
|
+ * is horribly wrong.
|
|
+ */
|
|
+ if (!p)
|
|
+ abort();
|
|
+ /* Remove the CPU from the list. */
|
|
+ *lastp = p->next_cpu;
|
|
+ cpu_list_unlock();
|
|
+ ts = ((CPUArchState *)cpu_env)->opaque;
|
|
+
|
|
+ if (tid_addr) {
|
|
+ /* Signal target userland that it can free the stack. */
|
|
+ if (! put_user_u32(1, tid_addr))
|
|
+ _umtx_op(g2h(tid_addr), UMTX_OP_WAKE, INT_MAX,
|
|
+ NULL, NULL);
|
|
+ }
|
|
+
|
|
+ thread_env = NULL;
|
|
+ object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
|
|
+ g_free(ts);
|
|
+ pthread_exit(NULL);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_kill(long id, int sig)
|
|
+{
|
|
+
|
|
+ return (get_errno(thr_kill(id, sig)));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_kill2(pid_t pid, long id, int sig)
|
|
+{
|
|
+
|
|
+ return (get_errno(thr_kill2(pid, id, sig)));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_suspend(const struct timespec *timeout)
|
|
+{
|
|
+
|
|
+ return (get_errno(thr_suspend(timeout)));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_wake(long tid)
|
|
+{
|
|
+
|
|
+ return (get_errno(thr_wake(tid)));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_set_name(long tid, char *name)
|
|
+{
|
|
+
|
|
+ return (get_errno(thr_set_name(tid, name)));
|
|
+}
|
|
+
|
|
+
|
|
+#else /* ! CONFIG_USE_NPTL */
|
|
+
|
|
+static int
|
|
+do_thr_create(CPUArchState *env, ucontext_t *ctx, long *id, int flags)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_create));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_new(CPUArchState *env, abi_ulong target_param_addr, int32_t param_size)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_new));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_self(long *tid)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_self));
|
|
+}
|
|
+
|
|
+static void
|
|
+do_thr_exit(CPUArchState *cpu_env, abi_ulong state_addr)
|
|
+{
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_kill(long tid, int sig)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_kill2));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_kill2(pid_t pid, long tid, int sig)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_kill2));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_suspend(const struct timespec *timeout)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_suspend));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_wake(long tid)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_wake));
|
|
+}
|
|
+
|
|
+static int
|
|
+do_thr_set_name(long tid, char *name)
|
|
+{
|
|
+ return (unimplemented(TARGET_FREEBSD_NR_thr_set_name));
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_USE_NPTL */
|
|
+
|
|
+static int
|
|
+do_umtx_lock(abi_ulong umtx_addr, uint32_t id)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ for (;;) {
|
|
+ ret = get_errno(_umtx_op(g2h(umtx_addr +
|
|
+ offsetof(struct target_umtx, u_owner)),
|
|
+ UMTX_OP_MUTEX_WAIT, UMTX_UNOWNED, 0, 0));
|
|
+ if (ret)
|
|
+ return (ret);
|
|
+ if (atomic_cmpset_acq_32(g2h(umtx_addr +
|
|
+ offsetof(struct target_umtx, u_owner)),
|
|
+ UMTX_UNOWNED, id))
|
|
+ return (0);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int
|
|
+do_umtx_unlock(abi_ulong umtx_addr, uint32 id)
|
|
+{
|
|
+ uint32_t owner;
|
|
+
|
|
+ do {
|
|
+ if (get_user_u32(owner, umtx_addr +
|
|
+ offsetof(struct target_umtx, u_owner)))
|
|
+ return (-TARGET_EFAULT);
|
|
+ if (owner != id)
|
|
+ return (-TARGET_EPERM);
|
|
+ } while (!atomic_cmpset_rel_32(g2h(umtx_addr +
|
|
+ offsetof(struct target_umtx, u_owner)), owner,
|
|
+ UMUTEX_UNOWNED));
|
|
+
|
|
+ return (0);
|
|
+}
|
|
+
|
|
+
|
|
/* do_syscall() should always have a single exit point at the end so
|
|
that actions, such as logging of syscall results, can be performed.
|
|
All errnos that do_syscall() returns must be -TARGET_<errcode>. */
|
|
@@ -4091,6 +4491,23 @@ do_stat:
|
|
break;
|
|
#endif
|
|
|
|
+#ifdef TARGET_FREEBSD_NR_getdomainname
|
|
+ case TARGET_FREEBSD_NR_getdomainname:
|
|
+ ret = unimplemented(num);
|
|
+ break;
|
|
+#endif
|
|
+#ifdef TARGET_FREEBSD_NR_setdomainname
|
|
+ case TARGET_FREEBSD_NR_setdomainname:
|
|
+ ret = unimplemented(num);
|
|
+ break;
|
|
+#endif
|
|
+#ifdef TARGET_FREEBSD_NR_uname
|
|
+ case TARGET_FREEBSD_NR_uname:
|
|
+ ret = unimplemented(num);
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+
|
|
#if 0 /* XXX not supported in libc yet, it seems (10.0 addition). */
|
|
case TARGET_FREEBSD_NR_posix_fadvise:
|
|
{
|
|
@@ -4136,6 +4553,211 @@ do_stat:
|
|
break;
|
|
#endif
|
|
|
|
+ case TARGET_FREEBSD_NR_thr_new:
|
|
+ ret = do_thr_new(cpu_env, arg1, arg2);
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_create:
|
|
+ {
|
|
+ ucontext_t ucxt;
|
|
+ long tid;
|
|
+
|
|
+ ret = do_thr_create(cpu_env, &ucxt, &tid, arg3);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_set_name:
|
|
+ if (!(p = lock_user_string(arg2)))
|
|
+ goto efault;
|
|
+ ret = do_thr_set_name(arg1, p);
|
|
+ unlock_user(p, arg2, 0);
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_self:
|
|
+ {
|
|
+ long tid;
|
|
+
|
|
+ if ((ret = do_thr_self(&tid)) == 0) {
|
|
+ if (put_user((abi_long)tid, arg1, abi_long))
|
|
+ goto efault;
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_suspend:
|
|
+ {
|
|
+ struct timespec ts;
|
|
+
|
|
+ if (target_to_host_timespec(&ts, arg1))
|
|
+ goto efault;
|
|
+
|
|
+ ret = do_thr_suspend(&ts);
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_wake:
|
|
+ ret = do_thr_wake(arg1);
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_kill:
|
|
+ ret = do_thr_kill(arg1, arg2);
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_kill2:
|
|
+ ret = do_thr_kill2(arg1, arg2, arg3);
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_thr_exit:
|
|
+ ret = 0; /* suspress compile warning */
|
|
+ do_thr_exit(cpu_env, arg1);
|
|
+ /* Shouldn't be reached. */
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_rtprio_thread:
|
|
+ ret = 0;
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_getcontext:
|
|
+ {
|
|
+ target_ucontext_t *ucp;
|
|
+ sigset_t sigmask;
|
|
+
|
|
+ if (0 == arg1) {
|
|
+ ret = -TARGET_EINVAL;
|
|
+ } else {
|
|
+ ret = get_errno(sigprocmask(0, NULL, &sigmask));
|
|
+ if (!is_error(ret)) {
|
|
+ if (!(ucp = lock_user(VERIFY_WRITE, arg1,
|
|
+ sizeof(target_ucontext_t), 0)))
|
|
+ goto efault;
|
|
+ ret = get_mcontext(cpu_env, &ucp->uc_mcontext,
|
|
+ TARGET_MC_GET_CLEAR_RET);
|
|
+ host_to_target_sigset(&ucp->uc_sigmask,
|
|
+ &sigmask);
|
|
+ memset(ucp->__spare__, 0,
|
|
+ sizeof(ucp->__spare__));
|
|
+ unlock_user(ucp, arg1,
|
|
+ sizeof(target_ucontext_t));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_setcontext:
|
|
+ {
|
|
+ target_ucontext_t *ucp;
|
|
+ sigset_t sigmask;
|
|
+
|
|
+ if (0 == arg1) {
|
|
+ ret = -TARGET_EINVAL;
|
|
+ } else {
|
|
+ if (!(ucp = lock_user(VERIFY_READ, arg1,
|
|
+ sizeof(target_ucontext_t), 1)))
|
|
+ goto efault;
|
|
+ ret = set_mcontext(cpu_env, &ucp->uc_mcontext, 0);
|
|
+ target_to_host_sigset(&sigmask, &ucp->uc_sigmask);
|
|
+ unlock_user(ucp, arg1, sizeof(target_ucontext_t));
|
|
+ if (0 == ret)
|
|
+ (void)sigprocmask(SIG_SETMASK, &sigmask, NULL);
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ case TARGET_FREEBSD_NR_swapcontext:
|
|
+ /*
|
|
+ * XXX Does anything besides old implementations of
|
|
+ * setjmp()/longjmp() uses these?
|
|
+ */
|
|
+ ret = unimplemented(num);
|
|
+ break;
|
|
+
|
|
+    case TARGET_FREEBSD_NR__umtx_lock:
+        {
+            long tid;
+
+            thr_self(&tid);
+            ret = do_umtx_lock(arg1, tswap32(tid));
+        }
+        break;
+
+    case TARGET_FREEBSD_NR__umtx_unlock:
+        {
+            long tid;
+
+            thr_self(&tid);
+            ret = do_umtx_unlock(arg1, tswap32(tid));
+        }
+        break;
+
+    case TARGET_FREEBSD_NR__umtx_op:
+        {
+            struct timespec ts;
+            void *object = NULL;
+            int operation;
+            void *addr = NULL;
+            void *addr2 = NULL;
+
+
+            /* int _umtx_op(void *obj, int op, u_long val,
+             * void *uaddr, void *uaddr2); */
+
+            abi_ulong obj = arg1;
+            int op = (int)arg2;
+            u_long val = arg3;
+            /* abi_ulong uaddr = arg4; */
+            abi_ulong uaddr2 = arg5;
+
+            switch(op) {
+            case TARGET_UMTX_OP_LOCK:
+                ret = do_umtx_lock(obj, tswap32((uint32_t)val));
+                break;
+
+            case TARGET_UMTX_OP_UNLOCK:
+                ret = do_umtx_unlock(obj, tswap32((uint32_t)val));
+                break;
+
+            case TARGET_UMTX_OP_WAIT:
+                if (uaddr2) {
+                    if (target_to_host_timespec(&ts, uaddr2))
+                        goto efault;
+                    addr2 = (void *)&ts;
+                }
+                ret = get_errno(_umtx_op(g2h(obj), UMTX_OP_WAIT,
+                        tswap32(val), addr, addr2));
+                break;
+
+            case TARGET_UMTX_OP_WAKE:
+                operation = UMTX_OP_WAKE;
+                object = g2h(obj);
+                ret = get_errno(_umtx_op(g2h(obj), UMTX_OP_WAKE,
+                        val, 0, 0));
+                break;
+
+            case TARGET_UMTX_OP_MUTEX_TRYLOCK:
+            case TARGET_UMTX_OP_MUTEX_LOCK:
+            case TARGET_UMTX_OP_MUTEX_UNLOCK:
+            case TARGET_UMTX_OP_SET_CEILING:
+            case TARGET_UMTX_OP_CV_WAIT:
+            case TARGET_UMTX_OP_CV_SIGNAL:
+            case TARGET_UMTX_OP_CV_BROADCAST:
+            case TARGET_UMTX_OP_WAIT_UINT:
+            case TARGET_UMTX_OP_RW_RDLOCK:
+            case TARGET_UMTX_OP_RW_WRLOCK:
+            case TARGET_UMTX_OP_RW_UNLOCK:
+            case TARGET_UMTX_OP_WAIT_UINT_PRIVATE:
+            case TARGET_UMTX_OP_WAKE_PRIVATE:
+            case TARGET_UMTX_OP_MUTEX_WAIT:
+            case TARGET_UMTX_OP_MUTEX_WAKE:
+            case TARGET_UMTX_OP_SEM_WAIT:
+            case TARGET_UMTX_OP_SEM_WAKE:
+            case TARGET_UMTX_OP_NWAKE_PRIVATE:
+            default:
+                ret = -TARGET_EINVAL;
+                break;
+            }
+        }
+        break;
+
     case TARGET_FREEBSD_NR_yield:
     case TARGET_FREEBSD_NR_sched_setparam:
     case TARGET_FREEBSD_NR_sched_getparam:
@@ -4146,36 +4768,18 @@ do_stat:
     case TARGET_FREEBSD_NR_sched_get_priority_min:
     case TARGET_FREEBSD_NR_sched_rr_get_interval:

-
     case TARGET_FREEBSD_NR_reboot:
     case TARGET_FREEBSD_NR_shutdown:

     case TARGET_FREEBSD_NR_swapon:
     case TARGET_FREEBSD_NR_swapoff:

-    case TARGET_FREEBSD_NR_thr_create:
-    case TARGET_FREEBSD_NR_thr_exit:
-    case TARGET_FREEBSD_NR_thr_self:
-    case TARGET_FREEBSD_NR_thr_suspend:
-    case TARGET_FREEBSD_NR_thr_wake:
-    case TARGET_FREEBSD_NR_thr_new:
-    case TARGET_FREEBSD_NR_thr_set_name:
-    case TARGET_FREEBSD_NR_thr_kill2:
-
-    case TARGET_FREEBSD_NR_getcontext:
-    case TARGET_FREEBSD_NR_setcontext:
-    case TARGET_FREEBSD_NR_swapcontext:
-
-    case TARGET_FREEBSD_NR_rtprio_thread:
     case TARGET_FREEBSD_NR_cpuset:
     case TARGET_FREEBSD_NR_cpuset_getid:
     case TARGET_FREEBSD_NR_cpuset_setid:
     case TARGET_FREEBSD_NR_cpuset_getaffinity:
     case TARGET_FREEBSD_NR_cpuset_setaffinity:

-    case TARGET_FREEBSD_NR__umtx_lock:
-    case TARGET_FREEBSD_NR__umtx_unlock:
-
     case TARGET_FREEBSD_NR_rctl_get_racct:
     case TARGET_FREEBSD_NR_rctl_get_rules:
     case TARGET_FREEBSD_NR_rctl_add_rule:
@@ -4185,16 +4789,6 @@ do_stat:
     case TARGET_FREEBSD_NR_ntp_adjtime:
     case TARGET_FREEBSD_NR_ntp_gettime:

-#ifdef TARGET_FREEBSD_NR_getdomainname
-    case TARGET_FREEBSD_NR_getdomainname:
-#endif
-#ifdef TARGET_FREEBSD_NR_setdomainname
-    case TARGET_FREEBSD_NR_setdomainname:
-#endif
-#ifdef TARGET_FREEBSD_NR_uname
-    case TARGET_FREEBSD_NR_uname:
-#endif
-
     case TARGET_FREEBSD_NR_sctp_peeloff:
     case TARGET_FREEBSD_NR_sctp_generic_sendmsg:
     case TARGET_FREEBSD_NR_sctp_generic_recvmsg:
diff --git a/bsd-user/syscall_defs.h b/bsd-user/syscall_defs.h
index ea1d25d..2879d83 100644
--- a/bsd-user/syscall_defs.h
+++ b/bsd-user/syscall_defs.h
@@ -416,6 +416,11 @@ struct target_shmid_ds {
     abi_ulong shm_ctime; /* time of last change by shmctl() */
 };

+#define TARGET_UCONTEXT_MAGIC 0xACEDBADE
+#define TARGET_MC_GET_CLEAR_RET 0x0001
+#define TARGET_MC_ADD_MAGIC 0x0002
+#define TARGET_MC_SET_ONSTACK 0x0004
+
 /* this struct defines a stack used during syscall handling */
 typedef struct target_sigaltstack {
     abi_long ss_sp;
@@ -477,95 +482,6 @@ typedef struct target_siginfo {
     } _reason;
 } target_siginfo_t;

-#if defined(TARGET_MIPS)
-
-struct target_sigcontext {
-    target_sigset_t sc_mask; /* signal mask to retstore */
-    int32_t sc_onstack; /* sigstack state to restore */
-    abi_long sc_pc; /* pc at time of signal */
-    abi_long sc_reg[32]; /* processor regs 0 to 31 */
-    abi_long mullo, mulhi; /* mullo and mulhi registers */
-    int32_t sc_fpused; /* fp has been used */
-    abi_long sc_fpregs[33]; /* fp regs 0 to 31 & csr */
-    abi_long sc_fpc_eir; /* fp exception instr reg */
-    /* int32_t reserved[8]; */
-};
-
-typedef struct target_mcontext {
-    int32_t mc_onstack; /* sigstack state to restore */
-    abi_long mc_pc; /* pc at time of signal */
-    abi_long mc_regs[32]; /* process regs 0 to 31 */
-    abi_long sr; /* status register */
-    abi_long mullo, mulhi;
-    int32_t mc_fpused; /* fp has been used */
-    abi_long mc_fpregs[33]; /* fp regs 0 to 32 & csr */
-    abi_long mc_fpc_eir; /* fp exception instr reg */
-    abi_ulong mc_tls; /* pointer to TLS area */
-} target_mcontext_t;
-
-typedef struct target_ucontext {
-    target_sigset_t uc_sigmask;
-    target_mcontext_t uc_mcontext;
-    target_ulong uc_link;
-    target_stack_t uc_stack;
-    int32_t uc_flags;
-    int32_t __space__[8];
-} target_ucontext_t;
-
-struct target_sigframe {
-    abi_ulong sf_signum;
-    abi_ulong sf_siginfo; /* code or pointer to sf_si */
-    abi_ulong sf_ucontext; /* points to sf_uc */
-    abi_ulong sf_addr; /* undocumented 4th arg */
-    target_ucontext_t sf_uc; /* = *sf_uncontext */
-    target_siginfo_t sf_si; /* = *sf_siginfo (SA_SIGINFO case)*/
-    uint32_t __spare__[2];
-};
-
-#elif defined(TARGET_SPARC64)
-
-struct target_mcontext {
-    uint64_t mc_global[8];
-    uint64_t mc_out[8];
-    uint64_t mc_local[8];
-    uint64_t mc_in[8];
-    uint32_t mc_fp[64];
-} __aligned(64);
-
-typedef struct target_mcontext target_mcontext_t;
-
-typedef struct target_ucontext {
-    target_sigset_t uc_sigmask;
-    target_mcontext_t uc_mcontext;
-    target_ulong uc_link;
-    target_stack_t uc_stack;
-    int32_t uc_flags;
-    int32_t __space__[8];
-} target_ucontext_t;
-
-struct target_sigframe {
-    target_ucontext_t sf_uc;
-    target_siginfo_t sf_si;
-};
-
-#else
-
-typedef target_ulong target_mcontext_t; /* dummy */
-
-#endif
-
-/* XXX where did this come from?
-typedef struct target_ucontext {
-    target_ulong uc_flags;
-    target_ulong uc_link;
-    target_stack_t uc_stack;
-    target_mcontext_t uc_mcontext;
-    target_ulong uc_filer[80];
-    target_sigset_t uc_sigmask;
-} target_ucontext_t;
-*/
-
-
 #ifdef BSWAP_NEEDED
 static inline void
 tswap_sigset(target_sigset_t *d, const target_sigset_t *s)
@@ -603,3 +519,101 @@ void host_to_target_old_sigset(abi_ulong *old_sigset, const sigset_t *sigset);
 void target_to_host_old_sigset(sigset_t *sigset, const abi_ulong *old_sigset);
 int do_sigaction(int sig, const struct target_sigaction *act,
     struct target_sigaction *oact);
+
+
+/*
+ * FreeBSD thread support.
+ */
+
+#define TARGET_THR_SUSPENDED 0x0001
+#define TARGET_THR_SYSTEM_SCOPE 0x0002
+
+/* sysarch() ops */
+#define TARGET_MIPS_SET_TLS 1
+#define TARGET_MIPS_GET_TLS 2
+
+struct target_thr_param {
+    abi_ulong start_func; /* thread entry function. */
+    abi_ulong arg; /* argument for entry function. */
+    abi_ulong stack_base; /* stack base address. */
+    abi_ulong stack_size; /* stack size. */
+    abi_ulong tls_base; /* tls base address. */
+    abi_ulong tls_size; /* tls size. */
+    abi_ulong child_tid; /* address to store new TID. */
+    abi_ulong parent_tid; /* parent access the new TID here. */
+    abi_ulong rtp; /* Real-time scheduling priority. */
+    abi_ulong spare[3]; /* spares. */
+};
+
+struct target_rtprio {
+    uint16_t type;
+    uint16_t prio;
+};
+
+/*
+ * sys/_umtx.h
+ */
+
+struct target_umtx {
+    uint32_t u_owner; /* Owner of the mutex. */
+};
+
+struct target_umutex {
+    uint32_t m_owner; /* Owner of the mutex */
+    uint32_t m_flags; /* Flags of the mutex */
+    uint32_t m_ceiling[2]; /* Priority protect ceiling */
+    uint32_t m_spare[4];
+};
+
+struct target_ucond {
+    uint32_t c_has_waiters; /* Has waiters in kernel */
+    uint32_t c_flags; /* Flags of the condition variable */
+    uint32_t c_clockid; /* Clock id */
+    uint32_t c_spare[1];
+};
+
+struct target_urwlock {
+    int32_t rw_state;
+    uint32_t rw_flags;
+    uint32_t rw_blocked_readers;
+    uint32_t rw_blocked_writers;
+    uint32_t rw_spare[4];
+};
+
+struct target__usem {
+    uint32_t _has_waiters;
+    uint32_t _count;
+    uint32_t _flags;
+};
+
+/*
+ * sys/umtx.h
+ */
+
+/* op code for _umtx_op */
+#define TARGET_UMTX_OP_LOCK 0
+#define TARGET_UMTX_OP_UNLOCK 1
+#define TARGET_UMTX_OP_WAIT 2
+#define TARGET_UMTX_OP_WAKE 3
+#define TARGET_UMTX_OP_MUTEX_TRYLOCK 4
+#define TARGET_UMTX_OP_MUTEX_LOCK 5
+#define TARGET_UMTX_OP_MUTEX_UNLOCK 6
+#define TARGET_UMTX_OP_SET_CEILING 7
+#define TARGET_UMTX_OP_CV_WAIT 8
+#define TARGET_UMTX_OP_CV_SIGNAL 9
+#define TARGET_UMTX_OP_CV_BROADCAST 10
+#define TARGET_UMTX_OP_WAIT_UINT 11
+#define TARGET_UMTX_OP_RW_RDLOCK 12
+#define TARGET_UMTX_OP_RW_WRLOCK 13
+#define TARGET_UMTX_OP_RW_UNLOCK 14
+#define TARGET_UMTX_OP_WAIT_UINT_PRIVATE 15
+#define TARGET_UMTX_OP_WAKE_PRIVATE 16
+#define TARGET_UMTX_OP_MUTEX_WAIT 17
+#define TARGET_UMTX_OP_MUTEX_WAKE 18
+#define TARGET_UMTX_OP_SEM_WAIT 19
+#define TARGET_UMTX_OP_SEM_WAKE 20
+#define TARGET_UMTX_OP_NWAKE_PRIVATE 21
+#define TARGET_UMTX_OP_MAX 22
+
+/* flags for UMTX_OP_CV_WAIT */
+#define TARGET_CHECK_UNPARKING 0x01
diff --git a/bsd-user/x86_64/target_signal.h b/bsd-user/x86_64/target_signal.h
index ea89f5a..a14e0b9 100644
--- a/bsd-user/x86_64/target_signal.h
+++ b/bsd-user/x86_64/target_signal.h
@@ -15,4 +15,29 @@ static inline abi_ulong get_sp_from_cpustate(CPUX86State *state)
 #define TARGET_MINSIGSTKSZ (512 * 4)
 #define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768)

+typedef target_ulong target_mcontext_t; /* dummy */
+
+typedef struct target_ucontext {
+    target_sigset_t uc_sigmask;
+    target_mcontext_t uc_mcontext;
+    abi_ulong uc_link;
+    target_stack_t uc_stack;
+    int32_t uc_flags;
+    int32_t __spare__[4];
+} target_ucontext_t;
+
+static inline int
+get_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
+{
+    fprintf(stderr, "x86_64 doesn't have support for get_mcontext()\n");
+    return (-TARGET_ENOSYS);
+}
+
+static inline int
+set_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags)
+{
+    fprintf(stderr, "x86_64 doesn't have support for set_mcontext()\n");
+    return (-TARGET_ENOSYS);
+}
+
 #endif /* TARGET_SIGNAL_H */
diff --git a/configure b/configure
index 34eca43..be75584 100755
--- a/configure
+++ b/configure
@@ -1386,6 +1386,11 @@ fi

 if test "$nptl" != "no" ; then
   cat > $TMPC <<EOF
+#ifdef __FreeBSD__
+int main(void) {
+    return (0);
+}
+#else
 #include <sched.h>
 #include <linux/futex.h>
 int main(void) {
@@ -1394,6 +1399,7 @@ int main(void) {
 #endif
   return 0;
 }
+#endif
 EOF

  if compile_object ; then
@@ -3751,5 +3757,6 @@ case "$target_arch2" in
     TARGET_ARCH=mips64
     TARGET_BASE_ARCH=mips
     echo "TARGET_ABI_MIPSN64=y" >> $config_target_mak
+    target_nptl="yes"
     target_long_alignment=8
   ;;