Update to 0.7.2s.20050909.

PR:		ports/85947
Submitted by:	Juergen Lock <nox@jelal.kn-bremen.de> (maintainer)
Norikatsu Shigemura 2005-09-10 17:04:42 +00:00
parent 70827272de
commit 21c9816960
Notes (svn2git, 2021-03-31 03:12:20 +00:00):
	svn path=/head/; revision=142364
18 changed files with 1232 additions and 1332 deletions

View file

@@ -6,12 +6,12 @@
#
PORTNAME= qemu
PORTVERSION= 0.7.0s.20050717
PORTVERSION= 0.7.2s.20050909
CATEGORIES= emulators
MASTER_SITES= http://www.qemu.org/ \
http://people.fruitsalad.org/nox/qemu/ \
http://dad-answers.com/qemu/
DISTNAME= ${PORTNAME}-snapshot-2005-07-17_23
DISTNAME= ${PORTNAME}-snapshot-2005-09-09_23
EXTRACT_ONLY= ${DISTNAME}${EXTRACT_SUFX}
MAINTAINER= nox@jelal.kn-bremen.de
@@ -23,8 +23,9 @@ RUN_DEPENDS+= ${LOCALBASE}/sbin/smbd:${PORTSDIR}/net/samba
.endif
.if defined(WITH_KQEMU)
DISTKQEMU= kqemu-0.6.2-1.tar.gz
DISTKQEMU= kqemu-0.7.2.tar.gz
DISTFILES= ${EXTRACT_ONLY} ${DISTKQEMU}
EXTRA_PATCHES= ${FILESDIR}/kqemu-freebsd-patch
.endif
HAS_CONFIGURE= yes
@@ -40,9 +41,11 @@ MAN1= qemu.1 qemu-img.1
ONLY_FOR_ARCHS= amd64 i386
.if defined(WITH_KQEMU)
NO_PACKAGE= Depends on kernel, and module not redistributable
CONFIGURE_ARGS+= --enable-kqemu
PLIST_SUB= WITH_KQEMU=""
PLIST_SUB+= KMODDIR=${KMODDIR}
.else
CONFIGURE_ARGS+= --disable-kqemu
PLIST_SUB= WITH_KQEMU="@comment "
.endif
@@ -52,7 +55,7 @@ PLIST_SUB= WITH_KQEMU="@comment "
.if ${ARCH} == "amd64"
ARCH= x86_64
.if ${OSVERSION} >= 502126
.if ${OSVERSION} >= 502126 && ${OSVERSION} <= 600029
BUILD_DEPENDS+= gcc34:${PORTSDIR}/lang/gcc34
GCCVERSION= 030402
CC= gcc34
@@ -63,16 +66,12 @@ USE_GCC= 3.4
USE_GCC= 3.4
.endif
.if defined(WITH_KQEMU) && ${ARCH} != "i386"
IGNORE= kqemu only supported on i386
.endif
.if defined(WITH_KQEMU) && !exists(${SRC_BASE}/sys/Makefile)
IGNORE= kqemu requires kernel source to be installed
.endif
pre-everything::
.if !defined(WITH_KQEMU) && ${ARCH} == "i386"
.if !defined(WITH_KQEMU)
@${ECHO_MSG} "Notice: you can build qemu with the (alpha!) kqemu accelerator kernel module"
@${ECHO_MSG} "by defining WITH_KQEMU."
.endif
@@ -85,7 +84,7 @@ pre-everything::
.if defined(WITH_KQEMU)
post-extract:
@cd ${WRKSRC} && ${TAR} xfz ${_DISTDIR}/${DISTKQEMU}
@${CP} ${FILESDIR}/BSDmakefile ${FILESDIR}/kmod_bsd.c ${WRKSRC}/kqemu
@${LN} -s Makefile.freebsd ${WRKSRC}/kqemu/BSDmakefile
.endif
pre-patch:
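
For context, WITH_KQEMU is an ordinary ports knob: with the Makefile changes above, a user enables the accelerator at build time and then loads the module by hand. A minimal usage sketch (run from whichever qemu port directory is being built; commands are illustrative, not part of the port):

    # make -DWITH_KQEMU install clean
    # kldload kqemu
    # kldstat | grep kqemu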

View file

@@ -1,4 +1,4 @@
MD5 (qemu-snapshot-2005-07-17_23.tar.bz2) = 5d21295c1f328ea00de19a54715ee7c3
SIZE (qemu-snapshot-2005-07-17_23.tar.bz2) = 1114748
MD5 (kqemu-0.6.2-1.tar.gz) = c6bb3b40fb3d526d731eb0f1f9dee7ee
SIZE (kqemu-0.6.2-1.tar.gz) = 21002
MD5 (qemu-snapshot-2005-09-09_23.tar.bz2) = db4ffeb081666c7352f5c0231e3f09c7
SIZE (qemu-snapshot-2005-09-09_23.tar.bz2) = 1122120
MD5 (kqemu-0.7.2.tar.gz) = 02cfdecda90458d6393781496ec6b48b
SIZE (kqemu-0.7.2.tar.gz) = 79314

View file

@@ -1,9 +0,0 @@
KMOD= kqemu
SRCS= kmod_bsd.c
OBJS= kqemu-mod-i386.o
.if ${OSVERSION} >= 500000
CC= cc
.endif
WERROR=
.include <bsd.kmod.mk>

View file

@@ -1,642 +0,0 @@
/*
* FreeBSD kernel wrapper for KQEMU
* Copyright (c) 2005 Antony T Curtis
*
* Based upon the Linux wrapper by Fabrice Bellard
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#if __FreeBSD_version < 500000
#include <sys/buf.h>
#endif
#include <sys/uio.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#if __FreeBSD_version > 500000
#include <sys/ktr.h>
#include <sys/sched.h>
#endif
#include <sys/ioccom.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/module.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <machine/stdarg.h>
#define __KERNEL__
#include "kqemu.h"
static unsigned long cache_page(vm_offset_t paddr, caddr_t addr);
static caddr_t find_page(vm_offset_t paddr, int free);
static MALLOC_DEFINE(M_KQEMU, "KQEMU", "KQEMU Resources");
struct pagecache {
caddr_t addr;
};
static struct pagecache **pagecache;
#if __FreeBSD_version > 500000
static struct mtx cache_lock;
#endif
static unsigned long cache_page(vm_offset_t paddr, caddr_t addr)
{
unsigned long ppn = (unsigned long)(paddr >> PAGE_SHIFT);
int pci = (int)(ppn >> 10);
struct pagecache *cache;
#if __FreeBSD_version > 500000
mtx_lock_spin(&cache_lock);
#endif
if (!(cache = pagecache[pci])) {
if (!addr) {
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return 0;
}
cache = pagecache[pci] = (struct pagecache *)
kqemu_vmalloc(1024 * sizeof(struct pagecache));
memset(cache, 0, 1024 * sizeof(struct pagecache));
}
if (!addr) {
int i;
cache[ppn & 1023].addr = (caddr_t) 0;
for (i = 1023; i >= 0; i--, cache++)
if (cache->addr)
break;
if (i < 0) {
kqemu_vfree(pagecache[pci]);
pagecache[pci] = 0;
}
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return 0;
}
cache[ppn & 1023].addr = (caddr_t) (((unsigned long) addr) & ~PAGE_MASK);
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return ppn;
}
static caddr_t find_page(vm_offset_t paddr, int free)
{
unsigned long ppn = (unsigned long)(paddr >> PAGE_SHIFT);
struct pagecache *cache;
caddr_t addr;
#if __FreeBSD_version > 500000
mtx_lock_spin(&cache_lock);
#endif
if (!(cache = pagecache[ppn >> 10])) {
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return 0;
}
addr = (caddr_t)(((unsigned long)cache[ppn & 1023].addr)
| ((unsigned long)paddr & PAGE_MASK));
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
if (free && addr)
cache_page(paddr, 0);
return addr;
}
/* lock the page at virtual address 'user_addr' and return its
page index. Return -1 if error */
unsigned long CDECL kqemu_lock_user_page(unsigned long user_addr)
{
int rc;
caddr_t addr = (caddr_t) user_addr;
vm_page_t m;
vm_offset_t paddr;
/*kqemu_log("kqemu_lock_user_page(0x%08x)\n", addr);*/
rc = vm_fault_quick(addr, VM_PROT_READ|VM_PROT_WRITE);
if (rc < 0) {
/*kqemu_log("vm_fault_quick failed rc=%d\n",rc);*/
return -1;
}
paddr = vtophys(addr);
m = PHYS_TO_VM_PAGE(paddr);
vm_page_wire(m);
return cache_page(paddr, addr);
}
void CDECL kqemu_unlock_user_page(unsigned long page_index)
{
vm_page_t m;
vm_offset_t paddr;
/*kqemu_log("kqemu_unlock_user_page(0x%08x)\n",page_index);*/
paddr = (vm_offset_t)(page_index << PAGE_SHIFT);
m = PHYS_TO_VM_PAGE(paddr);
vm_page_unwire(m, 1);
cache_page(paddr, 0);
}
unsigned long CDECL kqemu_alloc_zeroed_page(void)
{
void *addr;
vm_offset_t paddr;
/*kqemu_log("kqemu_alloc_zeroed_page()\n");*/
addr = contigmalloc(PAGE_SIZE, M_KQEMU, M_WAITOK, 0, ~0ul, PAGE_SIZE, 0);
if (!addr) {
/*kqemu_log("contigmalloc failed\n");*/
return -1;
}
memset(addr, 0, PAGE_SIZE);
paddr = vtophys(addr);
return cache_page(paddr, addr);
}
void CDECL kqemu_free_page(unsigned long page_index)
{
vm_offset_t paddr;
caddr_t addr;
/*kqemu_log("kqemu_free_page(0x%08x)\n", page_index);*/
paddr = (vm_offset_t) (page_index << PAGE_SHIFT);
if ((addr = find_page(paddr,1))) {
contigfree((void *) addr, PAGE_SIZE, M_KQEMU);
}
}
void * CDECL kqemu_page_kaddr(unsigned long page_index)
{
vm_offset_t paddr;
/*kqemu_log("kqemu_page_kaddr(0x%08x)\n", page_index);*/
paddr = (vm_offset_t) (page_index << PAGE_SHIFT);
return (void *) find_page(paddr, 0);
}
/* contraint: each page of the vmalloced area must be in the first 4
GB of physical memory */
void * CDECL kqemu_vmalloc(unsigned int size)
{
/*kqemu_log("kqemu_vmalloc(0x%08x)\n", size);*/
return malloc(size, M_KQEMU, M_WAITOK);
}
void CDECL kqemu_vfree(void *ptr)
{
/*kqemu_log("kqemu_vfree(0x%08x)\n", ptr);*/
return free(ptr, M_KQEMU);
}
unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr)
{
caddr_t addr = (caddr_t)vaddr;
vm_offset_t paddr = vtophys(addr);
return cache_page(paddr, addr);
}
#if __FreeBSD_version < 500000
static int
curpriority_cmp(struct proc *p)
{
int c_class, p_class;
c_class = RTP_PRIO_BASE(curproc->p_rtprio.type);
p_class = RTP_PRIO_BASE(p->p_rtprio.type);
if (p_class != c_class)
return (p_class - c_class);
if (p_class == RTP_PRIO_NORMAL)
return (((int)p->p_priority - (int)curpriority) / PPQ);
return ((int)p->p_rtprio.prio - (int)curproc->p_rtprio.prio);
}
/* return TRUE if a signal is pending (i.e. the guest must stop
execution) */
int CDECL kqemu_schedule(void)
{
struct proc *p = curproc;
if (curpriority_cmp(p) > 0) {
int s = splhigh();
p->p_priority = MAXPRI;
setrunqueue(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
splx(s);
}
return issignal(curproc) != 0;
}
#else
/* return TRUE if a signal is pending (i.e. the guest must stop
execution) */
int CDECL kqemu_schedule(void)
{
struct thread *td = curthread;
struct proc *p = td->td_proc;
int rc;
mtx_lock_spin(&sched_lock);
sched_prio(td, td->td_ksegrp->kg_user_pri);
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
rc = cursig(td);
mtx_unlock(&p->p_sigacts->ps_mtx);
PROC_UNLOCK(p);
return rc;
}
#endif
static char log_buf[4096];
void CDECL kqemu_log(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vsnprintf(log_buf, sizeof(log_buf), fmt, ap);
printf("kqemu: %s", log_buf);
va_end(ap);
}
/*********************************************************/
#define KQEMU_MAX_INSTANCES 4
struct kqemu_instance {
#if __FreeBSD_version > 500000
TAILQ_ENTRY(kqemu_instance) kqemu_ent;
struct cdev *kqemu_dev;
#endif
struct kqemu_state *state;
};
static int kqemu_ref_count = 0;
static int max_locked_pages;
#if __FreeBSD_version < 500000
static dev_t kqemu_dev;
#else
static struct clonedevs *kqemuclones;
static TAILQ_HEAD(,kqemu_instance) kqemuhead = TAILQ_HEAD_INITIALIZER(kqemuhead);
static eventhandler_tag clonetag;
#endif
static d_open_t kqemu_open;
static d_close_t kqemu_close;
static d_ioctl_t kqemu_ioctl;
static struct cdevsw kqemu_cdevsw = {
#if __FreeBSD_version < 500000
/* open */ kqemu_open,
/* close */ kqemu_close,
/* read */ noread,
/* write */ nowrite,
/* ioctl */ kqemu_ioctl,
/* poll */ nopoll,
/* mmap */ nommap,
/* strategy */ nostrategy,
/* name */ "kqemu",
/* maj */ KQEMU_MAJOR,
/* dump */ nodump,
/* psize */ nopsize,
/* flags */ 0,
/* bmaj */ -1
#else
.d_version = D_VERSION,
.d_open = kqemu_open,
.d_close = kqemu_close,
.d_ioctl = kqemu_ioctl,
.d_name = "kqemu",
#ifdef D_NEEDGIANT
.d_flags = D_NEEDGIANT,
#endif
#endif
};
#if __FreeBSD_version > 500000
static void
kqemu_clone(void *arg, char *name, int namelen, struct cdev **dev)
{
int unit, r;
if (*dev != NULL)
return;
if (strcmp(name, "kqemu") == 0)
unit = -1;
else if (dev_stdclone(name, NULL, "kqemu", &unit) != 1)
return; /* Bad name */
if (unit != -1 && unit > KQEMU_MAX_INSTANCES)
return;
r = clone_create(&kqemuclones, &kqemu_cdevsw, &unit, dev, 0);
if (r) {
*dev = make_dev(&kqemu_cdevsw, unit2minor(unit),
UID_ROOT, GID_WHEEL, 0660, "kqemu%d", unit);
if (*dev != NULL) {
dev_ref(*dev);
(*dev)->si_flags |= SI_CHEAPCLONE;
}
}
}
#endif
static void kqemu_destroy(struct kqemu_instance *ks)
{
struct cdev *dev = ks->kqemu_dev;
if (ks->state) {
kqemu_delete(ks->state);
ks->state = NULL;
}
free(ks, M_KQEMU);
dev->si_drv1 = NULL;
#if __FreeBSD_version > 500000
mtx_lock_spin(&cache_lock);
TAILQ_REMOVE(&kqemuhead, ks, kqemu_ent);
#endif
if (!--kqemu_ref_count) {
int i;
for (i = 1023; i >= 0; i--)
kqemu_vfree(pagecache[i]);
memset(pagecache, 0, 1024 * sizeof(void *));
}
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
destroy_dev(dev);
#endif
}
int
#if __FreeBSD_version < 500000
kqemu_open(dev, flags, fmt, p)
dev_t dev;
int flags, fmt;
struct proc *p;
{
#else
kqemu_open(dev, flags, fmt, td)
struct cdev *dev;
int flags, fmt;
struct thread *td;
{
struct proc *p = td->td_proc;
#endif
struct kqemu_instance *ks;
if (dev->si_drv1 || kqemu_ref_count >= KQEMU_MAX_INSTANCES)
return(EBUSY);
if ((flags & (FREAD|FWRITE)) == FREAD)
return(EPERM);
ks = (struct kqemu_instance *) malloc(sizeof(*ks), M_KQEMU, M_WAITOK);
if (ks == NULL)
return(ENOMEM);
memset(ks, 0, sizeof *ks);
dev->si_drv1 = ks;
#if __FreeBSD_version > 500000
ks->kqemu_dev = dev;
mtx_lock_spin(&cache_lock);
TAILQ_INSERT_TAIL(&kqemuhead, ks, kqemu_ent);
#endif
kqemu_ref_count++;
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
kqemu_log("opened by pid=%d\n", p->p_pid);
return(0);
}
int
#if __FreeBSD_version < 500000
kqemu_close(dev, flags, fmt, p)
dev_t dev;
int flags, fmt;
struct proc *p;
{
#else
kqemu_close(dev, flags, fmt, td)
struct cdev *dev;
int flags, fmt;
struct thread *td;
{
struct proc *p = td->td_proc;
#endif
struct kqemu_instance *ks = (struct kqemu_instance *) dev->si_drv1;
kqemu_destroy(ks);
kqemu_log("closed by pid=%d\n", p->p_pid);
return(0);
}
int
#if __FreeBSD_version < 500000
kqemu_ioctl(dev, cmd, cmdarg, flags, p)
dev_t dev;
unsigned long cmd;
caddr_t cmdarg;
int flags;
struct proc *p;
{
#else
kqemu_ioctl(dev, cmd, cmdarg, flags, td)
struct cdev *dev;
unsigned long cmd;
caddr_t cmdarg;
int flags;
struct thread *td;
{
#endif
struct kqemu_instance *ks = (struct kqemu_instance *) dev->si_drv1;
struct kqemu_state *s = ks->state;
long ret;
int error = 0;
switch (cmd) {
case KQEMU_INIT:
/*kqemu_log("KQEMU_INIT data=0x%08x\n",cmdarg);*/
{
if (s) {
error = (EIO);
break;
}
if (!(s = kqemu_init((struct kqemu_init *)cmdarg, max_locked_pages))) {
error = (ENOMEM);
break;
}
ks->state = s;
break;
}
case KQEMU_EXEC:
/*kqemu_log("KQEMU_EXEC data=0x%08x\n",cmdarg);*/
{
struct kqemu_cpu_state *ctx;
if (!s) {
error = (EIO);
break;
}
ctx = kqemu_get_cpu_state(s);
memcpy((void *)ctx, (void *)cmdarg, sizeof(struct kqemu_cpu_state));
ret = kqemu_exec(s);
#if __FreeBSD_version > 500000
td->td_retval[0] = ret;
#else
p->p_retval[0] = ret;
#endif
memcpy((void *)cmdarg, (void *)ctx, sizeof(struct kqemu_cpu_state));
break;
}
case KQEMU_GET_VERSION:
/*kqemu_log("KQEMU_GET_VERSION data=0x%08x\n",cmdarg);*/
{
*(int *)cmdarg = KQEMU_VERSION;
break;
}
default:
/*kqemu_log("ioctl unknown 0x%08x\n",cmd);*/
error = (ENXIO);
}
return(error);
}
static int
init_module(void)
{
#if __FreeBSD_version < 500000
int rc;
#endif
printf("QEMU Accelerator Module version %d.%d.%d, Copyright (c) 2005 Fabrice Bellard\n"
"FreeBSD wrapper port, Copyright (c) 2005 Antony T Curtis\n"
"This is a proprietary product. Read the LICENSE file for more information\n"
"Redistribution of this module is prohibited without authorization\n",
(KQEMU_VERSION >> 16),
(KQEMU_VERSION >> 8) & 0xff,
(KQEMU_VERSION) & 0xff);
if (!(pagecache = (struct pagecache **)
kqemu_vmalloc(1024 * sizeof(void *))))
return(ENOMEM);
memset(pagecache, 0, 1024 * sizeof(void *));
#if __FreeBSD_version > 500000
mtx_init(&cache_lock, "pagecache lock", NULL, MTX_SPIN);
#endif
max_locked_pages = physmem / (2 * KQEMU_MAX_INSTANCES);
if (max_locked_pages > 32768)
max_locked_pages = 32768;
#if __FreeBSD_version < 500000
if ((rc = cdevsw_add(&kqemu_cdevsw))) {
kqemu_log("error registering cdevsw, rc=%d\n", rc);
return(ENOENT);
}
kqemu_dev = make_dev(&kqemu_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, "kqemu");
#else
clone_setup(&kqemuclones);
clonetag = EVENTHANDLER_REGISTER(dev_clone, kqemu_clone, 0, 1000);
if (!clonetag)
return ENOMEM;
#endif
kqemu_log("KQEMU installed, max_instances=%d max_locked_mem=%dkB.\n",
KQEMU_MAX_INSTANCES, max_locked_pages * 4);
kqemu_ref_count = 0;
return 0;
}
static void
cleanup_module(void)
{
#if __FreeBSD_version < 500000
int rc;
#else
struct kqemu_instance *ks;
#endif
#if __FreeBSD_version < 500000
destroy_dev(kqemu_dev);
if ((rc = cdevsw_remove(&kqemu_cdevsw)))
kqemu_log("error unregistering, rc=%d\n", rc);
#else
EVENTHANDLER_DEREGISTER(dev_clone, clonetag);
mtx_lock_spin(&cache_lock);
while ((ks = TAILQ_FIRST(&kqemuhead)) != NULL) {
mtx_unlock_spin(&cache_lock);
kqemu_destroy(ks);
mtx_lock_spin(&cache_lock);
}
mtx_unlock_spin(&cache_lock);
mtx_destroy(&cache_lock);
clone_cleanup(&kqemuclones);
#endif
kqemu_vfree(pagecache);
pagecache = 0;
}
static int
kqemu_modevent(module_t mod, int type, void *data)
{
int err = 0;
switch (type) {
case MOD_LOAD:
err = init_module();
break;
case MOD_UNLOAD:
if (kqemu_ref_count > 0) {
err = EBUSY;
break;
}
/* fall through */
case MOD_SHUTDOWN:
cleanup_module();
break;
default:
err = EINVAL;
break;
}
return(err);
}
static moduledata_t kqemu_mod = {
"kqemu_driver",
kqemu_modevent,
NULL
};
DECLARE_MODULE(kqemu, kqemu_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);

View file

@@ -0,0 +1,506 @@
Index: qemu/kqemu/Makefile.freebsd
@@ -1,9 +1,13 @@
+# $Id: Makefile.freebsd,v 1.1 2005/04/17 17:21:31 bellard Exp $
KMOD= kqemu
SRCS= kqemu-freebsd.c
.if ${MACHINE_ARCH} == "i386"
OBJS= kqemu-mod-i386.o
.elif ${MACHINE_ARCH} == "amd64"
OBJS= kqemu-mod-x86_64.o
+.endif
+.if ${OSVERSION} >= 500000
+CC= cc
.endif
WERROR=
Index: qemu/kqemu/kqemu-freebsd.c
@@ -3,20 +3,33 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
+#include <sys/ctype.h>
+#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/resourcevar.h>
+#if __FreeBSD_version >= 500000
#include <sys/sched.h>
+#endif
#include <sys/signalvar.h>
#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#if __FreeBSD_version < 500000
+#include <sys/buf.h>
+#endif
+
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
#include <machine/vmparam.h>
#include <machine/stdarg.h>
@@ -25,10 +38,14 @@
MALLOC_DECLARE(M_KQEMU);
MALLOC_DEFINE(M_KQEMU, "kqemu", "kqemu buffers");
+int kqemu_debug;
+SYSCTL_INT(_debug, OID_AUTO, kqemu_debug, CTLFLAG_RW, &kqemu_debug, 0,
+ "kqemu debug flag");
+
#define USER_BASE 0x1000
/* lock the page at virtual address 'user_addr' and return its
- physical page index. Return -1 if error */
+ physical page index. Return NULL if error */
struct kqemu_user_page *CDECL kqemu_lock_user_page(unsigned long *ppage_index,
unsigned long user_addr)
{
@@ -37,14 +54,18 @@
vm_paddr_t pa = 0;
int ret;
pmap_t pmap;
+#if __FreeBSD_version >= 500000
ret = vm_map_wire(&vm->vm_map, va, va+PAGE_SIZE, VM_MAP_WIRE_USER);
+#else
+ ret = vm_map_user_pageable(&vm->vm_map, va, va+PAGE_SIZE, FALSE);
+#endif
if (ret != KERN_SUCCESS) {
- printf("kqemu_lock_user_page(%08lx) failed, ret=%d\n", user_addr, ret);
+ kqemu_log("kqemu_lock_user_page(%08lx) failed, ret=%d\n", user_addr, ret);
return NULL;
}
pmap = vm_map_pmap(&vm->vm_map);
pa = pmap_extract(pmap, va);
- // printf("kqemu_lock_user_page(%08lx) va=%08x pa=%08x\n", user_addr, va, pa);
+ // kqemu_log("kqemu_lock_user_page(%08lx) va=%08x pa=%08x\n", user_addr, va, pa);
*ppage_index = pa >> PAGE_SHIFT;
return (struct kqemu_user_page *)va;
}
@@ -54,12 +75,16 @@
struct vmspace *vm = curproc->p_vmspace;
vm_offset_t va;
int ret;
- // printf("kqemu_unlock_user_page(%08lx)\n", page_index);
+ // kqemu_log("kqemu_unlock_user_page(%08lx)\n", page_index);
va = (vm_offset_t)page;
+#if __FreeBSD_version >= 500000
ret = vm_map_unwire(&vm->vm_map, va, va+PAGE_SIZE, VM_MAP_WIRE_USER);
+#else
+ ret = vm_map_user_pageable(&vm->vm_map, va, va+PAGE_SIZE, TRUE);
+#endif
#if 0
if (ret != KERN_SUCCESS) {
- printf("kqemu_unlock_user_page(%08lx) failed, ret=%d\n", page_index, ret);
+ kqemu_log("kqemu_unlock_user_page(%08lx) failed, ret=%d\n", page_index, ret);
}
#endif
}
@@ -76,20 +101,21 @@
va = kmem_alloc(kernel_map, PAGE_SIZE);
if (va == 0) {
- printf("kqemu_alloc_zeroed_page: NULL\n");
- return -1;
+ kqemu_log("kqemu_alloc_zeroed_page: NULL\n");
+ return NULL;
}
pmap = vm_map_pmap(kernel_map);
pa = pmap_extract(pmap, va);
- // printf("kqemu_alloc_zeroed_page: %08x\n", pa);
+ // kqemu_log("kqemu_alloc_zeroed_page: %08x\n", pa);
*ppage_index = pa >> PAGE_SHIFT;
return (struct kqemu_page *)va;
}
void CDECL kqemu_free_page(struct kqemu_page *page)
{
- // printf("kqemu_free_page(%08lx)\n", page_index);
- /* XXX: do it */
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_free_page(%p)\n", page);
+ kmem_free(kernel_map, (vm_offset_t) page, PAGE_SIZE);
}
/* return kernel address of the physical page page_index */
@@ -103,42 +129,29 @@
GB of physical memory */
void * CDECL kqemu_vmalloc(unsigned int size)
{
- struct vmspace *vm = curproc->p_vmspace;
- vm_offset_t va = USER_BASE;
- int rv;
- if (size % PAGE_SIZE != 0) {
- printf("kqemu_vmalloc(%d) not a multiple of page size\n", size);
- return NULL;
- }
- rv = vm_map_find(&vm->vm_map, NULL, 0, &va, size, 1,
- VM_PROT_ALL, VM_PROT_ALL, 0);
- if (rv != KERN_SUCCESS) {
- printf("kqemu_vmalloc(%d) failed rv=%d\n", size, rv);
- return NULL;
- }
- printf("kqemu_vmalloc(%d): %08x\n", size, va);
- return (void *)va;
+ void *ptr = malloc(size, M_KQEMU, M_WAITOK);
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_vmalloc(%d): %p\n", size, ptr);
+ return ptr;
}
void CDECL kqemu_vfree(void *ptr)
{
- printf("kqemu_vfree(%p)\n", ptr);
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_vfree(%p)\n", ptr);
+ free(ptr, M_KQEMU);
}
/* return the physical page index for a given virtual page */
unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr)
{
- struct vmspace *vm = curproc->p_vmspace;
- vm_paddr_t pa;
- pmap_t pmap;
-
- pmap = vm_map_pmap(&vm->vm_map);
- pa = pmap_extract(pmap, (vm_offset_t)vaddr);
+ vm_paddr_t pa = vtophys(vaddr);
if (pa == 0) {
- printf("kqemu_vmalloc_to_phys(%p)->error\n", vaddr);
+ kqemu_log("kqemu_vmalloc_to_phys(%p)->error\n", vaddr);
return -1;
}
- printf("kqemu_vmalloc_to_phys(%p)->%08x\n", vaddr, pa);
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_vmalloc_to_phys(%p)->%08x\n", vaddr, pa);
return pa >> PAGE_SHIFT;
}
@@ -154,16 +167,48 @@
{
}
+#if __FreeBSD_version < 500000
+static int
+curpriority_cmp(struct proc *p)
+{
+ int c_class, p_class;
+
+ c_class = RTP_PRIO_BASE(curproc->p_rtprio.type);
+ p_class = RTP_PRIO_BASE(p->p_rtprio.type);
+ if (p_class != c_class)
+ return (p_class - c_class);
+ if (p_class == RTP_PRIO_NORMAL)
+ return (((int)p->p_priority - (int)curpriority) / PPQ);
+ return ((int)p->p_rtprio.prio - (int)curproc->p_rtprio.prio);
+}
+
/* return TRUE if a signal is pending (i.e. the guest must stop
execution) */
int CDECL kqemu_schedule(void)
{
- // printf("kqemu_schedule\n");
+ struct proc *p = curproc;
+ if (curpriority_cmp(p) > 0) {
+ int s = splhigh();
+ p->p_priority = MAXPRI;
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nvcsw++;
+ mi_switch();
+ splx(s);
+ }
+ return issignal(curproc) != 0;
+}
+#else
+/* return TRUE if a signal is pending (i.e. the guest must stop
+ execution) */
+int CDECL kqemu_schedule(void)
+{
+ // kqemu_log("kqemu_schedule\n");
mtx_lock_spin(&sched_lock);
mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
return SIGPENDING(curthread);
}
+#endif
static char log_buf[4096];
@@ -176,47 +221,154 @@
va_end(ap);
}
+#define KQEMU_MAX_INSTANCES 4
+
struct kqemu_instance {
+#if __FreeBSD_version >= 500000
+ TAILQ_ENTRY(kqemu_instance) kqemu_ent;
+ struct cdev *kqemu_dev;
+#endif
// struct semaphore sem;
struct kqemu_state *state;
};
+static int kqemu_ref_count = 0;
+static int max_locked_pages;
+
+#if __FreeBSD_version < 500000
+static dev_t kqemu_dev;
+#else
+static struct clonedevs *kqemuclones;
+static TAILQ_HEAD(,kqemu_instance) kqemuhead = TAILQ_HEAD_INITIALIZER(kqemuhead);
+static eventhandler_tag clonetag;
+#endif
+
static d_close_t kqemu_close;
static d_open_t kqemu_open;
static d_ioctl_t kqemu_ioctl;
static struct cdevsw kqemu_cdevsw = {
+#if __FreeBSD_version < 500000
+ /* open */ kqemu_open,
+ /* close */ kqemu_close,
+ /* read */ noread,
+ /* write */ nowrite,
+ /* ioctl */ kqemu_ioctl,
+ /* poll */ nopoll,
+ /* mmap */ nommap,
+ /* strategy */ nostrategy,
+ /* name */ "kqemu",
+ /* maj */ KQEMU_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ 0,
+ /* bmaj */ -1
+#else
.d_version = D_VERSION,
.d_flags = D_NEEDGIANT,
.d_open = kqemu_open,
.d_ioctl = kqemu_ioctl,
.d_close = kqemu_close,
.d_name = "kqemu"
+#endif
};
-/* For use with make_dev(9)/destroy_dev(9). */
-static struct cdev *kqemu_dev;
+#if __FreeBSD_version >= 500000
+static void
+#if __FreeBSD_version >= 600034
+kqemu_clone(void *arg, struct ucred *cred, char *name, int namelen,
+struct cdev **dev)
+#else
+kqemu_clone(void *arg, char *name, int namelen, struct cdev **dev)
+#endif
+{
+ int unit, r;
+ if (*dev != NULL)
+ return;
+
+ if (strcmp(name, "kqemu") == 0)
+ unit = -1;
+ else if (dev_stdclone(name, NULL, "kqemu", &unit) != 1)
+ return; /* Bad name */
+ if (unit != -1 && unit > KQEMU_MAX_INSTANCES)
+ return;
+
+ r = clone_create(&kqemuclones, &kqemu_cdevsw, &unit, dev, 0);
+ if (r) {
+ *dev = make_dev(&kqemu_cdevsw, unit2minor(unit),
+ UID_ROOT, GID_WHEEL, 0660, "kqemu%d", unit);
+ if (*dev != NULL) {
+ dev_ref(*dev);
+ (*dev)->si_flags |= SI_CHEAPCLONE;
+ }
+ }
+}
+#endif
+
+static void kqemu_destroy(struct kqemu_instance *ks)
+{
+ struct cdev *dev = ks->kqemu_dev;
+
+ if (ks->state) {
+ kqemu_delete(ks->state);
+ ks->state = NULL;
+ }
+
+ free(ks, M_KQEMU);
+ dev->si_drv1 = NULL;
+#if __FreeBSD_version >= 500000
+ TAILQ_REMOVE(&kqemuhead, ks, kqemu_ent);
+ destroy_dev(dev);
+#endif
+ --kqemu_ref_count;
+}
/* ARGSUSED */
static int
+#if __FreeBSD_version < 500000
+kqemu_open(dev_t dev, int flags, int fmt __unused, struct proc *p)
+{
+#else
kqemu_open(struct cdev *dev, int flags, int fmt __unused,
struct thread *td)
{
+ struct proc *p = td->td_proc;
+#endif
struct kqemu_instance *ks;
+
+ if (dev->si_drv1 || kqemu_ref_count >= KQEMU_MAX_INSTANCES)
+ return(EBUSY);
+
+ if ((flags & (FREAD|FWRITE)) == FREAD)
+ return(EPERM);
+
ks = malloc(sizeof(struct kqemu_instance), M_KQEMU, M_WAITOK);
if (ks == NULL) {
- printf("malloc failed\n");
+ kqemu_log("malloc failed\n");
return ENOMEM;
}
- ks->state = NULL;
+ memset(ks, 0, sizeof *ks);
+#if __FreeBSD_version >= 500000
+ ks->kqemu_dev = dev;
+ TAILQ_INSERT_TAIL(&kqemuhead, ks, kqemu_ent);
+#endif
+ kqemu_ref_count++;
+
dev->si_drv1 = ks;
+ if (kqemu_debug > 0)
+ kqemu_log("opened by pid=%d\n", p->p_pid);
return 0;
}
/* ARGSUSED */
static int
+#if __FreeBSD_version < 500000
+kqemu_ioctl(dev_t dev, u_long cmd, caddr_t addr,
+ int flags __unused, struct proc *p)
+#else
kqemu_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
int flags __unused, struct thread *td)
+#endif
{
int error = 0;
int ret;
@@ -231,8 +383,9 @@
break;
}
d1 = *(struct kqemu_init *)addr;
- printf("ram_base=%p ram_size=%ld\n", d1.ram_base, d1.ram_size);
- s = kqemu_init(d, 16000);
+ if (kqemu_debug > 0)
+ kqemu_log("ram_base=%p ram_size=%ld\n", d1.ram_base, d1.ram_size);
+ s = kqemu_init(d, max_locked_pages);
if (s == NULL) {
error = ENOMEM;
break;
@@ -248,9 +401,16 @@
}
ctx = kqemu_get_cpu_state(s);
*ctx = *(struct kqemu_cpu_state *)addr;
+#if __FreeBSD_version >= 500000
DROP_GIANT();
+#endif
ret = kqemu_exec(s);
+#if __FreeBSD_version >= 500000
PICKUP_GIANT();
+ td->td_retval[0] = ret;
+#else
+ p->p_retval[0] = ret;
+#endif
*(struct kqemu_cpu_state *)addr = *ctx;
break;
}
@@ -265,10 +425,22 @@
/* ARGSUSED */
static int
+#if __FreeBSD_version < 500000
+kqemu_close(dev_t dev, int flags, int fmt __unused, struct proc *p)
+{
+#else
kqemu_close(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
- return 0;
+ struct proc *p = td->td_proc;
+#endif
+ struct kqemu_instance *ks = (struct kqemu_instance *) dev->si_drv1;
+
+ kqemu_destroy(ks);
+
+ if (kqemu_debug > 0)
+ kqemu_log("closed by pid=%d\n", p->p_pid);
+ return 0;
}
/* ARGSUSED */
@@ -276,15 +448,55 @@
kqemu_modevent(module_t mod __unused, int type, void *data __unused)
{
int error = 0;
+#if __FreeBSD_version < 500000
+ int rc;
+#else
+ struct kqemu_instance *ks;
+#endif
switch (type) {
case MOD_LOAD:
printf("kqemu version 0x%08x\n", KQEMU_VERSION);
+ max_locked_pages = physmem / (2 * KQEMU_MAX_INSTANCES);
+ if (max_locked_pages > 32768)
+ max_locked_pages = 32768;
+#if __FreeBSD_version < 500000
+ if ((rc = cdevsw_add(&kqemu_cdevsw))) {
+ kqemu_log("error registering cdevsw, rc=%d\n", rc);
+ error = ENOENT;
+ break;
+ }
kqemu_dev = make_dev(&kqemu_cdevsw, 0,
- UID_ROOT, GID_WHEEL, 0666, "kqemu");
+ UID_ROOT, GID_WHEEL, 0660, "kqemu");
+#else
+ clone_setup(&kqemuclones);
+ clonetag = EVENTHANDLER_REGISTER(dev_clone, kqemu_clone, 0, 1000);
+ if (!clonetag) {
+ error = ENOMEM;
+ break;
+ }
+#endif
+ kqemu_log("KQEMU installed, max_instances=%d max_locked_mem=%dkB.\n",
+ KQEMU_MAX_INSTANCES, max_locked_pages * 4);
+
+ kqemu_ref_count = 0;
break;
case MOD_UNLOAD:
+ if (kqemu_ref_count > 0) {
+ error = EBUSY;
+ break;
+ }
+#if __FreeBSD_version < 500000
destroy_dev(kqemu_dev);
+ if ((rc = cdevsw_remove(&kqemu_cdevsw)))
+ kqemu_log("error unregistering, rc=%d\n", rc);
+#else
+ EVENTHANDLER_DEREGISTER(dev_clone, clonetag);
+ while ((ks = TAILQ_FIRST(&kqemuhead)) != NULL) {
+ kqemu_destroy(ks);
+ }
+ clone_cleanup(&kqemuclones);
+#endif
break;
case MOD_SHUTDOWN:
break;
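
The new kqemu-freebsd.c above registers a read/write sysctl via SYSCTL_INT(_debug, OID_AUTO, kqemu_debug, ...) and gates most of the kqemu_log() calls on it. As a usage sketch, the verbose logging can be toggled at runtime:

    # sysctl debug.kqemu_debug=1    # enable kqemu debug logging
    # sysctl debug.kqemu_debug=0    # back to quiet operation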

View file

@@ -13,7 +13,7 @@ Index: qemu/Makefile
$(MAKE) -C kqemu -f Makefile.winnt
else
- $(MAKE) -C kqemu
+ cd kqemu && $(BSD_MAKE)
+ ( cd kqemu && $(BSD_MAKE) )
endif
endif

View file

@@ -0,0 +1,67 @@
Index: qemu/bsd/Makefile
@@ -16,7 +16,8 @@
${MACHINE_ARCH}/s_rintl.c \
${MACHINE_ARCH}/s_round.c \
${MACHINE_ARCH}/s_sinl.S \
- ${MACHINE_ARCH}/s_tanl.S
+ ${MACHINE_ARCH}/s_tanl.S \
+ ${MACHINE_ARCH}/s_ldexpl.c
OBJS= ${SRCS:R:S/$/.o/}
Index: qemu/bsd/i386/s_ldexpl.c
@@ -0,0 +1,21 @@
+#include <math.h>
+#include <errno.h>
+#include <sysdep.h>
+
+long double __ldexpl(long double x, int expn)
+{
+ long double res;
+ if (!isfinite (x) || x == 0.0L)
+ return x;
+
+ __asm__ ("fscale"
+ : "=t" (res)
+ : "0" (x), "u" ((long double) expn));
+
+ if (!isfinite (res) || res == 0.0L)
+ errno = ERANGE;
+
+ return res;
+}
+
+weak_alias(__ldexpl,ldexpl)
Index: qemu/bsd/amd64/s_ldexpl.c
@@ -0,0 +1,21 @@
+#include <math.h>
+#include <errno.h>
+#include <sysdep.h>
+
+long double __ldexpl(long double x, int expn)
+{
+ long double res;
+ if (!isfinite (x) || x == 0.0L)
+ return x;
+
+ __asm__ ("fscale"
+ : "=t" (res)
+ : "0" (x), "u" ((long double) expn));
+
+ if (!isfinite (res) || res == 0.0L)
+ errno = ERANGE;
+
+ return res;
+}
+
+weak_alias(__ldexpl,ldexpl)
Index: qemu/target-i386/helper.c
@@ -2886,6 +2886,8 @@
ST0 = floatx_round_to_int(ST0, &env->fp_status);
}
+long double ldexpl(long double, int);
+
void helper_fscale(void)
{
ST0 = ldexp (ST0, (int)(ST1));
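
For reference, the FSCALE operation being emulated here computes ST0 = ST0 * 2^trunc(ST1), which is exactly what ldexpl(x, n) = x * 2^n provides for long double operands; the s_ldexpl.c files added above implement that function on top of the fscale instruction itself, and the extra prototype in target-i386/helper.c keeps the call compiling, presumably because the host libm of the day did not declare ldexpl.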

View file

@@ -0,0 +1,21 @@
Index: qemu/vl.c
@@ -40,6 +40,10 @@
#include <sys/socket.h>
#include <netinet/in.h>
#include <dirent.h>
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#include <libutil.h>
+#endif
#ifdef _BSD
#include <sys/stat.h>
#ifndef __APPLE__
@@ -1280,7 +1284,7 @@
return chr;
}
-#if defined(__linux__)
+#if defined(__linux__) || defined(__FreeBSD__)
CharDriverState *qemu_chr_open_pty(void)
{
char slave_name[1024];
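
This vl.c hunk just lets QEMU's existing pty character driver build on FreeBSD as well (openpty(3) is declared in libutil.h there). As an illustrative example — the disk image name is a placeholder — a guest serial port could then be bound to a host pty:

    $ qemu -hda disk.img -serial pty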

View file

@@ -1,6 +1,9 @@
====
FreeBSD host notes:
- needs to run as root in order to use /dev/tap* networking (why?)
(actually RELENG_6 and above now has a sysctl net.link.tap.user_open
to allow users to use it too. don't forget to adjust device node
permissions in /etc/devfs.rules.)
- slirp (usermode networking) is fixed now in cvs, on FreeSBIE 1.0 guests
you still have to manually do:
echo nameserver 10.0.2.3 >/etc/resolv.conf
@@ -18,4 +21,9 @@ its kernel is built with HZ=5000, and FreeBSD's default is 100...
ioctl.)
- the -smb option (smb-export local dir to guest) needs the net/samba
port/package installed in addition to qemu.
- RELENG_6 and up guests often crash while accessing the emulated cdrom
(see kern/84102, http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/84102),
using a kernel without PREEMPTION has been reported to fix this problem.
(or do an ftp install instead of installing from the emulated cdrom, and
then make a new kernel.)
====
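
As a sketch of the devfs.rules adjustment mentioned in the tap networking note above (the ruleset name, number and group are illustrative, not dictated by the port):

    # /etc/devfs.rules
    [localrules=10]
    add path 'tap*' mode 0660 group operator

    # /etc/rc.conf
    devfs_system_ruleset="localrules"

    # plus, on RELENG_6 and newer:
    # sysctl net.link.tap.user_open=1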

View file

@@ -6,12 +6,12 @@
#
PORTNAME= qemu
PORTVERSION= 0.7.0s.20050717
PORTVERSION= 0.7.2s.20050909
CATEGORIES= emulators
MASTER_SITES= http://www.qemu.org/ \
http://people.fruitsalad.org/nox/qemu/ \
http://dad-answers.com/qemu/
DISTNAME= ${PORTNAME}-snapshot-2005-07-17_23
DISTNAME= ${PORTNAME}-snapshot-2005-09-09_23
EXTRACT_ONLY= ${DISTNAME}${EXTRACT_SUFX}
MAINTAINER= nox@jelal.kn-bremen.de
@@ -23,8 +23,9 @@ RUN_DEPENDS+= ${LOCALBASE}/sbin/smbd:${PORTSDIR}/net/samba
.endif
.if defined(WITH_KQEMU)
DISTKQEMU= kqemu-0.6.2-1.tar.gz
DISTKQEMU= kqemu-0.7.2.tar.gz
DISTFILES= ${EXTRACT_ONLY} ${DISTKQEMU}
EXTRA_PATCHES= ${FILESDIR}/kqemu-freebsd-patch
.endif
HAS_CONFIGURE= yes
@@ -40,9 +41,11 @@ MAN1= qemu.1 qemu-img.1
ONLY_FOR_ARCHS= amd64 i386
.if defined(WITH_KQEMU)
NO_PACKAGE= Depends on kernel, and module not redistributable
CONFIGURE_ARGS+= --enable-kqemu
PLIST_SUB= WITH_KQEMU=""
PLIST_SUB+= KMODDIR=${KMODDIR}
.else
CONFIGURE_ARGS+= --disable-kqemu
PLIST_SUB= WITH_KQEMU="@comment "
.endif
@@ -52,7 +55,7 @@ PLIST_SUB= WITH_KQEMU="@comment "
.if ${ARCH} == "amd64"
ARCH= x86_64
.if ${OSVERSION} >= 502126
.if ${OSVERSION} >= 502126 && ${OSVERSION} <= 600029
BUILD_DEPENDS+= gcc34:${PORTSDIR}/lang/gcc34
GCCVERSION= 030402
CC= gcc34
@@ -63,16 +66,12 @@ USE_GCC= 3.4
USE_GCC= 3.4
.endif
.if defined(WITH_KQEMU) && ${ARCH} != "i386"
IGNORE= kqemu only supported on i386
.endif
.if defined(WITH_KQEMU) && !exists(${SRC_BASE}/sys/Makefile)
IGNORE= kqemu requires kernel source to be installed
.endif
pre-everything::
.if !defined(WITH_KQEMU) && ${ARCH} == "i386"
.if !defined(WITH_KQEMU)
@${ECHO_MSG} "Notice: you can build qemu with the (alpha!) kqemu accelerator kernel module"
@${ECHO_MSG} "by defining WITH_KQEMU."
.endif
@@ -85,7 +84,7 @@ pre-everything::
.if defined(WITH_KQEMU)
post-extract:
@cd ${WRKSRC} && ${TAR} xfz ${_DISTDIR}/${DISTKQEMU}
@${CP} ${FILESDIR}/BSDmakefile ${FILESDIR}/kmod_bsd.c ${WRKSRC}/kqemu
@${LN} -s Makefile.freebsd ${WRKSRC}/kqemu/BSDmakefile
.endif
pre-patch:

View file

@@ -1,4 +1,4 @@
MD5 (qemu-snapshot-2005-07-17_23.tar.bz2) = 5d21295c1f328ea00de19a54715ee7c3
SIZE (qemu-snapshot-2005-07-17_23.tar.bz2) = 1114748
MD5 (kqemu-0.6.2-1.tar.gz) = c6bb3b40fb3d526d731eb0f1f9dee7ee
SIZE (kqemu-0.6.2-1.tar.gz) = 21002
MD5 (qemu-snapshot-2005-09-09_23.tar.bz2) = db4ffeb081666c7352f5c0231e3f09c7
SIZE (qemu-snapshot-2005-09-09_23.tar.bz2) = 1122120
MD5 (kqemu-0.7.2.tar.gz) = 02cfdecda90458d6393781496ec6b48b
SIZE (kqemu-0.7.2.tar.gz) = 79314

View file

@@ -1,9 +0,0 @@
KMOD= kqemu
SRCS= kmod_bsd.c
OBJS= kqemu-mod-i386.o
.if ${OSVERSION} >= 500000
CC= cc
.endif
WERROR=
.include <bsd.kmod.mk>

View file

@@ -1,642 +0,0 @@
/*
* FreeBSD kernel wrapper for KQEMU
* Copyright (c) 2005 Antony T Curtis
*
* Based upon the Linux wrapper by Fabrice Bellard
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#if __FreeBSD_version < 500000
#include <sys/buf.h>
#endif
#include <sys/uio.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#if __FreeBSD_version > 500000
#include <sys/ktr.h>
#include <sys/sched.h>
#endif
#include <sys/ioccom.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/module.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <machine/stdarg.h>
#define __KERNEL__
#include "kqemu.h"
static unsigned long cache_page(vm_offset_t paddr, caddr_t addr);
static caddr_t find_page(vm_offset_t paddr, int free);
static MALLOC_DEFINE(M_KQEMU, "KQEMU", "KQEMU Resources");
struct pagecache {
caddr_t addr;
};
static struct pagecache **pagecache;
#if __FreeBSD_version > 500000
static struct mtx cache_lock;
#endif
static unsigned long cache_page(vm_offset_t paddr, caddr_t addr)
{
unsigned long ppn = (unsigned long)(paddr >> PAGE_SHIFT);
int pci = (int)(ppn >> 10);
struct pagecache *cache;
#if __FreeBSD_version > 500000
mtx_lock_spin(&cache_lock);
#endif
if (!(cache = pagecache[pci])) {
if (!addr) {
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return 0;
}
cache = pagecache[pci] = (struct pagecache *)
kqemu_vmalloc(1024 * sizeof(struct pagecache));
memset(cache, 0, 1024 * sizeof(struct pagecache));
}
if (!addr) {
int i;
cache[ppn & 1023].addr = (caddr_t) 0;
for (i = 1023; i >= 0; i--, cache++)
if (cache->addr)
break;
if (i < 0) {
kqemu_vfree(pagecache[pci]);
pagecache[pci] = 0;
}
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return 0;
}
cache[ppn & 1023].addr = (caddr_t) (((unsigned long) addr) & ~PAGE_MASK);
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return ppn;
}
static caddr_t find_page(vm_offset_t paddr, int free)
{
unsigned long ppn = (unsigned long)(paddr >> PAGE_SHIFT);
struct pagecache *cache;
caddr_t addr;
#if __FreeBSD_version > 500000
mtx_lock_spin(&cache_lock);
#endif
if (!(cache = pagecache[ppn >> 10])) {
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
return 0;
}
addr = (caddr_t)(((unsigned long)cache[ppn & 1023].addr)
| ((unsigned long)paddr & PAGE_MASK));
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
if (free && addr)
cache_page(paddr, 0);
return addr;
}
/* lock the page at virtual address 'user_addr' and return its
page index. Return -1 if error */
unsigned long CDECL kqemu_lock_user_page(unsigned long user_addr)
{
int rc;
caddr_t addr = (caddr_t) user_addr;
vm_page_t m;
vm_offset_t paddr;
/*kqemu_log("kqemu_lock_user_page(0x%08x)\n", addr);*/
rc = vm_fault_quick(addr, VM_PROT_READ|VM_PROT_WRITE);
if (rc < 0) {
/*kqemu_log("vm_fault_quick failed rc=%d\n",rc);*/
return -1;
}
paddr = vtophys(addr);
m = PHYS_TO_VM_PAGE(paddr);
vm_page_wire(m);
return cache_page(paddr, addr);
}
void CDECL kqemu_unlock_user_page(unsigned long page_index)
{
vm_page_t m;
vm_offset_t paddr;
/*kqemu_log("kqemu_unlock_user_page(0x%08x)\n",page_index);*/
paddr = (vm_offset_t)(page_index << PAGE_SHIFT);
m = PHYS_TO_VM_PAGE(paddr);
vm_page_unwire(m, 1);
cache_page(paddr, 0);
}
unsigned long CDECL kqemu_alloc_zeroed_page(void)
{
void *addr;
vm_offset_t paddr;
/*kqemu_log("kqemu_alloc_zeroed_page()\n");*/
addr = contigmalloc(PAGE_SIZE, M_KQEMU, M_WAITOK, 0, ~0ul, PAGE_SIZE, 0);
if (!addr) {
/*kqemu_log("contigmalloc failed\n");*/
return -1;
}
memset(addr, 0, PAGE_SIZE);
paddr = vtophys(addr);
return cache_page(paddr, addr);
}
void CDECL kqemu_free_page(unsigned long page_index)
{
vm_offset_t paddr;
caddr_t addr;
/*kqemu_log("kqemu_free_page(0x%08x)\n", page_index);*/
paddr = (vm_offset_t) (page_index << PAGE_SHIFT);
if ((addr = find_page(paddr,1))) {
contigfree((void *) addr, PAGE_SIZE, M_KQEMU);
}
}
void * CDECL kqemu_page_kaddr(unsigned long page_index)
{
vm_offset_t paddr;
/*kqemu_log("kqemu_page_kaddr(0x%08x)\n", page_index);*/
paddr = (vm_offset_t) (page_index << PAGE_SHIFT);
return (void *) find_page(paddr, 0);
}
/* contraint: each page of the vmalloced area must be in the first 4
GB of physical memory */
void * CDECL kqemu_vmalloc(unsigned int size)
{
/*kqemu_log("kqemu_vmalloc(0x%08x)\n", size);*/
return malloc(size, M_KQEMU, M_WAITOK);
}
void CDECL kqemu_vfree(void *ptr)
{
/*kqemu_log("kqemu_vfree(0x%08x)\n", ptr);*/
return free(ptr, M_KQEMU);
}
unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr)
{
caddr_t addr = (caddr_t)vaddr;
vm_offset_t paddr = vtophys(addr);
return cache_page(paddr, addr);
}
#if __FreeBSD_version < 500000
static int
curpriority_cmp(struct proc *p)
{
int c_class, p_class;
c_class = RTP_PRIO_BASE(curproc->p_rtprio.type);
p_class = RTP_PRIO_BASE(p->p_rtprio.type);
if (p_class != c_class)
return (p_class - c_class);
if (p_class == RTP_PRIO_NORMAL)
return (((int)p->p_priority - (int)curpriority) / PPQ);
return ((int)p->p_rtprio.prio - (int)curproc->p_rtprio.prio);
}
/* return TRUE if a signal is pending (i.e. the guest must stop
execution) */
int CDECL kqemu_schedule(void)
{
struct proc *p = curproc;
if (curpriority_cmp(p) > 0) {
int s = splhigh();
p->p_priority = MAXPRI;
setrunqueue(p);
p->p_stats->p_ru.ru_nvcsw++;
mi_switch();
splx(s);
}
return issignal(curproc) != 0;
}
#else
/* return TRUE if a signal is pending (i.e. the guest must stop
execution) */
int CDECL kqemu_schedule(void)
{
struct thread *td = curthread;
struct proc *p = td->td_proc;
int rc;
mtx_lock_spin(&sched_lock);
sched_prio(td, td->td_ksegrp->kg_user_pri);
mi_switch(SW_INVOL, NULL);
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
rc = cursig(td);
mtx_unlock(&p->p_sigacts->ps_mtx);
PROC_UNLOCK(p);
return rc;
}
#endif
static char log_buf[4096];
void CDECL kqemu_log(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vsnprintf(log_buf, sizeof(log_buf), fmt, ap);
printf("kqemu: %s", log_buf);
va_end(ap);
}
/*********************************************************/
#define KQEMU_MAX_INSTANCES 4
struct kqemu_instance {
#if __FreeBSD_version > 500000
TAILQ_ENTRY(kqemu_instance) kqemu_ent;
struct cdev *kqemu_dev;
#endif
struct kqemu_state *state;
};
static int kqemu_ref_count = 0;
static int max_locked_pages;
#if __FreeBSD_version < 500000
static dev_t kqemu_dev;
#else
static struct clonedevs *kqemuclones;
static TAILQ_HEAD(,kqemu_instance) kqemuhead = TAILQ_HEAD_INITIALIZER(kqemuhead);
static eventhandler_tag clonetag;
#endif
static d_open_t kqemu_open;
static d_close_t kqemu_close;
static d_ioctl_t kqemu_ioctl;
static struct cdevsw kqemu_cdevsw = {
#if __FreeBSD_version < 500000
/* open */ kqemu_open,
/* close */ kqemu_close,
/* read */ noread,
/* write */ nowrite,
/* ioctl */ kqemu_ioctl,
/* poll */ nopoll,
/* mmap */ nommap,
/* strategy */ nostrategy,
/* name */ "kqemu",
/* maj */ KQEMU_MAJOR,
/* dump */ nodump,
/* psize */ nopsize,
/* flags */ 0,
/* bmaj */ -1
#else
.d_version = D_VERSION,
.d_open = kqemu_open,
.d_close = kqemu_close,
.d_ioctl = kqemu_ioctl,
.d_name = "kqemu",
#ifdef D_NEEDGIANT
.d_flags = D_NEEDGIANT,
#endif
#endif
};
#if __FreeBSD_version > 500000
static void
kqemu_clone(void *arg, char *name, int namelen, struct cdev **dev)
{
int unit, r;
if (*dev != NULL)
return;
if (strcmp(name, "kqemu") == 0)
unit = -1;
else if (dev_stdclone(name, NULL, "kqemu", &unit) != 1)
return; /* Bad name */
if (unit != -1 && unit > KQEMU_MAX_INSTANCES)
return;
r = clone_create(&kqemuclones, &kqemu_cdevsw, &unit, dev, 0);
if (r) {
*dev = make_dev(&kqemu_cdevsw, unit2minor(unit),
UID_ROOT, GID_WHEEL, 0660, "kqemu%d", unit);
if (*dev != NULL) {
dev_ref(*dev);
(*dev)->si_flags |= SI_CHEAPCLONE;
}
}
}
#endif
static void kqemu_destroy(struct kqemu_instance *ks)
{
struct cdev *dev = ks->kqemu_dev;
if (ks->state) {
kqemu_delete(ks->state);
ks->state = NULL;
}
free(ks, M_KQEMU);
dev->si_drv1 = NULL;
#if __FreeBSD_version > 500000
mtx_lock_spin(&cache_lock);
TAILQ_REMOVE(&kqemuhead, ks, kqemu_ent);
#endif
if (!--kqemu_ref_count) {
int i;
for (i = 1023; i >= 0; i--)
kqemu_vfree(pagecache[i]);
memset(pagecache, 0, 1024 * sizeof(void *));
}
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
destroy_dev(dev);
#endif
}
int
#if __FreeBSD_version < 500000
kqemu_open(dev, flags, fmt, p)
dev_t dev;
int flags, fmt;
struct proc *p;
{
#else
kqemu_open(dev, flags, fmt, td)
struct cdev *dev;
int flags, fmt;
struct thread *td;
{
struct proc *p = td->td_proc;
#endif
struct kqemu_instance *ks;
if (dev->si_drv1 || kqemu_ref_count >= KQEMU_MAX_INSTANCES)
return(EBUSY);
if ((flags & (FREAD|FWRITE)) == FREAD)
return(EPERM);
ks = (struct kqemu_instance *) malloc(sizeof(*ks), M_KQEMU, M_WAITOK);
if (ks == NULL)
return(ENOMEM);
memset(ks, 0, sizeof *ks);
dev->si_drv1 = ks;
#if __FreeBSD_version > 500000
ks->kqemu_dev = dev;
mtx_lock_spin(&cache_lock);
TAILQ_INSERT_TAIL(&kqemuhead, ks, kqemu_ent);
#endif
kqemu_ref_count++;
#if __FreeBSD_version > 500000
mtx_unlock_spin(&cache_lock);
#endif
kqemu_log("opened by pid=%d\n", p->p_pid);
return(0);
}
int
#if __FreeBSD_version < 500000
kqemu_close(dev, flags, fmt, p)
dev_t dev;
int flags, fmt;
struct proc *p;
{
#else
kqemu_close(dev, flags, fmt, td)
struct cdev *dev;
int flags, fmt;
struct thread *td;
{
struct proc *p = td->td_proc;
#endif
struct kqemu_instance *ks = (struct kqemu_instance *) dev->si_drv1;
kqemu_destroy(ks);
kqemu_log("closed by pid=%d\n", p->p_pid);
return(0);
}
int
#if __FreeBSD_version < 500000
kqemu_ioctl(dev, cmd, cmdarg, flags, p)
dev_t dev;
unsigned long cmd;
caddr_t cmdarg;
int flags;
struct proc *p;
{
#else
kqemu_ioctl(dev, cmd, cmdarg, flags, td)
struct cdev *dev;
unsigned long cmd;
caddr_t cmdarg;
int flags;
struct thread *td;
{
#endif
struct kqemu_instance *ks = (struct kqemu_instance *) dev->si_drv1;
struct kqemu_state *s = ks->state;
long ret;
int error = 0;
switch (cmd) {
case KQEMU_INIT:
/*kqemu_log("KQEMU_INIT data=0x%08x\n",cmdarg);*/
{
if (s) {
error = (EIO);
break;
}
if (!(s = kqemu_init((struct kqemu_init *)cmdarg, max_locked_pages))) {
error = (ENOMEM);
break;
}
ks->state = s;
break;
}
case KQEMU_EXEC:
/*kqemu_log("KQEMU_EXEC data=0x%08x\n",cmdarg);*/
{
struct kqemu_cpu_state *ctx;
if (!s) {
error = (EIO);
break;
}
ctx = kqemu_get_cpu_state(s);
memcpy((void *)ctx, (void *)cmdarg, sizeof(struct kqemu_cpu_state));
ret = kqemu_exec(s);
#if __FreeBSD_version > 500000
td->td_retval[0] = ret;
#else
p->p_retval[0] = ret;
#endif
memcpy((void *)cmdarg, (void *)ctx, sizeof(struct kqemu_cpu_state));
break;
}
case KQEMU_GET_VERSION:
/*kqemu_log("KQEMU_GET_VERSION data=0x%08x\n",cmdarg);*/
{
*(int *)cmdarg = KQEMU_VERSION;
break;
}
default:
/*kqemu_log("ioctl unknown 0x%08x\n",cmd);*/
error = (ENXIO);
}
return(error);
}
static int
init_module(void)
{
#if __FreeBSD_version < 500000
int rc;
#endif
printf("QEMU Accelerator Module version %d.%d.%d, Copyright (c) 2005 Fabrice Bellard\n"
"FreeBSD wrapper port, Copyright (c) 2005 Antony T Curtis\n"
"This is a proprietary product. Read the LICENSE file for more information\n"
"Redistribution of this module is prohibited without authorization\n",
(KQEMU_VERSION >> 16),
(KQEMU_VERSION >> 8) & 0xff,
(KQEMU_VERSION) & 0xff);
if (!(pagecache = (struct pagecache **)
kqemu_vmalloc(1024 * sizeof(void *))))
return(ENOMEM);
memset(pagecache, 0, 1024 * sizeof(void *));
#if __FreeBSD_version > 500000
mtx_init(&cache_lock, "pagecache lock", NULL, MTX_SPIN);
#endif
max_locked_pages = physmem / (2 * KQEMU_MAX_INSTANCES);
if (max_locked_pages > 32768)
max_locked_pages = 32768;
#if __FreeBSD_version < 500000
if ((rc = cdevsw_add(&kqemu_cdevsw))) {
kqemu_log("error registering cdevsw, rc=%d\n", rc);
return(ENOENT);
}
kqemu_dev = make_dev(&kqemu_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, "kqemu");
#else
clone_setup(&kqemuclones);
clonetag = EVENTHANDLER_REGISTER(dev_clone, kqemu_clone, 0, 1000);
if (!clonetag)
return ENOMEM;
#endif
kqemu_log("KQEMU installed, max_instances=%d max_locked_mem=%dkB.\n",
KQEMU_MAX_INSTANCES, max_locked_pages * 4);
kqemu_ref_count = 0;
return 0;
}
static void
cleanup_module(void)
{
#if __FreeBSD_version < 500000
int rc;
#else
struct kqemu_instance *ks;
#endif
#if __FreeBSD_version < 500000
destroy_dev(kqemu_dev);
if ((rc = cdevsw_remove(&kqemu_cdevsw)))
kqemu_log("error unregistering, rc=%d\n", rc);
#else
EVENTHANDLER_DEREGISTER(dev_clone, clonetag);
mtx_lock_spin(&cache_lock);
while ((ks = TAILQ_FIRST(&kqemuhead)) != NULL) {
mtx_unlock_spin(&cache_lock);
kqemu_destroy(ks);
mtx_lock_spin(&cache_lock);
}
mtx_unlock_spin(&cache_lock);
mtx_destroy(&cache_lock);
clone_cleanup(&kqemuclones);
#endif
kqemu_vfree(pagecache);
pagecache = 0;
}
static int
kqemu_modevent(module_t mod, int type, void *data)
{
int err = 0;
switch (type) {
case MOD_LOAD:
err = init_module();
break;
case MOD_UNLOAD:
if (kqemu_ref_count > 0) {
err = EBUSY;
break;
}
/* fall through */
case MOD_SHUTDOWN:
cleanup_module();
break;
default:
err = EINVAL;
break;
}
return(err);
}
static moduledata_t kqemu_mod = {
"kqemu_driver",
kqemu_modevent,
NULL
};
DECLARE_MODULE(kqemu, kqemu_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);

View file

@@ -0,0 +1,506 @@
Index: qemu/kqemu/Makefile.freebsd
@@ -1,9 +1,13 @@
+# $Id: Makefile.freebsd,v 1.1 2005/04/17 17:21:31 bellard Exp $
KMOD= kqemu
SRCS= kqemu-freebsd.c
.if ${MACHINE_ARCH} == "i386"
OBJS= kqemu-mod-i386.o
.elif ${MACHINE_ARCH} == "amd64"
OBJS= kqemu-mod-x86_64.o
+.endif
+.if ${OSVERSION} >= 500000
+CC= cc
.endif
WERROR=
Index: qemu/kqemu/kqemu-freebsd.c
@@ -3,20 +3,33 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
+#include <sys/ctype.h>
+#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/resourcevar.h>
+#if __FreeBSD_version >= 500000
#include <sys/sched.h>
+#endif
#include <sys/signalvar.h>
#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/uio.h>
+#if __FreeBSD_version < 500000
+#include <sys/buf.h>
+#endif
+
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+
#include <machine/vmparam.h>
#include <machine/stdarg.h>
@@ -25,10 +38,14 @@
MALLOC_DECLARE(M_KQEMU);
MALLOC_DEFINE(M_KQEMU, "kqemu", "kqemu buffers");
+int kqemu_debug;
+SYSCTL_INT(_debug, OID_AUTO, kqemu_debug, CTLFLAG_RW, &kqemu_debug, 0,
+ "kqemu debug flag");
+
#define USER_BASE 0x1000
/* lock the page at virtual address 'user_addr' and return its
- physical page index. Return -1 if error */
+ physical page index. Return NULL if error */
struct kqemu_user_page *CDECL kqemu_lock_user_page(unsigned long *ppage_index,
unsigned long user_addr)
{
@@ -37,14 +54,18 @@
vm_paddr_t pa = 0;
int ret;
pmap_t pmap;
+#if __FreeBSD_version >= 500000
ret = vm_map_wire(&vm->vm_map, va, va+PAGE_SIZE, VM_MAP_WIRE_USER);
+#else
+ ret = vm_map_user_pageable(&vm->vm_map, va, va+PAGE_SIZE, FALSE);
+#endif
if (ret != KERN_SUCCESS) {
- printf("kqemu_lock_user_page(%08lx) failed, ret=%d\n", user_addr, ret);
+ kqemu_log("kqemu_lock_user_page(%08lx) failed, ret=%d\n", user_addr, ret);
return NULL;
}
pmap = vm_map_pmap(&vm->vm_map);
pa = pmap_extract(pmap, va);
- // printf("kqemu_lock_user_page(%08lx) va=%08x pa=%08x\n", user_addr, va, pa);
+ // kqemu_log("kqemu_lock_user_page(%08lx) va=%08x pa=%08x\n", user_addr, va, pa);
*ppage_index = pa >> PAGE_SHIFT;
return (struct kqemu_user_page *)va;
}
@@ -54,12 +75,16 @@
struct vmspace *vm = curproc->p_vmspace;
vm_offset_t va;
int ret;
- // printf("kqemu_unlock_user_page(%08lx)\n", page_index);
+ // kqemu_log("kqemu_unlock_user_page(%08lx)\n", page_index);
va = (vm_offset_t)page;
+#if __FreeBSD_version >= 500000
ret = vm_map_unwire(&vm->vm_map, va, va+PAGE_SIZE, VM_MAP_WIRE_USER);
+#else
+ ret = vm_map_user_pageable(&vm->vm_map, va, va+PAGE_SIZE, TRUE);
+#endif
#if 0
if (ret != KERN_SUCCESS) {
- printf("kqemu_unlock_user_page(%08lx) failed, ret=%d\n", page_index, ret);
+ kqemu_log("kqemu_unlock_user_page(%08lx) failed, ret=%d\n", page_index, ret);
}
#endif
}
@@ -76,20 +101,21 @@
va = kmem_alloc(kernel_map, PAGE_SIZE);
if (va == 0) {
- printf("kqemu_alloc_zeroed_page: NULL\n");
- return -1;
+ kqemu_log("kqemu_alloc_zeroed_page: NULL\n");
+ return NULL;
}
pmap = vm_map_pmap(kernel_map);
pa = pmap_extract(pmap, va);
- // printf("kqemu_alloc_zeroed_page: %08x\n", pa);
+ // kqemu_log("kqemu_alloc_zeroed_page: %08x\n", pa);
*ppage_index = pa >> PAGE_SHIFT;
return (struct kqemu_page *)va;
}
void CDECL kqemu_free_page(struct kqemu_page *page)
{
- // printf("kqemu_free_page(%08lx)\n", page_index);
- /* XXX: do it */
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_free_page(%p)\n", page);
+ kmem_free(kernel_map, (vm_offset_t) page, PAGE_SIZE);
}
/* return kernel address of the physical page page_index */
@@ -103,42 +129,29 @@
GB of physical memory */
void * CDECL kqemu_vmalloc(unsigned int size)
{
- struct vmspace *vm = curproc->p_vmspace;
- vm_offset_t va = USER_BASE;
- int rv;
- if (size % PAGE_SIZE != 0) {
- printf("kqemu_vmalloc(%d) not a multiple of page size\n", size);
- return NULL;
- }
- rv = vm_map_find(&vm->vm_map, NULL, 0, &va, size, 1,
- VM_PROT_ALL, VM_PROT_ALL, 0);
- if (rv != KERN_SUCCESS) {
- printf("kqemu_vmalloc(%d) failed rv=%d\n", size, rv);
- return NULL;
- }
- printf("kqemu_vmalloc(%d): %08x\n", size, va);
- return (void *)va;
+ void *ptr = malloc(size, M_KQEMU, M_WAITOK);
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_vmalloc(%d): %p\n", size, ptr);
+ return ptr;
}
void CDECL kqemu_vfree(void *ptr)
{
- printf("kqemu_vfree(%p)\n", ptr);
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_vfree(%p)\n", ptr);
+ free(ptr, M_KQEMU);
}
/* return the physical page index for a given virtual page */
unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr)
{
- struct vmspace *vm = curproc->p_vmspace;
- vm_paddr_t pa;
- pmap_t pmap;
-
- pmap = vm_map_pmap(&vm->vm_map);
- pa = pmap_extract(pmap, (vm_offset_t)vaddr);
+ vm_paddr_t pa = vtophys(vaddr);
if (pa == 0) {
- printf("kqemu_vmalloc_to_phys(%p)->error\n", vaddr);
+ kqemu_log("kqemu_vmalloc_to_phys(%p)->error\n", vaddr);
return -1;
}
- printf("kqemu_vmalloc_to_phys(%p)->%08x\n", vaddr, pa);
+ if (kqemu_debug > 0)
+ kqemu_log("kqemu_vmalloc_to_phys(%p)->%08x\n", vaddr, pa);
return pa >> PAGE_SHIFT;
}
@@ -154,16 +167,48 @@
{
}
+#if __FreeBSD_version < 500000
+static int
+curpriority_cmp(struct proc *p)
+{
+ int c_class, p_class;
+
+ c_class = RTP_PRIO_BASE(curproc->p_rtprio.type);
+ p_class = RTP_PRIO_BASE(p->p_rtprio.type);
+ if (p_class != c_class)
+ return (p_class - c_class);
+ if (p_class == RTP_PRIO_NORMAL)
+ return (((int)p->p_priority - (int)curpriority) / PPQ);
+ return ((int)p->p_rtprio.prio - (int)curproc->p_rtprio.prio);
+}
+
/* return TRUE if a signal is pending (i.e. the guest must stop
execution) */
int CDECL kqemu_schedule(void)
{
- // printf("kqemu_schedule\n");
+ struct proc *p = curproc;
+ if (curpriority_cmp(p) > 0) {
+ int s = splhigh();
+ p->p_priority = MAXPRI;
+ setrunqueue(p);
+ p->p_stats->p_ru.ru_nvcsw++;
+ mi_switch();
+ splx(s);
+ }
+ return issignal(curproc) != 0;
+}
+#else
+/* return TRUE if a signal is pending (i.e. the guest must stop
+ execution) */
+int CDECL kqemu_schedule(void)
+{
+ // kqemu_log("kqemu_schedule\n");
mtx_lock_spin(&sched_lock);
mi_switch(SW_VOL, NULL);
mtx_unlock_spin(&sched_lock);
return SIGPENDING(curthread);
}
+#endif
static char log_buf[4096];
@@ -176,47 +221,154 @@
va_end(ap);
}
+#define KQEMU_MAX_INSTANCES 4
+
struct kqemu_instance {
+#if __FreeBSD_version >= 500000
+ TAILQ_ENTRY(kqemu_instance) kqemu_ent;
+ struct cdev *kqemu_dev;
+#endif
// struct semaphore sem;
struct kqemu_state *state;
};
+static int kqemu_ref_count = 0;
+static int max_locked_pages;
+
+#if __FreeBSD_version < 500000
+static dev_t kqemu_dev;
+#else
+static struct clonedevs *kqemuclones;
+static TAILQ_HEAD(,kqemu_instance) kqemuhead = TAILQ_HEAD_INITIALIZER(kqemuhead);
+static eventhandler_tag clonetag;
+#endif
+
static d_close_t kqemu_close;
static d_open_t kqemu_open;
static d_ioctl_t kqemu_ioctl;
static struct cdevsw kqemu_cdevsw = {
+#if __FreeBSD_version < 500000
+ /* open */ kqemu_open,
+ /* close */ kqemu_close,
+ /* read */ noread,
+ /* write */ nowrite,
+ /* ioctl */ kqemu_ioctl,
+ /* poll */ nopoll,
+ /* mmap */ nommap,
+ /* strategy */ nostrategy,
+ /* name */ "kqemu",
+ /* maj */ KQEMU_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ 0,
+ /* bmaj */ -1
+#else
.d_version = D_VERSION,
.d_flags = D_NEEDGIANT,
.d_open = kqemu_open,
.d_ioctl = kqemu_ioctl,
.d_close = kqemu_close,
.d_name = "kqemu"
+#endif
};
-/* For use with make_dev(9)/destroy_dev(9). */
-static struct cdev *kqemu_dev;
+#if __FreeBSD_version >= 500000
+static void
+#if __FreeBSD_version >= 600034
+kqemu_clone(void *arg, struct ucred *cred, char *name, int namelen,
+struct cdev **dev)
+#else
+kqemu_clone(void *arg, char *name, int namelen, struct cdev **dev)
+#endif
+{
+ int unit, r;
+ if (*dev != NULL)
+ return;
+
+ if (strcmp(name, "kqemu") == 0)
+ unit = -1;
+ else if (dev_stdclone(name, NULL, "kqemu", &unit) != 1)
+ return; /* Bad name */
+ if (unit != -1 && unit > KQEMU_MAX_INSTANCES)
+ return;
+
+ r = clone_create(&kqemuclones, &kqemu_cdevsw, &unit, dev, 0);
+ if (r) {
+ *dev = make_dev(&kqemu_cdevsw, unit2minor(unit),
+ UID_ROOT, GID_WHEEL, 0660, "kqemu%d", unit);
+ if (*dev != NULL) {
+ dev_ref(*dev);
+ (*dev)->si_flags |= SI_CHEAPCLONE;
+ }
+ }
+}
+#endif
+
+static void kqemu_destroy(struct kqemu_instance *ks)
+{
+ struct cdev *dev = ks->kqemu_dev;
+
+ if (ks->state) {
+ kqemu_delete(ks->state);
+ ks->state = NULL;
+ }
+
+ free(ks, M_KQEMU);
+ dev->si_drv1 = NULL;
+#if __FreeBSD_version >= 500000
+ TAILQ_REMOVE(&kqemuhead, ks, kqemu_ent);
+ destroy_dev(dev);
+#endif
+ --kqemu_ref_count;
+}
/* ARGSUSED */
static int
+#if __FreeBSD_version < 500000
+kqemu_open(dev_t dev, int flags, int fmt __unused, struct proc *p)
+{
+#else
kqemu_open(struct cdev *dev, int flags, int fmt __unused,
struct thread *td)
{
+ struct proc *p = td->td_proc;
+#endif
struct kqemu_instance *ks;
+
+ if (dev->si_drv1 || kqemu_ref_count >= KQEMU_MAX_INSTANCES)
+ return(EBUSY);
+
+ if ((flags & (FREAD|FWRITE)) == FREAD)
+ return(EPERM);
+
ks = malloc(sizeof(struct kqemu_instance), M_KQEMU, M_WAITOK);
if (ks == NULL) {
- printf("malloc failed\n");
+ kqemu_log("malloc failed\n");
return ENOMEM;
}
- ks->state = NULL;
+ memset(ks, 0, sizeof *ks);
+#if __FreeBSD_version >= 500000
+ ks->kqemu_dev = dev;
+ TAILQ_INSERT_TAIL(&kqemuhead, ks, kqemu_ent);
+#endif
+ kqemu_ref_count++;
+
dev->si_drv1 = ks;
+ if (kqemu_debug > 0)
+ kqemu_log("opened by pid=%d\n", p->p_pid);
return 0;
}
/* ARGSUSED */
static int
+#if __FreeBSD_version < 500000
+kqemu_ioctl(dev_t dev, u_long cmd, caddr_t addr,
+ int flags __unused, struct proc *p)
+#else
kqemu_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
int flags __unused, struct thread *td)
+#endif
{
int error = 0;
int ret;
@@ -231,8 +383,9 @@
break;
}
d1 = *(struct kqemu_init *)addr;
- printf("ram_base=%p ram_size=%ld\n", d1.ram_base, d1.ram_size);
- s = kqemu_init(d, 16000);
+ if (kqemu_debug > 0)
+ kqemu_log("ram_base=%p ram_size=%ld\n", d1.ram_base, d1.ram_size);
+ s = kqemu_init(d, max_locked_pages);
if (s == NULL) {
error = ENOMEM;
break;
@@ -248,9 +401,16 @@
}
ctx = kqemu_get_cpu_state(s);
*ctx = *(struct kqemu_cpu_state *)addr;
+#if __FreeBSD_version >= 500000
DROP_GIANT();
+#endif
ret = kqemu_exec(s);
+#if __FreeBSD_version >= 500000
PICKUP_GIANT();
+ td->td_retval[0] = ret;
+#else
+ p->p_retval[0] = ret;
+#endif
*(struct kqemu_cpu_state *)addr = *ctx;
break;
}
@@ -265,10 +425,22 @@
/* ARGSUSED */
static int
+#if __FreeBSD_version < 500000
+kqemu_close(dev_t dev, int flags, int fmt __unused, struct proc *p)
+{
+#else
kqemu_close(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
- return 0;
+ struct proc *p = td->td_proc;
+#endif
+ struct kqemu_instance *ks = (struct kqemu_instance *) dev->si_drv1;
+
+ kqemu_destroy(ks);
+
+ if (kqemu_debug > 0)
+ kqemu_log("closed by pid=%d\n", p->p_pid);
+ return 0;
}
/* ARGSUSED */
@@ -276,15 +448,55 @@
kqemu_modevent(module_t mod __unused, int type, void *data __unused)
{
int error = 0;
+#if __FreeBSD_version < 500000
+ int rc;
+#else
+ struct kqemu_instance *ks;
+#endif
switch (type) {
case MOD_LOAD:
printf("kqemu version 0x%08x\n", KQEMU_VERSION);
+ max_locked_pages = physmem / (2 * KQEMU_MAX_INSTANCES);
+ if (max_locked_pages > 32768)
+ max_locked_pages = 32768;
+#if __FreeBSD_version < 500000
+ if ((rc = cdevsw_add(&kqemu_cdevsw))) {
+ kqemu_log("error registering cdevsw, rc=%d\n", rc);
+ error = ENOENT;
+ break;
+ }
kqemu_dev = make_dev(&kqemu_cdevsw, 0,
- UID_ROOT, GID_WHEEL, 0666, "kqemu");
+ UID_ROOT, GID_WHEEL, 0660, "kqemu");
+#else
+ clone_setup(&kqemuclones);
+ clonetag = EVENTHANDLER_REGISTER(dev_clone, kqemu_clone, 0, 1000);
+ if (!clonetag) {
+ error = ENOMEM;
+ break;
+ }
+#endif
+ kqemu_log("KQEMU installed, max_instances=%d max_locked_mem=%dkB.\n",
+ KQEMU_MAX_INSTANCES, max_locked_pages * 4);
+
+ kqemu_ref_count = 0;
break;
case MOD_UNLOAD:
+ if (kqemu_ref_count > 0) {
+ error = EBUSY;
+ break;
+ }
+#if __FreeBSD_version < 500000
destroy_dev(kqemu_dev);
+ if ((rc = cdevsw_remove(&kqemu_cdevsw)))
+ kqemu_log("error unregistering, rc=%d\n", rc);
+#else
+ EVENTHANDLER_DEREGISTER(dev_clone, clonetag);
+ while ((ks = TAILQ_FIRST(&kqemuhead)) != NULL) {
+ kqemu_destroy(ks);
+ }
+ clone_cleanup(&kqemuclones);
+#endif
break;
case MOD_SHUTDOWN:
break;

View file

@@ -13,7 +13,7 @@ Index: qemu/Makefile
$(MAKE) -C kqemu -f Makefile.winnt
else
- $(MAKE) -C kqemu
+ cd kqemu && $(BSD_MAKE)
+ ( cd kqemu && $(BSD_MAKE) )
endif
endif

View file

@@ -0,0 +1,67 @@
Index: qemu/bsd/Makefile
@@ -16,7 +16,8 @@
${MACHINE_ARCH}/s_rintl.c \
${MACHINE_ARCH}/s_round.c \
${MACHINE_ARCH}/s_sinl.S \
- ${MACHINE_ARCH}/s_tanl.S
+ ${MACHINE_ARCH}/s_tanl.S \
+ ${MACHINE_ARCH}/s_ldexpl.c
OBJS= ${SRCS:R:S/$/.o/}
Index: qemu/bsd/i386/s_ldexpl.c
@@ -0,0 +1,21 @@
+#include <math.h>
+#include <errno.h>
+#include <sysdep.h>
+
+long double __ldexpl(long double x, int expn)
+{
+ long double res;
+ if (!isfinite (x) || x == 0.0L)
+ return x;
+
+ __asm__ ("fscale"
+ : "=t" (res)
+ : "0" (x), "u" ((long double) expn));
+
+ if (!isfinite (res) || res == 0.0L)
+ errno = ERANGE;
+
+ return res;
+}
+
+weak_alias(__ldexpl,ldexpl)
Index: qemu/bsd/amd64/s_ldexpl.c
@@ -0,0 +1,21 @@
+#include <math.h>
+#include <errno.h>
+#include <sysdep.h>
+
+long double __ldexpl(long double x, int expn)
+{
+ long double res;
+ if (!isfinite (x) || x == 0.0L)
+ return x;
+
+ __asm__ ("fscale"
+ : "=t" (res)
+ : "0" (x), "u" ((long double) expn));
+
+ if (!isfinite (res) || res == 0.0L)
+ errno = ERANGE;
+
+ return res;
+}
+
+weak_alias(__ldexpl,ldexpl)
Index: qemu/target-i386/helper.c
@@ -2886,6 +2886,8 @@
ST0 = floatx_round_to_int(ST0, &env->fp_status);
}
+long double ldexpl(long double, int);
+
void helper_fscale(void)
{
ST0 = ldexp (ST0, (int)(ST1));

View file

@@ -0,0 +1,21 @@
Index: qemu/vl.c
@@ -40,6 +40,10 @@
#include <sys/socket.h>
#include <netinet/in.h>
#include <dirent.h>
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#include <libutil.h>
+#endif
#ifdef _BSD
#include <sys/stat.h>
#ifndef __APPLE__
@@ -1280,7 +1284,7 @@
return chr;
}
-#if defined(__linux__)
+#if defined(__linux__) || defined(__FreeBSD__)
CharDriverState *qemu_chr_open_pty(void)
{
char slave_name[1024];

View file

@@ -1,6 +1,9 @@
====
FreeBSD host notes:
- needs to run as root in order to use /dev/tap* networking (why?)
(actually RELENG_6 and above now has a sysctl net.link.tap.user_open
to allow users to use it too. don't forget to adjust device node
permissions in /etc/devfs.rules.)
- slirp (usermode networking) is fixed now in cvs, on FreeSBIE 1.0 guests
you still have to manually do:
echo nameserver 10.0.2.3 >/etc/resolv.conf
@@ -18,4 +21,9 @@ its kernel is built with HZ=5000, and FreeBSD's default is 100...
ioctl.)
- the -smb option (smb-export local dir to guest) needs the net/samba
port/package installed in addition to qemu.
- RELENG_6 and up guests often crash while accessing the emulated cdrom
(see kern/84102, http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/84102),
using a kernel without PREEMPTION has been reported to fix this problem.
(or do an ftp install instead of installing from the emulated cdrom, and
then make a new kernel.)
====