ports/x11/nvidia-driver/files/extra-patch-src_nvidia-modeset_nvidia-modeset-freebsd.c.in
2023-08-16 14:01:25 -07:00

131 lines
5.1 KiB
C

--- src/nvidia-modeset/nvidia-modeset-freebsd.c.orig 2023-03-28 22:14:28 UTC
+++ src/nvidia-modeset/nvidia-modeset-freebsd.c
@@ -368,6 +368,7 @@ struct nvkms_timer_t {
NvBool cancel;
NvBool complete;
NvBool isRefPtr;
+ NvBool needsNvkmsLock;
NvBool callout_created;
nvkms_timer_proc_t *proc;
void *dataPtr;
@@ -406,7 +407,14 @@ static void nvkms_taskqueue_callback(void *arg, int pe
callout_drain(&timer->callout);
}
- sx_xlock(&nvkms_lock);
+ /*
+ * Only take the lock if this timer requests it. DRM's nv_drm_event_callback
+ * does not need it, since it may re-enter nvkms through the kapi and
+ * acquire nvkms_lock at that point.
+ */
+ if (timer->needsNvkmsLock) {
+ sx_xlock(&nvkms_lock);
+ }
if (timer->isRefPtr) {
// If the object this timer refers to was destroyed, treat the timer as
@@ -424,11 +432,13 @@ static void nvkms_taskqueue_callback(void *arg, int pe
timer->complete = NV_TRUE;
}
+ if (timer->needsNvkmsLock) {
+ sx_xunlock(&nvkms_lock);
+ }
+
if (timer->cancel || timer->isRefPtr) {
nvkms_free(timer, sizeof(*timer));
}
-
- sx_xunlock(&nvkms_lock);
}
static void nvkms_callout_callback(void *arg)
@@ -441,11 +451,13 @@ nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_ti
static void
nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc,
- void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec)
+ void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec,
+ NvBool needsNvkmsLock)
{
timer->cancel = NV_FALSE;
timer->complete = NV_FALSE;
timer->isRefPtr = isRefPtr;
+ timer->needsNvkmsLock = needsNvkmsLock;
timer->proc = proc;
timer->dataPtr = dataPtr;
@@ -479,19 +491,27 @@ nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_ti
mtx_unlock_spin(&nvkms_timers.lock);
}
-nvkms_timer_handle_t*
-nvkms_alloc_timer(nvkms_timer_proc_t *proc,
- void *dataPtr, NvU32 dataU32,
- NvU64 usec)
+static nvkms_timer_handle_t*
+nvkms_alloc_timer_locked(nvkms_timer_proc_t *proc,
+ void *dataPtr, NvU32 dataU32,
+ NvU64 usec, NvBool needsNvkmsLock)
{
// nvkms_alloc_timer cannot be called from an interrupt context.
struct nvkms_timer_t *timer = nvkms_alloc(sizeof(*timer), NV_TRUE);
if (timer) {
- nvkms_init_timer(timer, proc, dataPtr, dataU32, NV_FALSE, usec);
+ nvkms_init_timer(timer, proc, dataPtr, dataU32, NV_FALSE, usec, needsNvkmsLock);
}
return timer;
}
+nvkms_timer_handle_t*
+nvkms_alloc_timer(nvkms_timer_proc_t *proc,
+ void *dataPtr, NvU32 dataU32,
+ NvU64 usec)
+{
+ return nvkms_alloc_timer_locked(proc, dataPtr, dataU32, usec, NV_TRUE);
+}
+
NvBool
nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc,
struct nvkms_ref_ptr *ref_ptr,
@@ -506,7 +526,7 @@ nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *pro
// Reference the ref_ptr to make sure that it doesn't get freed before
// the timer fires.
nvkms_inc_ref(ref_ptr);
- nvkms_init_timer(timer, proc, ref_ptr, dataU32, NV_TRUE, usec);
+ nvkms_init_timer(timer, proc, ref_ptr, dataU32, NV_TRUE, usec, NV_TRUE);
}
return timer != NULL;
@@ -570,10 +590,11 @@ nvkms_event_queue_changed(nvkms_per_open_handle_t *pOp
break;
case NVKMS_CLIENT_KERNEL_SPACE:
if (!popen->kernel.task) {
- popen->kernel.task = nvkms_alloc_timer(nvkms_kapi_task_callback,
- popen,
- 0, /* dataU32 */
- 0 /* callout delay */);
+ popen->kernel.task = nvkms_alloc_timer_locked(nvkms_kapi_task_callback,
+ popen,
+ 0, /* dataU32 */
+ 0, /* callout delay */
+ NV_FALSE);
}
break;
}
@@ -828,10 +849,11 @@ static struct nvkms_per_open *nvkms_open_common(enum N
case NVKMS_CLIENT_KERNEL_SPACE:
/* enqueue our new task */
popen->kernel.device = device;
- popen->kernel.task = nvkms_alloc_timer(nvkms_kapi_task_callback,
- popen,
- 0, /* dataU32 */
- 0 /* callout delay */);
+ popen->kernel.task = nvkms_alloc_timer_locked(nvkms_kapi_task_callback,
+ popen,
+ 0, /* dataU32 */
+ 0, /* callout delay */
+ NV_FALSE);
break;
}