Mon Aug 27 00:56:38 2018 UTC ()
Handle uvm object reference counts in uvm_map more carefully.

Acquire a reference unconditionally first; then let uvm_map consume
it on success, and release it ourselves on failure.

The way we did it before -- acquiring a fresh reference only after
uvm_map succeeded -- another thread could release the reference with a
concurrent uvm_unmap before we acquired ours, thereby destroying the
object.
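A minimal sketch of the fixed ordering, under the assumption stated
above that uvm_map consumes one reference to the backing uvm_object on
success (released later by uvm_unmap); map_uobj and its arguments are
illustrative placeholders, not the literal i915 code:

	#include <sys/types.h>
	#include <uvm/uvm_extern.h>

	/*
	 * Hypothetical helper: map uobj into map without disturbing
	 * the caller's own reference, whatever the outcome.
	 */
	static int
	map_uobj(struct vm_map *map, struct uvm_object *uobj,
	    vsize_t size, vaddr_t *addrp, uvm_flag_t flags)
	{
		int error;

		/* Acquire the reference uvm_map consumes on success. */
		uao_reference(uobj);
		error = uvm_map(map, addrp, size, uobj, 0, 0, flags);
		if (error) {
			/* uvm_map did not consume it; drop it here. */
			uao_detach(uobj);
			return error;
		}
		/*
		 * Do NOT uao_reference here, as the old code did:
		 * between uvm_map's return and that call, a concurrent
		 * uvm_unmap could drop the consumed reference and
		 * destroy uobj out from under us.
		 */
		return 0;
	}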

XXX pullup-7
XXX pullup-8


(riastradh)
cvs diff -r1.34 -r1.35 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c 2017/10/28 00:37:13 1.34
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c 2018/08/27 00:56:38 1.35
@@ -1771,6 +1771,8 @@
 #endif
 
 #ifdef __NetBSD__
+	/* Acquire a reference for uvm_map to consume.  */
+	uao_reference(obj->gemo_shm_uao);
 	addr = (*curproc->p_emul->e_vm_default_addr)(curproc,
 	    (vaddr_t)curproc->p_vmspace->vm_daddr, args->size,
 	    curproc->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
@@ -1781,10 +1783,10 @@
 		(VM_PROT_READ | VM_PROT_WRITE), UVM_INH_COPY, UVM_ADV_NORMAL,
 		0));
 	if (ret) {
+		uao_detach(obj->gemo_shm_uao);
 		drm_gem_object_unreference_unlocked(obj);
 		return ret;
 	}
-	uao_reference(obj->gemo_shm_uao);
 	drm_gem_object_unreference_unlocked(obj);
 #else
 	addr = vm_mmap(obj->filp, 0, args->size,