Wed Oct 9 17:47:13 2019 UTC
simpler fix for the race between shmat() and shmdt():
change shmat() to hold shm_lock until it is completely done.


(chs)
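
The bug being fixed is the classic unlock/relock window: the old shmat() dropped shm_lock around uvm_map() and re-acquired it afterwards, so a concurrent shmdt() could observe a segment whose attach was only half done. Holding the lock across the whole attach closes that window. Below is a minimal userland sketch of the pattern, with purely illustrative names (seg_lock, segment, attach, detach) rather than the kernel code:

/*
 * Sketch of the locking pattern this commit adopts: instead of
 * dropping a lock around a long operation and re-taking it afterwards,
 * the attach path holds the lock from start to finish.  Names here are
 * hypothetical; this is not the NetBSD kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t seg_lock = PTHREAD_MUTEX_INITIALIZER;

struct segment {
	int	nattch;		/* attach count, protected by seg_lock */
	bool	mapped;		/* is the mapping fully established? */
};

/* Attach: hold seg_lock for the entire operation. */
static void
attach(struct segment *seg)
{
	pthread_mutex_lock(&seg_lock);
	seg->nattch++;
	/*
	 * In the racy version the lock was dropped here to do the
	 * mapping and re-acquired afterwards; a detach running in that
	 * window saw nattch > 0 but mapped == false.  Keeping the lock
	 * held closes the window.
	 */
	seg->mapped = true;
	pthread_mutex_unlock(&seg_lock);
}

/* Detach: the attach is now guaranteed to be complete or absent. */
static void
detach(struct segment *seg)
{
	pthread_mutex_lock(&seg_lock);
	if (seg->nattch > 0) {
		seg->nattch--;
		if (seg->nattch == 0)
			seg->mapped = false;
	}
	pthread_mutex_unlock(&seg_lock);
}

int
main(void)
{
	struct segment seg = { 0, false };

	attach(&seg);
	detach(&seg);
	printf("nattch=%d mapped=%d\n", seg.nattch, seg.mapped);
	return 0;
}

The same reasoning applies in the kernel: once shm_lock is held from entry to exit of the attach, a concurrent shmdt() can only run strictly before or strictly after the whole operation.
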
diff -r1.140 -r1.141 src/sys/kern/sysv_shm.c

--- src/sys/kern/sysv_shm.c 2019/10/09 17:44:45 1.140
+++ src/sys/kern/sysv_shm.c 2019/10/09 17:47:13 1.141
@@ -1,14 +1,14 @@
-/*	$NetBSD: sysv_shm.c,v 1.140 2019/10/09 17:44:45 chs Exp $	*/
+/*	$NetBSD: sysv_shm.c,v 1.141 2019/10/09 17:47:13 chs Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, and by Mindaugas Rasiukevicius.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -51,27 +51,27 @@
  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.140 2019/10/09 17:44:45 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.141 2019/10/09 17:47:13 chs Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_sysv.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/shm.h>
 #include <sys/mutex.h>
 #include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/sysctl.h>
@@ -427,74 +427,59 @@ sys_shmat(struct lwp *l, const struct sy
 		else {
 			error = EINVAL;
 			goto err;
 		}
 	} else {
 		/* This is just a hint to uvm_map() about where to put it. */
 		attach_va = p->p_emul->e_vm_default_addr(p,
 		    (vaddr_t)vm->vm_daddr, size,
 		    p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
 	}
 
 	/*
 	 * Create a map entry, add it to the list and increase the counters.
-	 * The lock will be dropped before the mapping, disable reallocation.
 	 */
 	shmmap_s = shmmap_getprivate(p);
 	SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
 	shmmap_s->nitems++;
 	shmseg->shm_lpid = p->p_pid;
 	shmseg->shm_nattch++;
-	shm_realloc_disable++;
 
 	/*
-	 * Add a reference to the uvm object while we hold the
-	 * shm_lock.
+	 * Map the segment into the address space.
 	 */
 	uobj = shmseg->_shm_internal;
 	uao_reference(uobj);
-	mutex_exit(&shm_lock);
-
-	/*
-	 * Drop the shm_lock to map it into the address space, and lock
-	 * the memory, if needed (XXX where does this lock memory?).
-	 */
 	error = uvm_map(&vm->vm_map, &attach_va, size, uobj, 0, 0,
 	    UVM_MAPFLAG(prot, prot, UVM_INH_SHARE, UVM_ADV_RANDOM, flags));
 	if (error)
 		goto err_detach;
 
 	/* Set the new address, and update the time */
-	mutex_enter(&shm_lock);
 	shmmap_se->va = attach_va;
 	shmseg->shm_atime = time_second;
-	shm_realloc_disable--;
 	retval[0] = attach_va;
 	SHMPRINTF(("shmat: vm %p: add %d @%lx\n",
 	    p->p_vmspace, shmmap_se->shmid, attach_va));
 err:
-	cv_broadcast(&shm_realloc_cv);
 	mutex_exit(&shm_lock);
 	if (error && shmmap_se) {
 		kmem_free(shmmap_se, sizeof(struct shmmap_entry));
 	}
 	return error;
 
 err_detach:
 	uao_detach(uobj);
-	mutex_enter(&shm_lock);
 	uobj = shm_delete_mapping(shmmap_s, shmmap_se);
-	shm_realloc_disable--;
-	cv_broadcast(&shm_realloc_cv);
 	mutex_exit(&shm_lock);
 	if (uobj != NULL) {
 		uao_detach(uobj);
 	}
 	kmem_free(shmmap_se, sizeof(struct shmmap_entry));
 	return error;
 }
 
 /*
  * Shared memory control operations.
  */
 int
 sys___shmctl50(struct lwp *l, const struct sys___shmctl50_args *uap,
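
Worth noting from the diff itself: once shm_lock stays held across uvm_map(), the shm_realloc_disable counter and the cv_broadcast(&shm_realloc_cv) wakeups have no job left on this path, so both the err: and err_detach: exits lose them, and err_detach: no longer needs to re-enter the lock before calling shm_delete_mapping().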