Tue Feb 28 17:35:29 2017 UTC ()
In cpu_switchto() and the fast-softint context switch code,
put back the stwcx. instruction to clear the reservation.
We used to have this in the old cpu_switch() until it was
if-0'd in 2003 and removed completely in 2007.
This fixes hangs I've seen where a softint thread is
blocked waiting for a mutex that is not actually held.
This should also fix PR 44387.


(chs)
diff -r1.54 -r1.55 src/sys/arch/powerpc/powerpc/locore_subr.S

cvs diff -r1.54 -r1.55 src/sys/arch/powerpc/powerpc/locore_subr.S (expand / switch to unified diff)

--- src/sys/arch/powerpc/powerpc/locore_subr.S 2014/07/30 23:27:55 1.54
+++ src/sys/arch/powerpc/powerpc/locore_subr.S 2017/02/28 17:35:29 1.55
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: locore_subr.S,v 1.54 2014/07/30 23:27:55 matt Exp $ */ 1/* $NetBSD: locore_subr.S,v 1.55 2017/02/28 17:35:29 chs Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001 Wasabi Systems, Inc. 4 * Copyright (c) 2001 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc. 7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -278,26 +278,27 @@ switchto_restore: @@ -278,26 +278,27 @@ switchto_restore:
278 * Move back old-lwp and new-lwp to r3 and r4. We need to return 278 * Move back old-lwp and new-lwp to r3 and r4. We need to return
279 * r3. However, lwp_startup needs r4 and we return to fork_trampoline 279 * r3. However, lwp_startup needs r4 and we return to fork_trampoline
280 * will directly invoke lwp_startup. So we "waste" an instruction by 280 * will directly invoke lwp_startup. So we "waste" an instruction by
281 * always doing it here. 281 * always doing it here.
282 */ 282 */
283 mr %r3,%r30 283 mr %r3,%r30
284 mr %r4,%r31 284 mr %r4,%r31
285 285
286/* 286/*
287 * Note that callframe linkages are setup in cpu_lwp_fork(). 287 * Note that callframe linkages are setup in cpu_lwp_fork().
288 */ 288 */
289 ldreg %r31,CFRAME_R31(%r1) /* restore saved registers */ 289 ldreg %r31,CFRAME_R31(%r1) /* restore saved registers */
290 ldreg %r30,CFRAME_R30(%r1) 290 ldreg %r30,CFRAME_R30(%r1)
 291 stwcx. %r1,0,%r1 /* clear reservation */
291#if 1 292#if 1
292 addi %r1,%r1,CALLFRAMELEN 293 addi %r1,%r1,CALLFRAMELEN
293#else 294#else
294 ldreg %r1,CFRAME_SP(%r1) /* pop stack frmae */ 295 ldreg %r1,CFRAME_SP(%r1) /* pop stack frmae */
295#endif 296#endif
296 ldreg %r0,CFRAME_LR(%r1) 297 ldreg %r0,CFRAME_LR(%r1)
297 mtlr %r0 298 mtlr %r0
298 blr /* CPUINIT needs a raw blr */ 299 blr /* CPUINIT needs a raw blr */
299 300
300ENTRY_NOPROFILE(emptyidlespin) 301ENTRY_NOPROFILE(emptyidlespin)
301#ifdef DIAGNOSTIC 302#ifdef DIAGNOSTIC
302 GET_CPUINFO(%r3) 303 GET_CPUINFO(%r3)
303 lbz %r4,CI_CPL(%r3) 304 lbz %r4,CI_CPL(%r3)
@@ -318,26 +319,27 @@ _ENTRY(softint_cleanup) @@ -318,26 +319,27 @@ _ENTRY(softint_cleanup)
318 ldint %r5, CI_MTX_COUNT(%r7) 319 ldint %r5, CI_MTX_COUNT(%r7)
319 addi %r5, %r5, 1 320 addi %r5, %r5, 1
320 stint %r5, CI_MTX_COUNT(%r7) 321 stint %r5, CI_MTX_COUNT(%r7)
321 li %r0, 0 322 li %r0, 0
322 stptr %r0, L_CTXSWTCH(%r3) /* clear ctxswitch of old lwp */ 323 stptr %r0, L_CTXSWTCH(%r3) /* clear ctxswitch of old lwp */
323 ldreg %r0, CFRAME_R31(%r1) /* get saved MSR */ 324 ldreg %r0, CFRAME_R31(%r1) /* get saved MSR */
324#if defined(PPC_IBM4XX) || defined(PPC_BOOKE) 325#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
325 wrtee %r0 /* restore EE */ 326 wrtee %r0 /* restore EE */
326#endif 327#endif
327#if defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE) || defined(PPC_OEA64) 328#if defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE) || defined(PPC_OEA64)
328 mtmsr %r0 329 mtmsr %r0
329 isync 330 isync
330#endif 331#endif
 332 stwcx. %r1,0,%r1 /* clear reservation */
331 addi %r1, %r1, CALLFRAMELEN 333 addi %r1, %r1, CALLFRAMELEN
332 ldreg %r0, CFRAME_LR(%r1) 334 ldreg %r0, CFRAME_LR(%r1)
333 mtlr %r0 335 mtlr %r0
334#if IPL_SCHED != IPL_HIGH 336#if IPL_SCHED != IPL_HIGH
335 li %r3, IPL_HIGH 337 li %r3, IPL_HIGH
336 b _C_LABEL(splraise) 338 b _C_LABEL(splraise)
337#else 339#else
338 blr 340 blr
339#endif /* IPL SCHED != IPL_HIGH */ 341#endif /* IPL SCHED != IPL_HIGH */
340 342
341_ENTRY(softint_fast_dispatch) 343_ENTRY(softint_fast_dispatch)
342 /* 344 /*
343 * Our call frame which softint will grab LR from. 345 * Our call frame which softint will grab LR from.
@@ -468,26 +470,27 @@ _ENTRY(softint_fast_dispatch) @@ -468,26 +470,27 @@ _ENTRY(softint_fast_dispatch)
468#endif 470#endif
469#if 0 471#if 0
470 addi %r1,%r1,SFRAMELEN /* remove switch frame */ 472 addi %r1,%r1,SFRAMELEN /* remove switch frame */
471 473
472 ldreg %r31,CFRAME_R31(%r1) /* restore saved registers */ 474 ldreg %r31,CFRAME_R31(%r1) /* restore saved registers */
473 ldreg %r30,CFRAME_R30(%r1) /* from switchto callframe */ 475 ldreg %r30,CFRAME_R30(%r1) /* from switchto callframe */
474 addi %r1,%r1,CALLFRAMELEN /* remove switchto call frame */ 476 addi %r1,%r1,CALLFRAMELEN /* remove switchto call frame */
475 addi %r1,%r1,CALLFRAMELEN /* remove our call frame */ 477 addi %r1,%r1,CALLFRAMELEN /* remove our call frame */
476#else 478#else
477 ldreg %r28,SFRAME_R28(%r1) /* R28 */ 479 ldreg %r28,SFRAME_R28(%r1) /* R28 */
478 ldreg %r29,SFRAME_R29(%r1) /* R29 */ 480 ldreg %r29,SFRAME_R29(%r1) /* R29 */
479 ldreg %r31,SFRAMELEN+CFRAME_R31(%r1) /* restore saved registers */ 481 ldreg %r31,SFRAMELEN+CFRAME_R31(%r1) /* restore saved registers */
480 ldreg %r30,SFRAMELEN+CFRAME_R30(%r1) 482 ldreg %r30,SFRAMELEN+CFRAME_R30(%r1)
 483 stwcx. %r1,0,%r1 /* clear reservation */
481 addi %r1,%r1,SFRAMELEN+2*CALLFRAMELEN /* remove switch & callframes */ 484 addi %r1,%r1,SFRAMELEN+2*CALLFRAMELEN /* remove switch & callframes */
482#endif 485#endif
483 ldreg %r0,CFRAME_LR(%r1) 486 ldreg %r0,CFRAME_LR(%r1)
484 mtlr %r0 487 mtlr %r0
485 blr 488 blr
486#endif /* __HAVE_FAST_SOFTINTS */ 489#endif /* __HAVE_FAST_SOFTINTS */
487 490
488/* 491/*
489 * Child comes here at the end of a fork. 492 * Child comes here at the end of a fork.
490 * Return to userspace via the trap return path. 493 * Return to userspace via the trap return path.
491 */ 494 */
492 .globl _C_LABEL(cpu_lwp_bootstrap) 495 .globl _C_LABEL(cpu_lwp_bootstrap)
493_ENTRY(cpu_lwp_bootstrap) 496_ENTRY(cpu_lwp_bootstrap)