Sat Feb 12 17:09:07 2022 UTC
hppa: Membar audit in ipifuncs.c.


(riastradh)
diff -r1.5 -r1.6 src/sys/arch/hppa/hppa/ipifuncs.c

cvs diff -r1.5 -r1.6 src/sys/arch/hppa/hppa/ipifuncs.c (expand / switch to unified diff)

--- src/sys/arch/hppa/hppa/ipifuncs.c 2019/04/15 20:45:08 1.5
+++ src/sys/arch/hppa/hppa/ipifuncs.c 2022/02/12 17:09:07 1.6
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ipifuncs.c,v 1.5 2019/04/15 20:45:08 skrll Exp $ */ 1/* $NetBSD: ipifuncs.c,v 1.6 2022/02/12 17:09:07 riastradh Exp $ */
2/* $OpenBSD: ipi.c,v 1.4 2011/01/14 13:20:06 jsing Exp $ */ 2/* $OpenBSD: ipi.c,v 1.4 2011/01/14 13:20:06 jsing Exp $ */
3 3
4/* 4/*
5 * Copyright (c) 2010 Joel Sing <jsing@openbsd.org> 5 * Copyright (c) 2010 Joel Sing <jsing@openbsd.org>
6 * 6 *
7 * Permission to use, copy, modify, and distribute this software for any 7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above 8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies. 9 * copyright notice and this permission notice appear in all copies.
10 * 10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
@@ -73,55 +73,68 @@ hppa_ipi_init(struct cpu_info *ci) @@ -73,55 +73,68 @@ hppa_ipi_init(struct cpu_info *ci)
73 } 73 }
74} 74}
75 75
76int 76int
77hppa_ipi_intr(void *arg) 77hppa_ipi_intr(void *arg)
78{ 78{
79 struct cpu_info *ci = curcpu(); 79 struct cpu_info *ci = curcpu();
80 struct cpu_softc *sc = ci->ci_softc; 80 struct cpu_softc *sc = ci->ci_softc;
81 u_long ipi_pending; 81 u_long ipi_pending;
82 int bit = 0; 82 int bit = 0;
83 83
84 /* Handle an IPI. */ 84 /* Handle an IPI. */
85 ipi_pending = atomic_swap_ulong(&ci->ci_ipi, 0); 85 ipi_pending = atomic_swap_ulong(&ci->ci_ipi, 0);
 86 membar_enter(); /* matches membar_exit in xc_send_ipi, cpu_ipi */
86 87
87 KASSERT(ipi_pending); 88 KASSERT(ipi_pending);
88 89
89 sc->sc_evcnt_ipi.ev_count++; 90 sc->sc_evcnt_ipi.ev_count++;
90 91
91 while (ipi_pending) { 92 while (ipi_pending) {
92 if (ipi_pending & (1L << bit)) { 93 if (ipi_pending & (1L << bit)) {
93 sc->sc_evcnt_which_ipi[bit].ev_count++; 94 sc->sc_evcnt_which_ipi[bit].ev_count++;
94 (*ipifunc[bit])(); 95 (*ipifunc[bit])();
95 } 96 }
96 ipi_pending &= ~(1L << bit); 97 ipi_pending &= ~(1L << bit);
97 bit++; 98 bit++;
98 } 99 }
99 100
100 return 1; 101 return 1;
101} 102}
102 103
103int 104int
104hppa_ipi_send(struct cpu_info *ci, u_long ipi) 105hppa_ipi_send(struct cpu_info *ci, u_long ipi)
105{ 106{
106 struct iomod *cpu; 107 struct iomod *cpu;
107 KASSERT(ci->ci_flags & CPUF_RUNNING); 108 KASSERT(ci->ci_flags & CPUF_RUNNING);
108 109
109 atomic_or_ulong(&ci->ci_ipi, (1L << ipi)); 110 atomic_or_ulong(&ci->ci_ipi, (1L << ipi));
110 111
111 /* Send an IPI to the specified CPU by triggering EIR{1} (irq 30). */ 112 /*
 113 * Send an IPI to the specified CPU by triggering EIR{1} (irq 30).
 114 *
 115 * The `load-acquire operation' matching this store-release is
 116 * somewhere inside the silicon or firmware -- the point is
 117 * that the store to ci->ci_ipi above must happen before
 118 * writing to EIR{1}; there is conceptually some magic inside
 119 * the silicon or firmware on the target CPU that effectively
 120 * does
 121 *
 122 * if (atomic_load_acquire(&cpu->io_eir)) {
 123 * enter_interrupt_vector();
 124 * }
 125 */
112 cpu = (struct iomod *)(ci->ci_hpa); 126 cpu = (struct iomod *)(ci->ci_hpa);
113 cpu->io_eir = 1; 127 atomic_store_release(&cpu->io_eir, 1);
114 membar_sync(); 
115 128
116 return 0; 129 return 0;
117} 130}
118 131
119int 132int
120hppa_ipi_broadcast(u_long ipi) 133hppa_ipi_broadcast(u_long ipi)
121{ 134{
122 CPU_INFO_ITERATOR cii; 135 CPU_INFO_ITERATOR cii;
123 struct cpu_info *ci; 136 struct cpu_info *ci;
124 int count = 0; 137 int count = 0;
125 138
126 for (CPU_INFO_FOREACH(cii, ci)) { 139 for (CPU_INFO_FOREACH(cii, ci)) {
127 if (ci != curcpu() && (ci->ci_flags & CPUF_RUNNING)) 140 if (ci != curcpu() && (ci->ci_flags & CPUF_RUNNING))
@@ -146,36 +159,38 @@ hppa_ipi_halt(void) @@ -146,36 +159,38 @@ hppa_ipi_halt(void)
146// hppa_intr_disable(); 159// hppa_intr_disable();
147 ci->ci_flags &= ~CPUF_RUNNING; 160 ci->ci_flags &= ~CPUF_RUNNING;
148 161
149 for (;;) 162 for (;;)
150 ; 163 ;
151} 164}
152 165
153void 166void
154xc_send_ipi(struct cpu_info *ci) 167xc_send_ipi(struct cpu_info *ci)
155{ 168{
156 KASSERT(kpreempt_disabled()); 169 KASSERT(kpreempt_disabled());
157 KASSERT(curcpu() != ci); 170 KASSERT(curcpu() != ci);
158 171
 172 membar_exit(); /* matches membar_enter in hppa_ipi_intr */
159 if (ci) { 173 if (ci) {
160 /* Unicast: remote CPU. */ 174 /* Unicast: remote CPU. */
161 hppa_ipi_send(ci, HPPA_IPI_XCALL); 175 hppa_ipi_send(ci, HPPA_IPI_XCALL);
162 } else { 176 } else {
163 /* Broadcast: all, but local CPU (caller will handle it). */ 177 /* Broadcast: all, but local CPU (caller will handle it). */
164 hppa_ipi_broadcast(HPPA_IPI_XCALL); 178 hppa_ipi_broadcast(HPPA_IPI_XCALL);
165 } 179 }
166} 180}
167 181
168void 182void
169cpu_ipi(struct cpu_info *ci) 183cpu_ipi(struct cpu_info *ci)
170{ 184{
171 KASSERT(kpreempt_disabled()); 185 KASSERT(kpreempt_disabled());
172 KASSERT(curcpu() != ci); 186 KASSERT(curcpu() != ci);
173 187
 188 membar_exit(); /* matches membar_enter in hppa_ipi_intr */
174 if (ci) { 189 if (ci) {
175 /* Unicast: remote CPU. */ 190 /* Unicast: remote CPU. */
176 hppa_ipi_send(ci, HPPA_IPI_GENERIC); 191 hppa_ipi_send(ci, HPPA_IPI_GENERIC);
177 } else { 192 } else {
178 /* Broadcast: all, but local CPU (caller will handle it). */ 193 /* Broadcast: all, but local CPU (caller will handle it). */
179 hppa_ipi_broadcast(HPPA_IPI_GENERIC); 194 hppa_ipi_broadcast(HPPA_IPI_GENERIC);
180 } 195 }
181} 196}