Fri Oct 26 05:28:41 2012 UTC ()
When writing intmask from a softint/workqueue thread, make sure to do so
only while holding the hwlock.


(matt)
diff -r1.13 -r1.14 src/sys/arch/arm/broadcom/bcm53xx_eth.c

cvs diff -r1.13 -r1.14 src/sys/arch/arm/broadcom/bcm53xx_eth.c (expand / switch to unified diff)

--- src/sys/arch/arm/broadcom/bcm53xx_eth.c 2012/10/26 05:11:34 1.13
+++ src/sys/arch/arm/broadcom/bcm53xx_eth.c 2012/10/26 05:28:41 1.14
@@ -24,27 +24,27 @@ @@ -24,27 +24,27 @@
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE. 27 * POSSIBILITY OF SUCH DAMAGE.
28 */ 28 */
29 29
30#define _ARM32_BUS_DMA_PRIVATE 30#define _ARM32_BUS_DMA_PRIVATE
31#define GMAC_PRIVATE 31#define GMAC_PRIVATE
32 32
33#include "locators.h" 33#include "locators.h"
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36 36
37__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.13 2012/10/26 05:11:34 matt Exp $"); 37__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.14 2012/10/26 05:28:41 matt Exp $");
38 38
39#include <sys/param.h> 39#include <sys/param.h>
40#include <sys/atomic.h> 40#include <sys/atomic.h>
41#include <sys/bus.h> 41#include <sys/bus.h>
42#include <sys/device.h> 42#include <sys/device.h>
43#include <sys/ioctl.h> 43#include <sys/ioctl.h>
44#include <sys/intr.h> 44#include <sys/intr.h>
45#include <sys/kmem.h> 45#include <sys/kmem.h>
46#include <sys/mutex.h> 46#include <sys/mutex.h>
47#include <sys/socket.h> 47#include <sys/socket.h>
48#include <sys/systm.h> 48#include <sys/systm.h>
49#include <sys/workqueue.h> 49#include <sys/workqueue.h>
50 50
@@ -128,27 +128,27 @@ struct bcmeth_softc { @@ -128,27 +128,27 @@ struct bcmeth_softc {
128 kmutex_t *sc_lock; 128 kmutex_t *sc_lock;
129 kmutex_t *sc_hwlock; 129 kmutex_t *sc_hwlock;
130 struct ethercom sc_ec; 130 struct ethercom sc_ec;
131#define sc_if sc_ec.ec_if 131#define sc_if sc_ec.ec_if
132 struct ifmedia sc_media; 132 struct ifmedia sc_media;
133 void *sc_soft_ih; 133 void *sc_soft_ih;
134 void *sc_ih; 134 void *sc_ih;
135 135
136 struct bcmeth_rxqueue sc_rxq; 136 struct bcmeth_rxqueue sc_rxq;
137 struct bcmeth_txqueue sc_txq; 137 struct bcmeth_txqueue sc_txq;
138 138
139 uint32_t sc_maxfrm; 139 uint32_t sc_maxfrm;
140 uint32_t sc_cmdcfg; 140 uint32_t sc_cmdcfg;
141 uint32_t sc_intmask; 141 volatile uint32_t sc_intmask;
142 uint32_t sc_rcvlazy; 142 uint32_t sc_rcvlazy;
143 volatile uint32_t sc_soft_flags; 143 volatile uint32_t sc_soft_flags;
144#define SOFT_RXINTR 0x01 144#define SOFT_RXINTR 0x01
145#define SOFT_TXINTR 0x02 145#define SOFT_TXINTR 0x02
146 146
147 struct evcnt sc_ev_intr; 147 struct evcnt sc_ev_intr;
148 struct evcnt sc_ev_soft_intr; 148 struct evcnt sc_ev_soft_intr;
149 struct evcnt sc_ev_work; 149 struct evcnt sc_ev_work;
150 struct evcnt sc_ev_tx_stall; 150 struct evcnt sc_ev_tx_stall;
151 struct evcnt sc_ev_rx_badmagic_lo; 151 struct evcnt sc_ev_rx_badmagic_lo;
152 struct evcnt sc_ev_rx_badmagic_hi; 152 struct evcnt sc_ev_rx_badmagic_hi;
153 153
154 struct ifqueue sc_rx_bufcache; 154 struct ifqueue sc_rx_bufcache;
@@ -1698,27 +1698,29 @@ bcmeth_soft_intr(void *arg) @@ -1698,27 +1698,29 @@ bcmeth_soft_intr(void *arg)
1698 atomic_or_32(&sc->sc_intmask, XMTINT_0); 1698 atomic_or_32(&sc->sc_intmask, XMTINT_0);
1699 } 1699 }
1700 1700
1701 if (soft_flags & SOFT_RXINTR) { 1701 if (soft_flags & SOFT_RXINTR) {
1702 /* 1702 /*
1703 * Let's consume  1703 * Let's consume
1704 */ 1704 */
1705 bcmeth_rxq_consume(sc, &sc->sc_rxq); 1705 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1706 atomic_or_32(&sc->sc_intmask, RCVINT); 1706 atomic_or_32(&sc->sc_intmask, RCVINT);
1707 } 1707 }
1708 1708
1709 if (ifp->if_flags & IFF_RUNNING) { 1709 if (ifp->if_flags & IFF_RUNNING) {
1710 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1710 bcmeth_rxq_produce(sc, &sc->sc_rxq);
 1711 mutex_spin_enter(sc->sc_hwlock);
1711 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1712 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
 1713 mutex_spin_exit(sc->sc_hwlock);
1712 } 1714 }
1713 1715
1714 mutex_exit(sc->sc_lock); 1716 mutex_exit(sc->sc_lock);
1715} 1717}
1716 1718
1717void 1719void
1718bcmeth_worker(struct work *wk, void *arg) 1720bcmeth_worker(struct work *wk, void *arg)
1719{ 1721{
1720 struct bcmeth_softc * const sc = arg; 1722 struct bcmeth_softc * const sc = arg;
1721 struct ifnet * const ifp = &sc->sc_if; 1723 struct ifnet * const ifp = &sc->sc_if;
1722 1724
1723 mutex_enter(sc->sc_lock); 1725 mutex_enter(sc->sc_lock);
1724 1726
@@ -1747,18 +1749,20 @@ bcmeth_worker(struct work *wk, void *arg @@ -1747,18 +1749,20 @@ bcmeth_worker(struct work *wk, void *arg
1747 rxq->rxq_threshold = threshold; 1749 rxq->rxq_threshold = threshold;
1748 } 1750 }
1749 1751
1750 if (work_flags & WORK_RXINTR) { 1752 if (work_flags & WORK_RXINTR) {
1751 /* 1753 /*
1752 * Let's consume  1754 * Let's consume
1753 */ 1755 */
1754 bcmeth_rxq_consume(sc, &sc->sc_rxq); 1756 bcmeth_rxq_consume(sc, &sc->sc_rxq);
1755 atomic_or_32(&sc->sc_intmask, RCVINT); 1757 atomic_or_32(&sc->sc_intmask, RCVINT);
1756 } 1758 }
1757 1759
1758 if (ifp->if_flags & IFF_RUNNING) { 1760 if (ifp->if_flags & IFF_RUNNING) {
1759 bcmeth_rxq_produce(sc, &sc->sc_rxq); 1761 bcmeth_rxq_produce(sc, &sc->sc_rxq);
 1762 mutex_spin_enter(sc->sc_hwlock);
1760 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask); 1763 bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
 1764 mutex_spin_exit(sc->sc_hwlock);
1761 } 1765 }
1762 1766
1763 mutex_exit(sc->sc_lock); 1767 mutex_exit(sc->sc_lock);
1764} 1768}