Fri Aug 30 05:03:32 2019 UTC ()
vmxnet3_softc.vmx_stats should not count globally. Pointed out by hikaru@n.o.

Divide vmxnet3_softc.vmx_stats into per-queue counters in each vmxnet3_txqueue
and vmxnet3_rxqueue; furthermore, convert them to evcnt.


(knakahara)
diff -r1.48 -r1.49 src/sys/arch/x86/pci/if_vmx.c

cvs diff -r1.48 -r1.49 src/sys/arch/x86/pci/Attic/if_vmx.c (expand / switch to unified diff)

--- src/sys/arch/x86/pci/Attic/if_vmx.c 2019/08/19 05:25:38 1.48
+++ src/sys/arch/x86/pci/Attic/if_vmx.c 2019/08/30 05:03:32 1.49
@@ -1,35 +1,35 @@ @@ -1,35 +1,35 @@
1/* $NetBSD: if_vmx.c,v 1.48 2019/08/19 05:25:38 knakahara Exp $ */ 1/* $NetBSD: if_vmx.c,v 1.49 2019/08/30 05:03:32 knakahara Exp $ */
2/* $OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $ */ 2/* $OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $ */
3 3
4/* 4/*
5 * Copyright (c) 2013 Tsubai Masanari 5 * Copyright (c) 2013 Tsubai Masanari
6 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org> 6 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
7 * 7 *
8 * Permission to use, copy, modify, and distribute this software for any 8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above 9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies. 10 * copyright notice and this permission notice appear in all copies.
11 * 11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */ 19 */
20 20
21#include <sys/cdefs.h> 21#include <sys/cdefs.h>
22__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.48 2019/08/19 05:25:38 knakahara Exp $"); 22__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.49 2019/08/30 05:03:32 knakahara Exp $");
23 23
24#include <sys/param.h> 24#include <sys/param.h>
25#include <sys/cpu.h> 25#include <sys/cpu.h>
26#include <sys/kernel.h> 26#include <sys/kernel.h>
27#include <sys/kmem.h> 27#include <sys/kmem.h>
28#include <sys/bus.h> 28#include <sys/bus.h>
29#include <sys/device.h> 29#include <sys/device.h>
30#include <sys/mbuf.h> 30#include <sys/mbuf.h>
31#include <sys/sockio.h> 31#include <sys/sockio.h>
32#include <sys/pcq.h> 32#include <sys/pcq.h>
33#include <sys/workqueue.h> 33#include <sys/workqueue.h>
34#include <sys/interrupt.h> 34#include <sys/interrupt.h>
35 35
@@ -197,70 +197,67 @@ struct vmxnet3_txqueue { @@ -197,70 +197,67 @@ struct vmxnet3_txqueue {
197 struct vmxnet3_comp_ring vxtxq_comp_ring; 197 struct vmxnet3_comp_ring vxtxq_comp_ring;
198 struct vmxnet3_txq_stats vxtxq_stats; 198 struct vmxnet3_txq_stats vxtxq_stats;
199 struct vmxnet3_txq_shared *vxtxq_ts; 199 struct vmxnet3_txq_shared *vxtxq_ts;
200 char vxtxq_name[16]; 200 char vxtxq_name[16];
201 201
202 void *vxtxq_si; 202 void *vxtxq_si;
203 203
204 struct evcnt vxtxq_intr; 204 struct evcnt vxtxq_intr;
205 struct evcnt vxtxq_defer; 205 struct evcnt vxtxq_defer;
206 struct evcnt vxtxq_deferreq; 206 struct evcnt vxtxq_deferreq;
207 struct evcnt vxtxq_pcqdrop; 207 struct evcnt vxtxq_pcqdrop;
208 struct evcnt vxtxq_transmitdef; 208 struct evcnt vxtxq_transmitdef;
209 struct evcnt vxtxq_watchdogto; 209 struct evcnt vxtxq_watchdogto;
 210 struct evcnt vxtxq_defragged;
 211 struct evcnt vxtxq_defrag_failed;
210}; 212};
211 213
212struct vmxnet3_rxq_stats { 214struct vmxnet3_rxq_stats {
213 uint64_t vmrxs_ipackets; /* if_ipackets */ 215 uint64_t vmrxs_ipackets; /* if_ipackets */
214 uint64_t vmrxs_ibytes; /* if_ibytes */ 216 uint64_t vmrxs_ibytes; /* if_ibytes */
215 uint64_t vmrxs_iqdrops; /* if_iqdrops */ 217 uint64_t vmrxs_iqdrops; /* if_iqdrops */
216 uint64_t vmrxs_ierrors; /* if_ierrors */ 218 uint64_t vmrxs_ierrors; /* if_ierrors */
217}; 219};
218 220
219struct vmxnet3_rxqueue { 221struct vmxnet3_rxqueue {
220 kmutex_t *vxrxq_mtx; 222 kmutex_t *vxrxq_mtx;
221 struct vmxnet3_softc *vxrxq_sc; 223 struct vmxnet3_softc *vxrxq_sc;
222 struct mbuf *vxrxq_mhead; 224 struct mbuf *vxrxq_mhead;
223 struct mbuf *vxrxq_mtail; 225 struct mbuf *vxrxq_mtail;
224 struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ]; 226 struct vmxnet3_rxring vxrxq_cmd_ring[VMXNET3_RXRINGS_PERQ];
225 struct vmxnet3_comp_ring vxrxq_comp_ring; 227 struct vmxnet3_comp_ring vxrxq_comp_ring;
226 struct vmxnet3_rxq_stats vxrxq_stats; 228 struct vmxnet3_rxq_stats vxrxq_stats;
227 struct vmxnet3_rxq_shared *vxrxq_rs; 229 struct vmxnet3_rxq_shared *vxrxq_rs;
228 char vxrxq_name[16]; 230 char vxrxq_name[16];
229 231
230 struct evcnt vxrxq_intr; 232 struct evcnt vxrxq_intr;
231 struct evcnt vxrxq_defer; 233 struct evcnt vxrxq_defer;
232 struct evcnt vxrxq_deferreq; 234 struct evcnt vxrxq_deferreq;
 235 struct evcnt vxrxq_mgetcl_failed;
 236 struct evcnt vxrxq_mbuf_load_failed;
233}; 237};
234 238
235struct vmxnet3_queue { 239struct vmxnet3_queue {
236 int vxq_id; 240 int vxq_id;
237 int vxq_intr_idx; 241 int vxq_intr_idx;
238 242
239 struct vmxnet3_txqueue vxq_txqueue; 243 struct vmxnet3_txqueue vxq_txqueue;
240 struct vmxnet3_rxqueue vxq_rxqueue; 244 struct vmxnet3_rxqueue vxq_rxqueue;
241 245
242 void *vxq_si; 246 void *vxq_si;
243 bool vxq_workqueue; 247 bool vxq_workqueue;
244 struct work vxq_wq_cookie; 248 struct work vxq_wq_cookie;
245}; 249};
246 250
247struct vmxnet3_statistics { 
248 uint32_t vmst_defragged; 
249 uint32_t vmst_defrag_failed; 
250 uint32_t vmst_mgetcl_failed; 
251 uint32_t vmst_mbuf_load_failed; 
252}; 
253 
254struct vmxnet3_softc { 251struct vmxnet3_softc {
255 device_t vmx_dev; 252 device_t vmx_dev;
256 struct ethercom vmx_ethercom; 253 struct ethercom vmx_ethercom;
257 struct ifmedia vmx_media; 254 struct ifmedia vmx_media;
258 struct vmxnet3_driver_shared *vmx_ds; 255 struct vmxnet3_driver_shared *vmx_ds;
259 int vmx_flags; 256 int vmx_flags;
260#define VMXNET3_FLAG_NO_MSIX (1 << 0) 257#define VMXNET3_FLAG_NO_MSIX (1 << 0)
261#define VMXNET3_FLAG_RSS (1 << 1) 258#define VMXNET3_FLAG_RSS (1 << 1)
262#define VMXNET3_FLAG_ATTACHED (1 << 2) 259#define VMXNET3_FLAG_ATTACHED (1 << 2)
263 260
264 struct vmxnet3_queue *vmx_queue; 261 struct vmxnet3_queue *vmx_queue;
265 262
266 struct pci_attach_args *vmx_pa; 263 struct pci_attach_args *vmx_pa;
@@ -271,27 +268,26 @@ struct vmxnet3_softc { @@ -271,27 +268,26 @@ struct vmxnet3_softc {
271 bus_space_handle_t vmx_ioh0; 268 bus_space_handle_t vmx_ioh0;
272 bus_space_handle_t vmx_ioh1; 269 bus_space_handle_t vmx_ioh1;
273 bus_size_t vmx_ios0; 270 bus_size_t vmx_ios0;
274 bus_size_t vmx_ios1; 271 bus_size_t vmx_ios1;
275 bus_dma_tag_t vmx_dmat; 272 bus_dma_tag_t vmx_dmat;
276 273
277 int vmx_link_active; 274 int vmx_link_active;
278 int vmx_ntxqueues; 275 int vmx_ntxqueues;
279 int vmx_nrxqueues; 276 int vmx_nrxqueues;
280 int vmx_ntxdescs; 277 int vmx_ntxdescs;
281 int vmx_nrxdescs; 278 int vmx_nrxdescs;
282 int vmx_max_rxsegs; 279 int vmx_max_rxsegs;
283 280
284 struct vmxnet3_statistics vmx_stats; 
285 struct evcnt vmx_event_intr; 281 struct evcnt vmx_event_intr;
286 struct evcnt vmx_event_link; 282 struct evcnt vmx_event_link;
287 struct evcnt vmx_event_txqerror; 283 struct evcnt vmx_event_txqerror;
288 struct evcnt vmx_event_rxqerror; 284 struct evcnt vmx_event_rxqerror;
289 struct evcnt vmx_event_dic; 285 struct evcnt vmx_event_dic;
290 struct evcnt vmx_event_debug; 286 struct evcnt vmx_event_debug;
291 287
292 int vmx_intr_type; 288 int vmx_intr_type;
293 int vmx_intr_mask_mode; 289 int vmx_intr_mask_mode;
294 int vmx_event_intr_idx; 290 int vmx_event_intr_idx;
295 int vmx_nintrs; 291 int vmx_nintrs;
296 pci_intr_handle_t *vmx_intrs; /* legacy use vmx_intrs[0] */ 292 pci_intr_handle_t *vmx_intrs; /* legacy use vmx_intrs[0] */
297 void *vmx_ihs[VMXNET3_MAX_INTRS]; 293 void *vmx_ihs[VMXNET3_MAX_INTRS];
@@ -385,27 +381,28 @@ void vmxnet3_free_rxq_data(struct vmxnet @@ -385,27 +381,28 @@ void vmxnet3_free_rxq_data(struct vmxnet
385int vmxnet3_alloc_queue_data(struct vmxnet3_softc *); 381int vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
386void vmxnet3_free_queue_data(struct vmxnet3_softc *); 382void vmxnet3_free_queue_data(struct vmxnet3_softc *);
387int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *); 383int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
388void vmxnet3_free_mcast_table(struct vmxnet3_softc *); 384void vmxnet3_free_mcast_table(struct vmxnet3_softc *);
389void vmxnet3_init_shared_data(struct vmxnet3_softc *); 385void vmxnet3_init_shared_data(struct vmxnet3_softc *);
390void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *); 386void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
391void vmxnet3_reinit_shared_data(struct vmxnet3_softc *); 387void vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
392int vmxnet3_alloc_data(struct vmxnet3_softc *); 388int vmxnet3_alloc_data(struct vmxnet3_softc *);
393void vmxnet3_free_data(struct vmxnet3_softc *); 389void vmxnet3_free_data(struct vmxnet3_softc *);
394int vmxnet3_setup_interface(struct vmxnet3_softc *); 390int vmxnet3_setup_interface(struct vmxnet3_softc *);
395 391
396void vmxnet3_evintr(struct vmxnet3_softc *); 392void vmxnet3_evintr(struct vmxnet3_softc *);
397bool vmxnet3_txq_eof(struct vmxnet3_txqueue *, u_int); 393bool vmxnet3_txq_eof(struct vmxnet3_txqueue *, u_int);
398int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *); 394int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxqueue *,
 395 struct vmxnet3_rxring *);
399void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *, 396void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
400 struct vmxnet3_rxring *, int); 397 struct vmxnet3_rxring *, int);
401void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *); 398void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *);
402void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *); 399void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
403void vmxnet3_rxq_input(struct vmxnet3_rxqueue *, 400void vmxnet3_rxq_input(struct vmxnet3_rxqueue *,
404 struct vmxnet3_rxcompdesc *, struct mbuf *); 401 struct vmxnet3_rxcompdesc *, struct mbuf *);
405bool vmxnet3_rxq_eof(struct vmxnet3_rxqueue *, u_int); 402bool vmxnet3_rxq_eof(struct vmxnet3_rxqueue *, u_int);
406int vmxnet3_legacy_intr(void *); 403int vmxnet3_legacy_intr(void *);
407int vmxnet3_txrxq_intr(void *); 404int vmxnet3_txrxq_intr(void *);
408void vmxnet3_handle_queue(void *); 405void vmxnet3_handle_queue(void *);
409void vmxnet3_handle_queue_work(struct work *, void *); 406void vmxnet3_handle_queue_work(struct work *, void *);
410int vmxnet3_event_intr(void *); 407int vmxnet3_event_intr(void *);
411 408
@@ -1961,37 +1958,45 @@ vmxnet3_setup_stats(struct vmxnet3_softc @@ -1961,37 +1958,45 @@ vmxnet3_setup_stats(struct vmxnet3_softc
1961 txq = &vmxq->vxq_txqueue; 1958 txq = &vmxq->vxq_txqueue;
1962 evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR, 1959 evcnt_attach_dynamic(&txq->vxtxq_intr, EVCNT_TYPE_INTR,
1963 NULL, txq->vxtxq_name, "Interrupt on queue"); 1960 NULL, txq->vxtxq_name, "Interrupt on queue");
1964 evcnt_attach_dynamic(&txq->vxtxq_defer, EVCNT_TYPE_MISC, 1961 evcnt_attach_dynamic(&txq->vxtxq_defer, EVCNT_TYPE_MISC,
1965 NULL, txq->vxtxq_name, "Handled queue in softint/workqueue"); 1962 NULL, txq->vxtxq_name, "Handled queue in softint/workqueue");
1966 evcnt_attach_dynamic(&txq->vxtxq_deferreq, EVCNT_TYPE_MISC, 1963 evcnt_attach_dynamic(&txq->vxtxq_deferreq, EVCNT_TYPE_MISC,
1967 NULL, txq->vxtxq_name, "Requested in softint/workqueue"); 1964 NULL, txq->vxtxq_name, "Requested in softint/workqueue");
1968 evcnt_attach_dynamic(&txq->vxtxq_pcqdrop, EVCNT_TYPE_MISC, 1965 evcnt_attach_dynamic(&txq->vxtxq_pcqdrop, EVCNT_TYPE_MISC,
1969 NULL, txq->vxtxq_name, "Dropped in pcq"); 1966 NULL, txq->vxtxq_name, "Dropped in pcq");
1970 evcnt_attach_dynamic(&txq->vxtxq_transmitdef, EVCNT_TYPE_MISC, 1967 evcnt_attach_dynamic(&txq->vxtxq_transmitdef, EVCNT_TYPE_MISC,
1971 NULL, txq->vxtxq_name, "Deferred transmit"); 1968 NULL, txq->vxtxq_name, "Deferred transmit");
1972 evcnt_attach_dynamic(&txq->vxtxq_watchdogto, EVCNT_TYPE_MISC, 1969 evcnt_attach_dynamic(&txq->vxtxq_watchdogto, EVCNT_TYPE_MISC,
1973 NULL, txq->vxtxq_name, "Watchdog timeount"); 1970 NULL, txq->vxtxq_name, "Watchdog timeount");
 1971 evcnt_attach_dynamic(&txq->vxtxq_defragged, EVCNT_TYPE_MISC,
 1972 NULL, txq->vxtxq_name, "m_defrag sucessed");
 1973 evcnt_attach_dynamic(&txq->vxtxq_defrag_failed, EVCNT_TYPE_MISC,
 1974 NULL, txq->vxtxq_name, "m_defrag failed");
1974 } 1975 }
1975 1976
1976 for (i = 0; i < sc->vmx_nrxqueues; i++) { 1977 for (i = 0; i < sc->vmx_nrxqueues; i++) {
1977 vmxq = &sc->vmx_queue[i]; 1978 vmxq = &sc->vmx_queue[i];
1978 rxq = &vmxq->vxq_rxqueue; 1979 rxq = &vmxq->vxq_rxqueue;
1979 evcnt_attach_dynamic(&rxq->vxrxq_intr, EVCNT_TYPE_INTR, 1980 evcnt_attach_dynamic(&rxq->vxrxq_intr, EVCNT_TYPE_INTR,
1980 NULL, rxq->vxrxq_name, "Interrupt on queue"); 1981 NULL, rxq->vxrxq_name, "Interrupt on queue");
1981 evcnt_attach_dynamic(&rxq->vxrxq_defer, EVCNT_TYPE_MISC, 1982 evcnt_attach_dynamic(&rxq->vxrxq_defer, EVCNT_TYPE_MISC,
1982 NULL, rxq->vxrxq_name, "Handled queue in softint/workqueue"); 1983 NULL, rxq->vxrxq_name, "Handled queue in softint/workqueue");
1983 evcnt_attach_dynamic(&rxq->vxrxq_deferreq, EVCNT_TYPE_MISC, 1984 evcnt_attach_dynamic(&rxq->vxrxq_deferreq, EVCNT_TYPE_MISC,
1984 NULL, rxq->vxrxq_name, "Requested in softint/workqueue"); 1985 NULL, rxq->vxrxq_name, "Requested in softint/workqueue");
 1986 evcnt_attach_dynamic(&rxq->vxrxq_mgetcl_failed, EVCNT_TYPE_MISC,
 1987 NULL, rxq->vxrxq_name, "MCLGET failed");
 1988 evcnt_attach_dynamic(&rxq->vxrxq_mbuf_load_failed, EVCNT_TYPE_MISC,
 1989 NULL, rxq->vxrxq_name, "bus_dmamap_load_mbuf failed");
1985 } 1990 }
1986 1991
1987 evcnt_attach_dynamic(&sc->vmx_event_intr, EVCNT_TYPE_INTR, 1992 evcnt_attach_dynamic(&sc->vmx_event_intr, EVCNT_TYPE_INTR,
1988 NULL, device_xname(sc->vmx_dev), "Interrupt for other events"); 1993 NULL, device_xname(sc->vmx_dev), "Interrupt for other events");
1989 evcnt_attach_dynamic(&sc->vmx_event_link, EVCNT_TYPE_MISC, 1994 evcnt_attach_dynamic(&sc->vmx_event_link, EVCNT_TYPE_MISC,
1990 NULL, device_xname(sc->vmx_dev), "Link status event"); 1995 NULL, device_xname(sc->vmx_dev), "Link status event");
1991 evcnt_attach_dynamic(&sc->vmx_event_txqerror, EVCNT_TYPE_MISC, 1996 evcnt_attach_dynamic(&sc->vmx_event_txqerror, EVCNT_TYPE_MISC,
1992 NULL, device_xname(sc->vmx_dev), "Tx queue error event"); 1997 NULL, device_xname(sc->vmx_dev), "Tx queue error event");
1993 evcnt_attach_dynamic(&sc->vmx_event_rxqerror, EVCNT_TYPE_MISC, 1998 evcnt_attach_dynamic(&sc->vmx_event_rxqerror, EVCNT_TYPE_MISC,
1994 NULL, device_xname(sc->vmx_dev), "Rx queue error event"); 1999 NULL, device_xname(sc->vmx_dev), "Rx queue error event");
1995 evcnt_attach_dynamic(&sc->vmx_event_dic, EVCNT_TYPE_MISC, 2000 evcnt_attach_dynamic(&sc->vmx_event_dic, EVCNT_TYPE_MISC,
1996 NULL, device_xname(sc->vmx_dev), "Device impl change event"); 2001 NULL, device_xname(sc->vmx_dev), "Device impl change event");
1997 evcnt_attach_dynamic(&sc->vmx_event_debug, EVCNT_TYPE_MISC, 2002 evcnt_attach_dynamic(&sc->vmx_event_debug, EVCNT_TYPE_MISC,
@@ -2007,34 +2012,38 @@ vmxnet3_teardown_stats(struct vmxnet3_so @@ -2007,34 +2012,38 @@ vmxnet3_teardown_stats(struct vmxnet3_so
2007 struct vmxnet3_txqueue *txq; 2012 struct vmxnet3_txqueue *txq;
2008 struct vmxnet3_rxqueue *rxq; 2013 struct vmxnet3_rxqueue *rxq;
2009 int i; 2014 int i;
2010 2015
2011 for (i = 0; i < sc->vmx_ntxqueues; i++) { 2016 for (i = 0; i < sc->vmx_ntxqueues; i++) {
2012 vmxq = &sc->vmx_queue[i]; 2017 vmxq = &sc->vmx_queue[i];
2013 txq = &vmxq->vxq_txqueue; 2018 txq = &vmxq->vxq_txqueue;
2014 evcnt_detach(&txq->vxtxq_intr); 2019 evcnt_detach(&txq->vxtxq_intr);
2015 evcnt_detach(&txq->vxtxq_defer); 2020 evcnt_detach(&txq->vxtxq_defer);
2016 evcnt_detach(&txq->vxtxq_deferreq); 2021 evcnt_detach(&txq->vxtxq_deferreq);
2017 evcnt_detach(&txq->vxtxq_pcqdrop); 2022 evcnt_detach(&txq->vxtxq_pcqdrop);
2018 evcnt_detach(&txq->vxtxq_transmitdef); 2023 evcnt_detach(&txq->vxtxq_transmitdef);
2019 evcnt_detach(&txq->vxtxq_watchdogto); 2024 evcnt_detach(&txq->vxtxq_watchdogto);
 2025 evcnt_detach(&txq->vxtxq_defragged);
 2026 evcnt_detach(&txq->vxtxq_defrag_failed);
2020 } 2027 }
2021 2028
2022 for (i = 0; i < sc->vmx_nrxqueues; i++) { 2029 for (i = 0; i < sc->vmx_nrxqueues; i++) {
2023 vmxq = &sc->vmx_queue[i]; 2030 vmxq = &sc->vmx_queue[i];
2024 rxq = &vmxq->vxq_rxqueue; 2031 rxq = &vmxq->vxq_rxqueue;
2025 evcnt_detach(&rxq->vxrxq_intr); 2032 evcnt_detach(&rxq->vxrxq_intr);
2026 evcnt_detach(&rxq->vxrxq_defer); 2033 evcnt_detach(&rxq->vxrxq_defer);
2027 evcnt_detach(&rxq->vxrxq_deferreq); 2034 evcnt_detach(&rxq->vxrxq_deferreq);
 2035 evcnt_detach(&rxq->vxrxq_mgetcl_failed);
 2036 evcnt_detach(&rxq->vxrxq_mbuf_load_failed);
2028 } 2037 }
2029 2038
2030 evcnt_detach(&sc->vmx_event_intr); 2039 evcnt_detach(&sc->vmx_event_intr);
2031 evcnt_detach(&sc->vmx_event_link); 2040 evcnt_detach(&sc->vmx_event_link);
2032 evcnt_detach(&sc->vmx_event_txqerror); 2041 evcnt_detach(&sc->vmx_event_txqerror);
2033 evcnt_detach(&sc->vmx_event_rxqerror); 2042 evcnt_detach(&sc->vmx_event_rxqerror);
2034 evcnt_detach(&sc->vmx_event_dic); 2043 evcnt_detach(&sc->vmx_event_dic);
2035 evcnt_detach(&sc->vmx_event_debug); 2044 evcnt_detach(&sc->vmx_event_debug);
2036} 2045}
2037 2046
2038void 2047void
2039vmxnet3_evintr(struct vmxnet3_softc *sc) 2048vmxnet3_evintr(struct vmxnet3_softc *sc)
2040{ 2049{
@@ -2144,64 +2153,65 @@ vmxnet3_txq_eof(struct vmxnet3_txqueue * @@ -2144,64 +2153,65 @@ vmxnet3_txq_eof(struct vmxnet3_txqueue *
2144 txb->vtxb_m = NULL; 2153 txb->vtxb_m = NULL;
2145 } 2154 }
2146 2155
2147 txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc; 2156 txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
2148 } 2157 }
2149 2158
2150 if (txr->vxtxr_head == txr->vxtxr_next) 2159 if (txr->vxtxr_head == txr->vxtxr_next)
2151 txq->vxtxq_watchdog = 0; 2160 txq->vxtxq_watchdog = 0;
2152 2161
2153 return more; 2162 return more;
2154} 2163}
2155 2164
2156int 2165int
2157vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr) 2166vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq,
 2167 struct vmxnet3_rxring *rxr)
2158{ 2168{
2159 struct mbuf *m; 2169 struct mbuf *m;
2160 struct vmxnet3_rxdesc *rxd; 2170 struct vmxnet3_rxdesc *rxd;
2161 struct vmxnet3_rxbuf *rxb; 2171 struct vmxnet3_rxbuf *rxb;
2162 bus_dma_tag_t tag; 2172 bus_dma_tag_t tag;
2163 bus_dmamap_t dmap; 2173 bus_dmamap_t dmap;
2164 int idx, btype, error; 2174 int idx, btype, error;
2165 2175
2166 tag = sc->vmx_dmat; 2176 tag = sc->vmx_dmat;
2167 dmap = rxr->vxrxr_spare_dmap; 2177 dmap = rxr->vxrxr_spare_dmap;
2168 idx = rxr->vxrxr_fill; 2178 idx = rxr->vxrxr_fill;
2169 rxd = &rxr->vxrxr_rxd[idx]; 2179 rxd = &rxr->vxrxr_rxd[idx];
2170 rxb = &rxr->vxrxr_rxbuf[idx]; 2180 rxb = &rxr->vxrxr_rxbuf[idx];
2171 2181
2172 /* Don't allocate buffers for ring 2 for now. */ 2182 /* Don't allocate buffers for ring 2 for now. */
2173 if (rxr->vxrxr_rid != 0) 2183 if (rxr->vxrxr_rid != 0)
2174 return -1; 2184 return -1;
2175 btype = VMXNET3_BTYPE_HEAD; 2185 btype = VMXNET3_BTYPE_HEAD;
2176 2186
2177 MGETHDR(m, M_DONTWAIT, MT_DATA); 2187 MGETHDR(m, M_DONTWAIT, MT_DATA);
2178 if (m == NULL) 2188 if (m == NULL)
2179 return (ENOBUFS); 2189 return (ENOBUFS);
2180 2190
2181 MCLGET(m, M_DONTWAIT); 2191 MCLGET(m, M_DONTWAIT);
2182 if ((m->m_flags & M_EXT) == 0) { 2192 if ((m->m_flags & M_EXT) == 0) {
2183 sc->vmx_stats.vmst_mgetcl_failed++; 2193 rxq->vxrxq_mgetcl_failed.ev_count++;
2184 m_freem(m); 2194 m_freem(m);
2185 return (ENOBUFS); 2195 return (ENOBUFS);
2186 } 2196 }
2187 2197
2188 m->m_pkthdr.len = m->m_len = JUMBO_LEN; 2198 m->m_pkthdr.len = m->m_len = JUMBO_LEN;
2189 m_adj(m, ETHER_ALIGN); 2199 m_adj(m, ETHER_ALIGN);
2190 2200
2191 error = bus_dmamap_load_mbuf(sc->vmx_dmat, dmap, m, BUS_DMA_NOWAIT); 2201 error = bus_dmamap_load_mbuf(sc->vmx_dmat, dmap, m, BUS_DMA_NOWAIT);
2192 if (error) { 2202 if (error) {
2193 m_freem(m); 2203 m_freem(m);
2194 sc->vmx_stats.vmst_mbuf_load_failed++; 2204 rxq->vxrxq_mbuf_load_failed.ev_count++;
2195 return (error); 2205 return (error);
2196 } 2206 }
2197 2207
2198 if (rxb->vrxb_m != NULL) { 2208 if (rxb->vrxb_m != NULL) {
2199 bus_dmamap_sync(tag, rxb->vrxb_dmamap, 2209 bus_dmamap_sync(tag, rxb->vrxb_dmamap,
2200 0, rxb->vrxb_dmamap->dm_mapsize, 2210 0, rxb->vrxb_dmamap->dm_mapsize,
2201 BUS_DMASYNC_POSTREAD); 2211 BUS_DMASYNC_POSTREAD);
2202 bus_dmamap_unload(tag, rxb->vrxb_dmamap); 2212 bus_dmamap_unload(tag, rxb->vrxb_dmamap);
2203 } 2213 }
2204 2214
2205 rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap; 2215 rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
2206 rxb->vrxb_dmamap = dmap; 2216 rxb->vrxb_dmamap = dmap;
2207 rxb->vrxb_m = m; 2217 rxb->vrxb_m = m;
@@ -2393,46 +2403,46 @@ vmxnet3_rxq_eof(struct vmxnet3_rxqueue * @@ -2393,46 +2403,46 @@ vmxnet3_rxq_eof(struct vmxnet3_rxqueue *
2393 /* start of frame w/o head buffer */ 2403 /* start of frame w/o head buffer */
2394 KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD); 2404 KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD);
2395 /* start of frame not in ring 0 */ 2405 /* start of frame not in ring 0 */
2396 KASSERT(rxr == &rxq->vxrxq_cmd_ring[0]); 2406 KASSERT(rxr == &rxq->vxrxq_cmd_ring[0]);
2397 /* duplicate start of frame? */ 2407 /* duplicate start of frame? */
2398 KASSERT(m_head == NULL); 2408 KASSERT(m_head == NULL);
2399 2409
2400 if (length == 0) { 2410 if (length == 0) {
2401 /* Just ignore this descriptor. */ 2411 /* Just ignore this descriptor. */
2402 vmxnet3_rxq_eof_discard(rxq, rxr, idx); 2412 vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2403 goto nextp; 2413 goto nextp;
2404 } 2414 }
2405 2415
2406 if (vmxnet3_newbuf(sc, rxr) != 0) { 2416 if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2407 rxq->vxrxq_stats.vmrxs_iqdrops++; 2417 rxq->vxrxq_stats.vmrxs_iqdrops++;
2408 vmxnet3_rxq_eof_discard(rxq, rxr, idx); 2418 vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2409 if (!rxcd->eop) 2419 if (!rxcd->eop)
2410 vmxnet3_rxq_discard_chain(rxq); 2420 vmxnet3_rxq_discard_chain(rxq);
2411 goto nextp; 2421 goto nextp;
2412 } 2422 }
2413 2423
2414 m_set_rcvif(m, ifp); 2424 m_set_rcvif(m, ifp);
2415 m->m_pkthdr.len = m->m_len = length; 2425 m->m_pkthdr.len = m->m_len = length;
2416 m->m_pkthdr.csum_flags = 0; 2426 m->m_pkthdr.csum_flags = 0;
2417 m_head = m_tail = m; 2427 m_head = m_tail = m;
2418 2428
2419 } else { 2429 } else {
2420 /* non start of frame w/o body buffer */ 2430 /* non start of frame w/o body buffer */
2421 KASSERT(rxd->btype == VMXNET3_BTYPE_BODY); 2431 KASSERT(rxd->btype == VMXNET3_BTYPE_BODY);
2422 /* frame not started? */ 2432 /* frame not started? */
2423 KASSERT(m_head != NULL); 2433 KASSERT(m_head != NULL);
2424 2434
2425 if (vmxnet3_newbuf(sc, rxr) != 0) { 2435 if (vmxnet3_newbuf(sc, rxq, rxr) != 0) {
2426 rxq->vxrxq_stats.vmrxs_iqdrops++; 2436 rxq->vxrxq_stats.vmrxs_iqdrops++;
2427 vmxnet3_rxq_eof_discard(rxq, rxr, idx); 2437 vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2428 if (!rxcd->eop) 2438 if (!rxcd->eop)
2429 vmxnet3_rxq_discard_chain(rxq); 2439 vmxnet3_rxq_discard_chain(rxq);
2430 m_freem(m_head); 2440 m_freem(m_head);
2431 m_head = m_tail = NULL; 2441 m_head = m_tail = NULL;
2432 goto nextp; 2442 goto nextp;
2433 } 2443 }
2434 2444
2435 m->m_len = length; 2445 m->m_len = length;
2436 m_head->m_pkthdr.len += length; 2446 m_head->m_pkthdr.len += length;
2437 m_tail->m_next = m; 2447 m_tail->m_next = m;
2438 m_tail = m; 2448 m_tail = m;
@@ -2774,27 +2784,27 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc, @@ -2774,27 +2784,27 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc,
2774 int i, populate, idx, error; 2784 int i, populate, idx, error;
2775 2785
2776 /* LRO and jumbo frame is not supported yet */ 2786 /* LRO and jumbo frame is not supported yet */
2777 populate = 1; 2787 populate = 1;
2778 2788
2779 for (i = 0; i < populate; i++) { 2789 for (i = 0; i < populate; i++) {
2780 rxr = &rxq->vxrxq_cmd_ring[i]; 2790 rxr = &rxq->vxrxq_cmd_ring[i];
2781 rxr->vxrxr_fill = 0; 2791 rxr->vxrxr_fill = 0;
2782 rxr->vxrxr_gen = VMXNET3_INIT_GEN; 2792 rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2783 memset(rxr->vxrxr_rxd, 0, 2793 memset(rxr->vxrxr_rxd, 0,
2784 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); 2794 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2785 2795
2786 for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) { 2796 for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2787 error = vmxnet3_newbuf(sc, rxr); 2797 error = vmxnet3_newbuf(sc, rxq, rxr);
2788 if (error) 2798 if (error)
2789 return (error); 2799 return (error);
2790 } 2800 }
2791 } 2801 }
2792 2802
2793 for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) { 2803 for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2794 rxr = &rxq->vxrxq_cmd_ring[i]; 2804 rxr = &rxq->vxrxq_cmd_ring[i];
2795 rxr->vxrxr_fill = 0; 2805 rxr->vxrxr_fill = 0;
2796 rxr->vxrxr_gen = 0; 2806 rxr->vxrxr_gen = 0;
2797 memset(rxr->vxrxr_rxd, 0, 2807 memset(rxr->vxrxr_rxd, 0,
2798 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); 2808 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2799 } 2809 }
2800 2810
@@ -3008,29 +3018,29 @@ vmxnet3_txq_load_mbuf(struct vmxnet3_txq @@ -3008,29 +3018,29 @@ vmxnet3_txq_load_mbuf(struct vmxnet3_txq
3008 if (error == 0 || error != EFBIG) 3018 if (error == 0 || error != EFBIG)
3009 return (error); 3019 return (error);
3010 3020
3011 m = m_defrag(m, M_NOWAIT); 3021 m = m_defrag(m, M_NOWAIT);
3012 if (m != NULL) { 3022 if (m != NULL) {
3013 *m0 = m; 3023 *m0 = m;
3014 error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT); 3024 error = bus_dmamap_load_mbuf(tag, dmap, m, BUS_DMA_NOWAIT);
3015 } else 3025 } else
3016 error = ENOBUFS; 3026 error = ENOBUFS;
3017 3027
3018 if (error) { 3028 if (error) {
3019 m_freem(*m0); 3029 m_freem(*m0);
3020 *m0 = NULL; 3030 *m0 = NULL;
3021 txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++; 3031 txq->vxtxq_defrag_failed.ev_count++;
3022 } else 3032 } else
3023 txq->vxtxq_sc->vmx_stats.vmst_defragged++; 3033 txq->vxtxq_defragged.ev_count++;
3024 3034
3025 return (error); 3035 return (error);
3026} 3036}
3027 3037
3028void 3038void
3029vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap) 3039vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
3030{ 3040{
3031 3041
3032 bus_dmamap_unload(txq->vxtxq_sc->vmx_dmat, dmap); 3042 bus_dmamap_unload(txq->vxtxq_sc->vmx_dmat, dmap);
3033} 3043}
3034 3044
3035int 3045int
3036vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0) 3046vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)