Thu Mar 23 03:02:17 2023 UTC
vioif(4): divide IFF_OACTIVE into per-queue flags


(yamaguchi)
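The motivation, briefly: a single interface-wide IFF_OACTIVE bit cannot describe a multiqueue device, since each tx virtqueue fills up independently, and setting the bit for one full ring stalls transmission on all of them. The diff below therefore replaces the bit with a per-queue txc_no_free_slots flag in struct vioif_tx_context: the send path sets it when virtio_enqueue_prep() returns EAGAIN, and the tx completion handler clears it and reschedules the deferred transmit once at least one slot has been reclaimed. What follows is a minimal sketch of that pattern under toy assumptions, not the driver code itself; tx_queue, ring_full(), have_packets() and post_one_packet() are hypothetical stand-ins:

	#include <stdbool.h>
	#include <stddef.h>

	struct tx_queue {
		bool	no_free_slots;	/* per-queue; replaces the global IFF_OACTIVE */
		size_t	used, total;	/* toy stand-in for virtqueue slot accounting */
		size_t	backlog;	/* toy stand-in for the if_snd/pcq backlog */
	};

	static bool ring_full(struct tx_queue *q)	{ return q->used == q->total; }
	static bool have_packets(struct tx_queue *q)	{ return q->backlog > 0; }
	static void post_one_packet(struct tx_queue *q)	{ q->used++; q->backlog--; }

	/* send path (cf. vioif_send_common_locked): throttle only this queue */
	static void
	tx_send(struct tx_queue *q)
	{

		if (q->no_free_slots)
			return;		/* this ring is full; sibling queues keep going */

		while (have_packets(q)) {
			if (ring_full(q)) {	/* cf. virtio_enqueue_prep() == EAGAIN */
				q->no_free_slots = true;   /* was: SET(ifp->if_flags, IFF_OACTIVE) */
				break;
			}
			post_one_packet(q);
		}
	}

	/* completion path (cf. vioif_tx_handle_locked): reopen once slots return */
	static void
	tx_complete(struct tx_queue *q, size_t ndeq)
	{

		q->used -= ndeq;
		if (q->no_free_slots && ndeq > 0) {
			q->no_free_slots = false;  /* was: CLR(ifp->if_flags, IFF_OACTIVE) */
			/* the driver softint_schedule()s its deferred transmit here */
		}
	}

Because the flag lives in the queue's own context and, in the driver, is only touched with netq_lock held, no atomics are required, and vioif_tx_queue_clear() can simply reset it after draining the ring.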
diff -r1.101 -r1.102 src/sys/dev/pci/if_vioif.c

cvs diff -r1.101 -r1.102 src/sys/dev/pci/if_vioif.c

--- src/sys/dev/pci/if_vioif.c 2023/03/23 02:57:54 1.101
+++ src/sys/dev/pci/if_vioif.c 2023/03/23 03:02:17 1.102
@@ -1,14 +1,14 @@
-/*	$NetBSD: if_vioif.c,v 1.101 2023/03/23 02:57:54 yamaguchi Exp $	*/
+/*	$NetBSD: if_vioif.c,v 1.102 2023/03/23 03:02:17 yamaguchi Exp $	*/
 
 /*
  * Copyright (c) 2020 The NetBSD Foundation, Inc.
  * Copyright (c) 2010 Minoura Makoto.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
@@ -17,27 +17,27 @@
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.101 2023/03/23 02:57:54 yamaguchi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.102 2023/03/23 03:02:17 yamaguchi Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/atomic.h>
 #include <sys/bus.h>
 #include <sys/condvar.h>
 #include <sys/device.h>
 #include <sys/evcnt.h>
@@ -256,26 +256,27 @@ struct vioif_netqueue {
 	void			*netq_softint;
 	struct vioif_work	 netq_work;
 	bool			 netq_workqueue;
 
 	char			 netq_evgroup[32];
 	struct evcnt		 netq_mbuf_load_failed;
 	struct evcnt		 netq_enqueue_failed;
 
 	void			*netq_ctx;
 };
 
 struct vioif_tx_context {
 	bool			 txc_link_active;
+	bool			 txc_no_free_slots;
 	pcq_t			*txc_intrq;
 	void			*txc_deferred_transmit;
 
 	struct evcnt		 txc_defrag_failed;
 };
 
 struct vioif_rx_context {
 	struct evcnt		 rxc_mbuf_enobufs;
 };
 struct vioif_ctrlqueue {
 	struct virtqueue	*ctrlq_vq;
 	enum {
 		FREE, INUSE, DONE
@@ -720,27 +721,26 @@ vioif_init(struct ifnet *ifp)
 
 	virtio_reinit_end(vsc);
 
 	if (sc->sc_has_ctrl)
 		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
 
 	r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs);
 	if (r == 0)
 		sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs;
 	else
 		sc->sc_act_nvq_pairs = 1;
 
 	SET(ifp->if_flags, IFF_RUNNING);
-	CLR(ifp->if_flags, IFF_OACTIVE);
 
 	vioif_net_intr_enable(sc, vsc);
 
 	vioif_update_link_status(sc);
 	r = vioif_rx_filter(sc);
 
 	return r;
 }
 
 static void
 vioif_stop(struct ifnet *ifp, int disable)
 {
 	struct vioif_softc *sc = ifp->if_softc;
@@ -850,27 +850,32 @@ vioif_transmit(struct ifnet *ifp, struct
 		mutex_exit(&netq->netq_lock);
 	}
 
 	return 0;
 }
 
 void
 vioif_watchdog(struct ifnet *ifp)
 {
 	struct vioif_softc *sc = ifp->if_softc;
 	struct vioif_netqueue *netq;
 	int i;
 
-	if (ifp->if_flags & IFF_RUNNING) {
+	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
+		if (ISSET(ifp->if_flags, IFF_DEBUG)) {
+			log(LOG_DEBUG, "%s: watchdog timed out\n",
+			    ifp->if_xname);
+		}
+
 		for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
 			netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
 
 			mutex_enter(&netq->netq_lock);
 			if (!netq->netq_running_handle) {
 				netq->netq_running_handle = true;
 				vioif_net_sched_handle(sc, netq);
 			}
 			mutex_exit(&netq->netq_lock);
 		}
 	}
 }
 
@@ -1486,26 +1491,27 @@ vioif_netqueue_init(struct vioif_softc *
 		break;
 	case VIOIF_NETQ_TX:
 		txc = kmem_zalloc(sizeof(*txc), KM_SLEEP);
 		netq->netq_ctx = (void *)txc;
 		txc->txc_deferred_transmit = softint_establish(softint_flags,
 		    vioif_deferred_transmit, netq);
 		if (txc->txc_deferred_transmit == NULL) {
 			aprint_error_dev(sc->sc_dev,
 			    "couldn't establish softint for "
 			    "tx deferred transmit\n");
 			goto err;
 		}
 		txc->txc_link_active = VIOIF_IS_LINK_ACTIVE(sc);
+		txc->txc_no_free_slots = false;
 		txc->txc_intrq = pcq_create(vq->vq_num, KM_SLEEP);
 		break;
 	}
 
 	return 0;
 
 err:
 	netq->netq_ctx = NULL;
 
 	if (rxc != NULL) {
 		kmem_free(rxc, sizeof(*rxc));
 	}
 
@@ -1961,38 +1967,37 @@ vioif_send_common_locked(struct ifnet *i
 	struct vioif_tx_context *txc;
 	struct vioif_net_map *map;
 	struct mbuf *m;
 	int queued = 0;
 
 	KASSERT(mutex_owned(&netq->netq_lock));
 
 	if (netq->netq_stopping ||
 	    !ISSET(ifp->if_flags, IFF_RUNNING))
 		return;
 
 	txc = netq->netq_ctx;
 
-	if (!txc->txc_link_active)
-		return;
-
-	if (!is_transmit &&
-	    ISSET(ifp->if_flags, IFF_OACTIVE))
+	if (!txc->txc_link_active ||
+	    txc->txc_no_free_slots)
 		return;
 
 	for (;;) {
 		int slot, r;
 		r = virtio_enqueue_prep(vsc, vq, &slot);
-		if (r == EAGAIN)
+		if (r == EAGAIN) {
+			txc->txc_no_free_slots = true;
 			break;
+		}
 		if (__predict_false(r != 0))
 			panic("enqueue_prep for tx buffers");
 
 		if (is_transmit)
 			m = pcq_get(txc->txc_intrq);
 		else
 			IFQ_DEQUEUE(&ifp->if_snd, m);
 
 		if (m == NULL) {
 			virtio_enqueue_abort(vsc, vq, slot);
 			break;
 		}
 
@@ -2039,90 +2044,103 @@ vioif_send_common_locked(struct ifnet *i
 		queued++;
 		bpf_mtap(ifp, m, BPF_D_OUT);
 	}
 
 	if (queued > 0) {
 		vioif_notify(vsc, vq);
 		ifp->if_timer = 5;
 	}
 }
 
 /* dequeue sent mbufs */
 static bool
 vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_netqueue *netq, u_int limit)
+    struct vioif_netqueue *netq, u_int limit, size_t *ndeqp)
 {
 	struct virtqueue *vq = netq->netq_vq;
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	struct vioif_net_map *map;
 	struct mbuf *m;
 	int slot, len;
-	bool more = false;
+	bool more;
+	size_t ndeq;
 
 	KASSERT(mutex_owned(&netq->netq_lock));
 
+	more = false;
+	ndeq = 0;
+
 	if (virtio_vq_is_enqueued(vsc, vq) == false)
-		return false;
+		goto done;
 
-	for (;;) {
+	for (;;ndeq++) {
 		if (limit-- == 0) {
 			more = true;
 			break;
 		}
 
 		if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
 			break;
 
 		map = &netq->netq_maps[slot];
 		KASSERT(map->vnm_mbuf != NULL);
 		m = vioif_net_dequeue_commit(vsc, vq, slot,
 		    map, BUS_DMASYNC_POSTWRITE);
 		KASSERT(m != NULL);
 
 		if_statinc(ifp, if_opackets);
 		m_freem(m);
 	}
 
+done:
+	if (ndeqp != NULL)
+		*ndeqp = ndeq;
 	return more;
 }
 
 static void
 vioif_tx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
     struct vioif_netqueue *netq)
 {
+	struct vioif_tx_context *txc;
 	struct vioif_net_map *map;
 	struct mbuf *m;
 	unsigned int i, vq_num;
 	bool more;
 
 	mutex_enter(&netq->netq_lock);
 
+	txc = netq->netq_ctx;
 	vq_num = netq->netq_vq->vq_num;
+
 	for (;;) {
-		more = vioif_tx_deq_locked(sc, vsc, netq, vq_num);
+		more = vioif_tx_deq_locked(sc, vsc, netq, vq_num, NULL);
 		if (more == false)
 			break;
 	}
 
 	for (i = 0; i < vq_num; i++) {
 		map = &netq->netq_maps[i];
 
 		m = map->vnm_mbuf;
 		if (m == NULL)
 			continue;
 
 		vioif_net_unload_mbuf(vsc, map);
 		m_freem(m);
 	}
+
+	txc->txc_no_free_slots = false;
+
 	mutex_exit(&netq->netq_lock);
 }
 
 static void
 vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
 {
 
 	/*
 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
 	 */
 	vioif_send_common_locked(ifp, netq, false);
 
 }
@@ -2147,52 +2165,57 @@ vioif_deferred_transmit(void *arg)
 	mutex_exit(&netq->netq_lock);
 }
 
 static void
 vioif_tx_handle_locked(struct vioif_netqueue *netq, u_int limit)
 {
 	struct virtqueue *vq = netq->netq_vq;
 	struct vioif_tx_context *txc = netq->netq_ctx;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	bool more;
 	int enqueued;
+	size_t ndeq;
 
 	KASSERT(mutex_owned(&netq->netq_lock));
 	KASSERT(!netq->netq_stopping);
 
-	more = vioif_tx_deq_locked(sc, vsc, netq, limit);
+	more = vioif_tx_deq_locked(sc, vsc, netq, limit, &ndeq);
+	if (txc->txc_no_free_slots && ndeq > 0) {
+		txc->txc_no_free_slots = false;
+		softint_schedule(txc->txc_deferred_transmit);
+	}
+
 	if (more) {
 		vioif_net_sched_handle(sc, netq);
 		return;
 	}
 
 	enqueued = (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX) ?
 	    virtio_postpone_intr_smart(vsc, vq):
 	    virtio_start_vq_intr(vsc, vq);
 	if (enqueued != 0) {
 		virtio_stop_vq_intr(vsc, vq);
 		vioif_net_sched_handle(sc, netq);
 		return;
 	}
 
 	netq->netq_running_handle = false;
 
 	/* for ALTQ */
-	if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)]) {
+	if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)])
 		if_schedule_deferred_start(ifp);
-		ifp->if_flags &= ~IFF_OACTIVE;
-	}
+
 	softint_schedule(txc->txc_deferred_transmit);
 }
 
 static int
 vioif_tx_intr(void *arg)
 {
 	struct vioif_netqueue *netq = arg;
 	struct virtqueue *vq = netq->netq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	u_int limit;
 
 	mutex_enter(&netq->netq_lock);