/* $NetBSD: ix_txrx.c,v 1.69 2021/03/12 01:54:29 knakahara Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

/*
 * HW RSC control:
 *  This feature only works with IPv4, and only on the 82599 and later.
 *  It also causes IP forwarding to fail, and unlike LRO that cannot be
 *  controlled by the stack.  For all these reasons it is best left off
 *  by default, with no tunable interface; enabling it requires a
 *  recompile.
 */
static bool ixgbe_rsc_enable = FALSE;

/*
 * For Flow Director: this is the number of TX packets we sample
 * for the filter pool; with the default rate of 20, every 20th
 * packet is probed.
 *
 * Setting this to 0 disables the feature.
 */
static int atr_sample_rate = 20;

/************************************************************************
 * Local Function prototypes
 ************************************************************************/
static void          ixgbe_setup_transmit_ring(struct tx_ring *);
static void          ixgbe_free_transmit_buffers(struct tx_ring *);
static int           ixgbe_setup_receive_ring(struct rx_ring *);
static void          ixgbe_free_receive_buffers(struct rx_ring *);
static void          ixgbe_rx_checksum(u32, struct mbuf *, u32,
                         struct ixgbe_hw_stats *);
static void          ixgbe_refresh_mbufs(struct rx_ring *, int);
static void          ixgbe_drain(struct ifnet *, struct tx_ring *);
static int           ixgbe_xmit(struct tx_ring *, struct mbuf *);
static int           ixgbe_tx_ctx_setup(struct tx_ring *,
                         struct mbuf *, u32 *, u32 *);
static int           ixgbe_tso_setup(struct tx_ring *,
                         struct mbuf *, u32 *, u32 *);
static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                         struct mbuf *, u32);
static int           ixgbe_dma_malloc(struct adapter *, bus_size_t,
                         struct ixgbe_dma_alloc *, int);
static void          ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);

static void          ixgbe_setup_hw_rsc(struct rx_ring *);

/************************************************************************
 * ixgbe_legacy_start_locked - Transmit entry point
 *
 *   Called by the stack to initiate a transmit.
 *   The driver will remain in this routine as long as there are
 *   packets to transmit and transmit resources are available.
 *   In case resources are not available, the stack is notified
 *   and the packet is requeued.
 ************************************************************************/
int
ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
        int rc;
        struct mbuf *m_head;
        struct adapter *adapter = txr->adapter;

        IXGBE_TX_LOCK_ASSERT(txr);

        if (adapter->link_active != LINK_STATE_UP) {
                /*
                 * Discard all packets buffered in the IFQ to avoid
                 * sending stale packets at the next link-up.
                 */
                ixgbe_drain(ifp, txr);
                return (ENETDOWN);
        }
        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return (ENETDOWN);
        if (txr->txr_no_space)
                return (ENETDOWN);

        while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
                if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
                        break;

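                /*
                 * Poll first so that a packet rejected with EAGAIN stays
                 * on the queue for a later retry; dequeue only once
                 * ixgbe_xmit() has taken or definitively failed it.
                 */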
                IFQ_POLL(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
                        break;
                }
                IFQ_DEQUEUE(&ifp->if_snd, m_head);
                if (rc != 0) {
                        m_freem(m_head);
                        continue;
                }

                /* Send a copy of the frame to the BPF listener */
                bpf_mtap(ifp, m_head, BPF_D_OUT);
        }

        return IXGBE_SUCCESS;
} /* ixgbe_legacy_start_locked */

/************************************************************************
 * ixgbe_legacy_start
 *
 *   Called by the stack, this always uses the first tx ring,
 *   and should not be used with multiqueue tx enabled.
 ************************************************************************/
void
ixgbe_legacy_start(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring *txr = adapter->tx_rings;

        if (ifp->if_flags & IFF_RUNNING) {
                IXGBE_TX_LOCK(txr);
                ixgbe_legacy_start_locked(ifp, txr);
                IXGBE_TX_UNLOCK(txr);
        }
} /* ixgbe_legacy_start */

/************************************************************************
 * ixgbe_mq_start - Multiqueue Transmit Entry Point
 *
 *   (if_transmit function)
 ************************************************************************/
int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring *txr;
        int i;
#ifdef RSS
        uint32_t bucket_id;
#endif

        /*
         * When doing RSS, map it to the same outbound queue as the
         * incoming flow would be mapped to.
         *
         * If everything is set up correctly, it should be the same
         * bucket as the one the current CPU maps to.
         */
#ifdef RSS
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
                if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
                    (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
                    &bucket_id) == 0)) {
                        i = bucket_id % adapter->num_queues;
#ifdef IXGBE_DEBUG
                        if (bucket_id > adapter->num_queues)
                                if_printf(ifp,
                                    "bucket_id (%d) > num_queues (%d)\n",
                                    bucket_id, adapter->num_queues);
#endif
                } else
                        i = m->m_pkthdr.flowid % adapter->num_queues;
        } else
#endif /* RSS */
                i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues;

        /* Check for a hung queue and pick alternative */
        if (((1ULL << i) & adapter->active_queues) == 0)
                i = ffs64(adapter->active_queues);

        txr = &adapter->tx_rings[i];

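        /*
         * txr_interq is a lockless pcq, so the packet can be queued even
         * while another context holds the TX lock; if the trylock below
         * fails, a softint or workqueue is scheduled to drain the queue
         * instead of waiting here.
         */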
        if (__predict_false(!pcq_put(txr->txr_interq, m))) {
                m_freem(m);
                txr->pcq_drops.ev_count++;
                return ENOBUFS;
        }
        if (IXGBE_TX_TRYLOCK(txr)) {
                ixgbe_mq_start_locked(ifp, txr);
                IXGBE_TX_UNLOCK(txr);
        } else {
                if (adapter->txrx_use_workqueue) {
                        u_int *enqueued;

                        /*
                         * This function itself is not called in interrupt
                         * context, but it can be called in fast softint
                         * context right after receiving forwarded packets.
                         * The workqueue must therefore be protected against
                         * being enqueued twice when the machine handles both
                         * locally generated and forwarded packets.
                         */
                        enqueued = percpu_getref(adapter->txr_wq_enqueued);
                        if (*enqueued == 0) {
                                *enqueued = 1;
                                percpu_putref(adapter->txr_wq_enqueued);
                                workqueue_enqueue(adapter->txr_wq,
                                    &txr->wq_cookie, curcpu());
                        } else
                                percpu_putref(adapter->txr_wq_enqueued);
                } else {
                        kpreempt_disable();
                        softint_schedule(txr->txr_si);
                        kpreempt_enable();
                }
        }

        return (0);
} /* ixgbe_mq_start */

/************************************************************************
 * ixgbe_mq_start_locked
 ************************************************************************/
int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
        struct mbuf *next;
        int enqueued = 0, err = 0;

        if (txr->adapter->link_active != LINK_STATE_UP) {
                /*
                 * Discard all packets buffered in txr_interq to avoid
                 * sending stale packets at the next link-up.
                 */
                ixgbe_drain(ifp, txr);
                return (ENETDOWN);
        }
        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return (ENETDOWN);
        if (txr->txr_no_space)
                return (ENETDOWN);

        /* Process the queue */
        while ((next = pcq_get(txr->txr_interq)) != NULL) {
                if ((err = ixgbe_xmit(txr, next)) != 0) {
                        m_freem(next);
                        /* All errors are counted in ixgbe_xmit() */
                        break;
                }
                enqueued++;
#if __FreeBSD_version >= 1100036
                /*
                 * Since we're looking at the tx ring, we can check
                 * to see if we're a VF by examining our tail register
                 * address.
                 */
                if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
                    (next->m_flags & M_MCAST))
                        if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
#endif
                /* Send a copy of the frame to the BPF listener */
                bpf_mtap(ifp, next, BPF_D_OUT);
                if ((ifp->if_flags & IFF_RUNNING) == 0)
                        break;
        }

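        /* Opportunistically reclaim completed descriptors when running low */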
        if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
                ixgbe_txeof(txr);

        return (err);
} /* ixgbe_mq_start_locked */

/************************************************************************
 * ixgbe_deferred_mq_start
 *
 *   Called from a softint and workqueue (indirectly) to drain queued
 *   transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start(void *arg)
{
        struct tx_ring *txr = arg;
        struct adapter *adapter = txr->adapter;
        struct ifnet *ifp = adapter->ifp;

        IXGBE_TX_LOCK(txr);
        if (pcq_peek(txr->txr_interq) != NULL)
                ixgbe_mq_start_locked(ifp, txr);
        IXGBE_TX_UNLOCK(txr);
} /* ixgbe_deferred_mq_start */

/************************************************************************
 * ixgbe_deferred_mq_start_work
 *
 *   Called from a workqueue to drain queued transmit packets.
 ************************************************************************/
void
ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
{
        struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
        struct adapter *adapter = txr->adapter;
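        /*
         * Clear the per-CPU "already enqueued" marker before draining so
         * that ixgbe_mq_start() can schedule this work again for packets
         * that arrive while we run.
         */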
        u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
        *enqueued = 0;
        percpu_putref(adapter->txr_wq_enqueued);

        ixgbe_deferred_mq_start(txr);
} /* ixgbe_deferred_mq_start_work */

/************************************************************************
 * ixgbe_drain_all
 ************************************************************************/
void
ixgbe_drain_all(struct adapter *adapter)
{
        struct ifnet *ifp = adapter->ifp;
        struct ix_queue *que = adapter->queues;

        for (int i = 0; i < adapter->num_queues; i++, que++) {
                struct tx_ring *txr = que->txr;

                IXGBE_TX_LOCK(txr);
                ixgbe_drain(ifp, txr);
                IXGBE_TX_UNLOCK(txr);
        }
}

/************************************************************************
 * ixgbe_xmit
 *
 *   Maps the mbufs to tx descriptors, allowing the
 *   TX engine to transmit the packets.
 *
 *   Return 0 on success, positive on failure
 ************************************************************************/
static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_tx_buf *txbuf;
        union ixgbe_adv_tx_desc *txd = NULL;
        struct ifnet *ifp = adapter->ifp;
        int i, j, error;
        int first;
        u32 olinfo_status = 0, cmd_type_len;
        bool remap = TRUE;
        bus_dmamap_t map;

        /* Basic descriptor defines */
        cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
            IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

        if (vlan_has_tag(m_head))
                cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

        /*
         * Important to capture the first descriptor
         * used because it will contain the index of
         * the one we tell the hardware to report back
         */
        first = txr->next_avail_desc;
        txbuf = &txr->tx_buffers[first];
        map = txbuf->map;

        /*
         * Map the packet for DMA.
         */
retry:
        error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
            BUS_DMA_NOWAIT);

        if (__predict_false(error)) {
                struct mbuf *m;

                switch (error) {
                case EAGAIN:
                        txr->q_eagain_tx_dma_setup++;
                        return EAGAIN;
                case ENOMEM:
                        txr->q_enomem_tx_dma_setup++;
                        return EAGAIN;
                case EFBIG:
                        /* Try it again? - one try */
                        if (remap == TRUE) {
                                remap = FALSE;
                                /*
                                 * XXX: m_defrag will choke on
                                 * non-MCLBYTES-sized clusters
                                 */
                                txr->q_efbig_tx_dma_setup++;
                                m = m_defrag(m_head, M_NOWAIT);
                                if (m == NULL) {
                                        txr->q_mbuf_defrag_failed++;
                                        return ENOBUFS;
                                }
                                m_head = m;
                                goto retry;
                        } else {
                                txr->q_efbig2_tx_dma_setup++;
                                return error;
                        }
                case EINVAL:
                        txr->q_einval_tx_dma_setup++;
                        return error;
                default:
                        txr->q_other_tx_dma_setup++;
                        return error;
                }
        }

        /*
         * Make certain there are enough descriptors: dm_nsegs data
         * descriptors, plus two more to cover the offload context
         * descriptor and leave a little headroom.
         */
        if (txr->tx_avail < (map->dm_nsegs + 2)) {
                txr->txr_no_space = true;
                txr->no_desc_avail.ev_count++;
                ixgbe_dmamap_unload(txr->txtag, txbuf->map);
                return EAGAIN;
        }

        /*
         * Set up the appropriate offload context;
         * this will consume the first descriptor.
         */
        error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
        if (__predict_false(error)) {
                return (error);
        }

        /* Do the flow director magic */
        if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
            (txr->atr_sample) && (!adapter->fdir_reinit)) {
                ++txr->atr_count;
                if (txr->atr_count >= atr_sample_rate) {
                        ixgbe_atr(txr, m_head);
                        txr->atr_count = 0;
                }
        }

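        /*
         * CC ("check context") tells the hardware to apply the offload
         * context descriptor prepared above when processing this packet.
         */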
        olinfo_status |= IXGBE_ADVTXD_CC;
        i = txr->next_avail_desc;
        for (j = 0; j < map->dm_nsegs; j++) {
                bus_size_t seglen;
                bus_addr_t segaddr;

                txbuf = &txr->tx_buffers[i];
                txd = &txr->tx_base[i];
                seglen = map->dm_segs[j].ds_len;
                segaddr = htole64(map->dm_segs[j].ds_addr);

                txd->read.buffer_addr = segaddr;
                txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
                txd->read.olinfo_status = htole32(olinfo_status);

                if (++i == txr->num_desc)
                        i = 0;
        }

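        /*
         * EOP marks the end of the frame; RS makes the hardware report
         * status (DD) for this descriptor, which ixgbe_txeof() uses to
         * reclaim the chain after transmission.
         */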
        txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
        txr->tx_avail -= map->dm_nsegs;
        txr->next_avail_desc = i;

        txbuf->m_head = m_head;
        /*
         * Here we swap the map so the last descriptor,
         * which gets the completion interrupt, has the
         * real map, and the first descriptor gets the
         * unused map from this descriptor.
         */
        txr->tx_buffers[first].map = txbuf->map;
        txbuf->map = map;
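        /* Flush the packet data to memory before the hardware reads it */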
        bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
            BUS_DMASYNC_PREWRITE);

        /* Set the EOP descriptor that will be marked done */
        txbuf = &txr->tx_buffers[first];
        txbuf->eop = txd;

        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        /*
         * Advance the Transmit Descriptor Tail (TDT); this tells the
         * hardware that this frame is available to transmit.
         */
        ++txr->total_packets.ev_count;
        IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);

        net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
        if_statadd_ref(nsr, if_obytes, m_head->m_pkthdr.len);
        if (m_head->m_flags & M_MCAST)
                if_statinc_ref(nsr, if_omcasts);
        IF_STAT_PUTREF(ifp);

        /* Mark queue as having work */
        if (txr->busy == 0)
                txr->busy = 1;

        return (0);
} /* ixgbe_xmit */

/************************************************************************
 * ixgbe_drain
 ************************************************************************/
static void
ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
{
        struct mbuf *m;

        IXGBE_TX_LOCK_ASSERT(txr);

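        /*
         * Only ring 0 services the legacy if_snd queue, so drain if_snd
         * there only; every ring drains its own txr_interq below.
         */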
        if (txr->me == 0) {
                while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
                        IFQ_DEQUEUE(&ifp->if_snd, m);
                        m_freem(m);
                        IF_DROP(&ifp->if_snd);
                }
        }

        while ((m = pcq_get(txr->txr_interq)) != NULL) {
                m_freem(m);
                txr->pcq_drops.ev_count++;
        }
}

/************************************************************************
 * ixgbe_allocate_transmit_buffers
 *
 *   Allocate memory for tx_buffer structures. The tx_buffer stores all
 *   the information needed to transmit a packet on the wire. This is
 *   called only once at attach; setup is done on every reset.
 ************************************************************************/
static int
ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        device_t dev = adapter->dev;
        struct ixgbe_tx_buf *txbuf;
        int error, i;

        /*
         * Setup DMA descriptor areas.
         */
        error = ixgbe_dma_tag_create(
            /* parent */ adapter->osdep.dmat,
            /* alignment */ 1,
            /* bounds */ 0,
            /* maxsize */ IXGBE_TSO_SIZE,
            /* nsegments */ adapter->num_segs,
            /* maxsegsize */ PAGE_SIZE,
            /* flags */ 0,
            &txr->txtag);
        if (error != 0) {
                aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
                goto fail;
        }

        txr->tx_buffers = malloc(sizeof(struct ixgbe_tx_buf) *
            adapter->num_tx_desc, M_DEVBUF, M_WAITOK | M_ZERO);

        /* Create the descriptor buffer dma maps */
        txbuf = txr->tx_buffers;
        for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
                error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
                if (error != 0) {
                        aprint_error_dev(dev,
                            "Unable to create TX DMA map (%d)\n", error);
                        goto fail;
                }
        }

        return 0;
fail:
        /* Free everything; this handles the case where we failed partway */
#if 0 /* XXX was FreeBSD */
        ixgbe_free_transmit_structures(adapter);
#else
        ixgbe_free_transmit_buffers(txr);
#endif
        return (error);
} /* ixgbe_allocate_transmit_buffers */

/************************************************************************
 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
 ************************************************************************/
static void
ixgbe_setup_transmit_ring(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_tx_buf *txbuf;
#ifdef DEV_NETMAP
        struct netmap_adapter *na = NA(adapter->ifp);
        struct netmap_slot *slot;
#endif /* DEV_NETMAP */

        /* Clear the old ring contents */
        IXGBE_TX_LOCK(txr);

#ifdef DEV_NETMAP
        if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
                /*
                 * (under lock): if in netmap mode, do some consistency
                 * checks and set slot to entry 0 of the netmap ring.
                 */
                slot = netmap_reset(na, NR_TX, txr->me, 0);
        }
#endif /* DEV_NETMAP */

        bzero((void *)txr->tx_base,
            (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
        /* Reset indices */
        txr->next_avail_desc = 0;
        txr->next_to_clean = 0;

        /* Free any existing tx buffers. */
        txbuf = txr->tx_buffers;
        for (int i = 0; i < txr->num_desc; i++, txbuf++) {
                if (txbuf->m_head != NULL) {
                        bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
                            0, txbuf->m_head->m_pkthdr.len,
                            BUS_DMASYNC_POSTWRITE);
                        ixgbe_dmamap_unload(txr->txtag, txbuf->map);
                        m_freem(txbuf->m_head);
                        txbuf->m_head = NULL;
                }

#ifdef DEV_NETMAP
                /*
                 * In netmap mode, set the map for the packet buffer.
                 * NOTE: Some drivers (not this one) also need to set
                 * the physical buffer address in the NIC ring.
                 * Slots in the netmap ring (indexed by "si") are
                 * kring->nkr_hwofs positions "ahead" wrt the
                 * corresponding slot in the NIC ring. In some drivers
                 * (not here) nkr_hwofs can be negative. Function
                 * netmap_idx_n2k() handles wraparounds properly.
                 */
                if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
                        int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
                        netmap_load_map(na, txr->txtag,
                            txbuf->map, NMB(na, slot + si));
                }
#endif /* DEV_NETMAP */

                /* Clear the EOP descriptor pointer */
                txbuf->eop = NULL;
        }

        /* Set the rate at which we sample packets */
        if (adapter->feat_en & IXGBE_FEATURE_FDIR)
                txr->atr_sample = atr_sample_rate;

        /* Set number of descriptors available */
        txr->tx_avail = adapter->num_tx_desc;

        ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        IXGBE_TX_UNLOCK(txr);
} /* ixgbe_setup_transmit_ring */

/************************************************************************
 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
 ************************************************************************/
int
ixgbe_setup_transmit_structures(struct adapter *adapter)
{
        struct tx_ring *txr = adapter->tx_rings;

        for (int i = 0; i < adapter->num_queues; i++, txr++)
                ixgbe_setup_transmit_ring(txr);

        return (0);
} /* ixgbe_setup_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
        struct tx_ring *txr = adapter->tx_rings;

        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                ixgbe_free_transmit_buffers(txr);
                ixgbe_dma_free(adapter, &txr->txdma);
                IXGBE_TX_LOCK_DESTROY(txr);
        }
        free(adapter->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
        struct adapter *adapter = txr->adapter;
        struct ixgbe_tx_buf *tx_buffer;
        int i;

        INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

        if (txr->tx_buffers == NULL)
                return;

762 | tx_buffer = txr->tx_buffers; | | 762 | tx_buffer = txr->tx_buffers; |
763 | for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { | | 763 | for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { |
764 | if (tx_buffer->m_head != NULL) { | | 764 | if (tx_buffer->m_head != NULL) { |
765 | bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, | | 765 | bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, |
766 | 0, tx_buffer->m_head->m_pkthdr.len, | | 766 | 0, tx_buffer->m_head->m_pkthdr.len, |
767 | BUS_DMASYNC_POSTWRITE); | | 767 | BUS_DMASYNC_POSTWRITE); |
768 | ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); | | 768 | ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); |
769 | m_freem(tx_buffer->m_head); | | 769 | m_freem(tx_buffer->m_head); |
770 | tx_buffer->m_head = NULL; | | 770 | tx_buffer->m_head = NULL; |
771 | if (tx_buffer->map != NULL) { | | 771 | if (tx_buffer->map != NULL) { |
772 | ixgbe_dmamap_destroy(txr->txtag, | | 772 | ixgbe_dmamap_destroy(txr->txtag, |
773 | tx_buffer->map); | | 773 | tx_buffer->map); |
774 | tx_buffer->map = NULL; | | 774 | tx_buffer->map = NULL; |
775 | } | | 775 | } |
776 | } else if (tx_buffer->map != NULL) { | | 776 | } else if (tx_buffer->map != NULL) { |
777 | ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); | | 777 | ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); |
778 | ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); | | 778 | ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); |
779 | tx_buffer->map = NULL; | | 779 | tx_buffer->map = NULL; |
780 | } | | 780 | } |
781 | } | | 781 | } |
782 | if (txr->txr_interq != NULL) { | | 782 | if (txr->txr_interq != NULL) { |
783 | struct mbuf *m; | | 783 | struct mbuf *m; |
784 | | | 784 | |
785 | while ((m = pcq_get(txr->txr_interq)) != NULL) | | 785 | while ((m = pcq_get(txr->txr_interq)) != NULL) |
786 | m_freem(m); | | 786 | m_freem(m); |
787 | pcq_destroy(txr->txr_interq); | | 787 | pcq_destroy(txr->txr_interq); |
788 | } | | 788 | } |
789 | if (txr->tx_buffers != NULL) { | | 789 | if (txr->tx_buffers != NULL) { |
790 | free(txr->tx_buffers, M_DEVBUF); | | 790 | free(txr->tx_buffers, M_DEVBUF); |
791 | txr->tx_buffers = NULL; | | 791 | txr->tx_buffers = NULL; |
792 | } | | 792 | } |
793 | if (txr->txtag != NULL) { | | 793 | if (txr->txtag != NULL) { |
794 | ixgbe_dma_tag_destroy(txr->txtag); | | 794 | ixgbe_dma_tag_destroy(txr->txtag); |
795 | txr->txtag = NULL; | | 795 | txr->txtag = NULL; |
796 | } | | 796 | } |
797 | } /* ixgbe_free_transmit_buffers */ | | 797 | } /* ixgbe_free_transmit_buffers */ |
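A brief note on the two branches in the loop above: a buffer that still holds an mbuf is synced and unloaded before the mbuf is freed and its map destroyed, while a buffer with a map but no mbuf (for example, one whose packet already completed in ixgbe_txeof()) still gets its map unloaded and destroyed, so no bus_dma resources leak on either path.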
798 | | | 798 | |
799 | /************************************************************************ | | 799 | /************************************************************************ |
800 | * ixgbe_tx_ctx_setup | | 800 | * ixgbe_tx_ctx_setup |
801 | * | | 801 | * |
802 | * Advanced Context Descriptor setup for VLAN, CSUM or TSO | | 802 | * Advanced Context Descriptor setup for VLAN, CSUM or TSO |
803 | ************************************************************************/ | | 803 | ************************************************************************/ |
804 | static int | | 804 | static int |
805 | ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, | | 805 | ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, |
806 | u32 *cmd_type_len, u32 *olinfo_status) | | 806 | u32 *cmd_type_len, u32 *olinfo_status) |
807 | { | | 807 | { |
808 | struct adapter *adapter = txr->adapter; | | 808 | struct adapter *adapter = txr->adapter; |
809 | struct ixgbe_adv_tx_context_desc *TXD; | | 809 | struct ixgbe_adv_tx_context_desc *TXD; |
810 | struct ether_vlan_header *eh; | | 810 | struct ether_vlan_header *eh; |
811 | #ifdef INET | | 811 | #ifdef INET |
812 | struct ip *ip; | | 812 | struct ip *ip; |
813 | #endif | | 813 | #endif |
814 | #ifdef INET6 | | 814 | #ifdef INET6 |
815 | struct ip6_hdr *ip6; | | 815 | struct ip6_hdr *ip6; |
816 | #endif | | 816 | #endif |
817 | int ehdrlen, ip_hlen = 0; | | 817 | int ehdrlen, ip_hlen = 0; |
818 | int offload = TRUE; | | 818 | int offload = TRUE; |
819 | int ctxd = txr->next_avail_desc; | | 819 | int ctxd = txr->next_avail_desc; |
820 | u32 vlan_macip_lens = 0; | | 820 | u32 vlan_macip_lens = 0; |
821 | u32 type_tucmd_mlhl = 0; | | 821 | u32 type_tucmd_mlhl = 0; |
822 | u16 vtag = 0; | | 822 | u16 vtag = 0; |
823 | u16 etype; | | 823 | u16 etype; |
824 | u8 ipproto = 0; | | 824 | u8 ipproto = 0; |
825 | char *l3d; | | 825 | char *l3d; |
826 | | | 826 | |
827 | | | 827 | |
828 | /* First check if TSO is to be used */ | | 828 | /* First check if TSO is to be used */ |
829 | if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) { | | 829 | if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) { |
830 | int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status); | | 830 | int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status); |
831 | | | 831 | |
832 | if (rv != 0) | | 832 | if (rv != 0) |
833 | ++adapter->tso_err.ev_count; | | 833 | ++adapter->tso_err.ev_count; |
834 | return rv; | | 834 | return rv; |
835 | } | | 835 | } |
836 | | | 836 | |
837 | if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0) | | 837 | if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0) |
838 | offload = FALSE; | | 838 | offload = FALSE; |
839 | | | 839 | |
840 | /* Indicate the whole packet as payload when not doing TSO */ | | 840 | /* Indicate the whole packet as payload when not doing TSO */ |
841 | *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; | | 841 | *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; |
842 | | | 842 | |
843 | /* Now ready a context descriptor */ | | 843 | /* Now ready a context descriptor */ |
844 | TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd]; | | 844 | TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd]; |
845 | | | 845 | |
846 | /* | | 846 | /* |
847 | * In advanced descriptors the vlan tag must | | 847 | * In advanced descriptors the vlan tag must |
848 | * be placed into the context descriptor. Hence | | 848 | * be placed into the context descriptor. Hence |
849 | * we need to make one even if not doing offloads. | | 849 | * we need to make one even if not doing offloads. |
850 | */ | | 850 | */ |
851 | if (vlan_has_tag(mp)) { | | 851 | if (vlan_has_tag(mp)) { |
852 | vtag = htole16(vlan_get_tag(mp)); | | 852 | vtag = htole16(vlan_get_tag(mp)); |
853 | vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); | | 853 | vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); |
854 | } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) && | | 854 | } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) && |
855 | (offload == FALSE)) | | 855 | (offload == FALSE)) |
856 | return (0); | | 856 | return (0); |
857 | | | 857 | |
858 | /* | | 858 | /* |
859 | * Determine where frame payload starts. | | 859 | * Determine where frame payload starts. |
860 | * Jump over vlan headers if already present, | | 860 | * Jump over vlan headers if already present, |
861 | * helpful for QinQ too. | | 861 | * helpful for QinQ too. |
862 | */ | | 862 | */ |
863 | KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag)); | | 863 | KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag)); |
864 | eh = mtod(mp, struct ether_vlan_header *); | | 864 | eh = mtod(mp, struct ether_vlan_header *); |
865 | if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { | | 865 | if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { |
866 | KASSERT(mp->m_len >= sizeof(struct ether_vlan_header)); | | 866 | KASSERT(mp->m_len >= sizeof(struct ether_vlan_header)); |
867 | etype = ntohs(eh->evl_proto); | | 867 | etype = ntohs(eh->evl_proto); |
868 | ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; | | 868 | ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; |
869 | } else { | | 869 | } else { |
870 | etype = ntohs(eh->evl_encap_proto); | | 870 | etype = ntohs(eh->evl_encap_proto); |
871 | ehdrlen = ETHER_HDR_LEN; | | 871 | ehdrlen = ETHER_HDR_LEN; |
872 | } | | 872 | } |
873 | | | 873 | |
874 | /* Set the ether header length */ | | 874 | /* Set the ether header length */ |
875 | vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; | | 875 | vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; |
876 | | | 876 | |
877 | if (offload == FALSE) | | 877 | if (offload == FALSE) |
878 | goto no_offloads; | | 878 | goto no_offloads; |
879 | | | 879 | |
880 | /* | | 880 | /* |
881 | * If the first mbuf only includes the ethernet header, | | 881 | * If the first mbuf only includes the ethernet header, |
882 | * jump to the next one | | 882 | * jump to the next one |
883 | * XXX: This assumes the stack splits mbufs containing headers | | 883 | * XXX: This assumes the stack splits mbufs containing headers |
884 | * on header boundaries | | 884 | * on header boundaries |
885 | * XXX: And assumes the entire IP header is contained in one mbuf | | 885 | * XXX: And assumes the entire IP header is contained in one mbuf |
886 | */ | | 886 | */ |
887 | if (mp->m_len == ehdrlen && mp->m_next) | | 887 | if (mp->m_len == ehdrlen && mp->m_next) |
888 | l3d = mtod(mp->m_next, char *); | | 888 | l3d = mtod(mp->m_next, char *); |
889 | else | | 889 | else |
890 | l3d = mtod(mp, char *) + ehdrlen; | | 890 | l3d = mtod(mp, char *) + ehdrlen; |
891 | | | 891 | |
892 | switch (etype) { | | 892 | switch (etype) { |
893 | #ifdef INET | | 893 | #ifdef INET |
894 | case ETHERTYPE_IP: | | 894 | case ETHERTYPE_IP: |
895 | ip = (struct ip *)(l3d); | | 895 | ip = (struct ip *)(l3d); |
896 | ip_hlen = ip->ip_hl << 2; | | 896 | ip_hlen = ip->ip_hl << 2; |
897 | ipproto = ip->ip_p; | | 897 | ipproto = ip->ip_p; |
898 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; | | 898 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; |
899 | KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 || | | 899 | KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 || |
900 | ip->ip_sum == 0); | | 900 | ip->ip_sum == 0); |
901 | break; | | 901 | break; |
902 | #endif | | 902 | #endif |
903 | #ifdef INET6 | | 903 | #ifdef INET6 |
904 | case ETHERTYPE_IPV6: | | 904 | case ETHERTYPE_IPV6: |
905 | ip6 = (struct ip6_hdr *)(l3d); | | 905 | ip6 = (struct ip6_hdr *)(l3d); |
906 | ip_hlen = sizeof(struct ip6_hdr); | | 906 | ip_hlen = sizeof(struct ip6_hdr); |
907 | ipproto = ip6->ip6_nxt; | | 907 | ipproto = ip6->ip6_nxt; |
908 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; | | 908 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; |
909 | break; | | 909 | break; |
910 | #endif | | 910 | #endif |
911 | default: | | 911 | default: |
912 | offload = false; | | 912 | offload = false; |
913 | break; | | 913 | break; |
914 | } | | 914 | } |
915 | | | 915 | |
916 | if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) | | 916 | if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) |
917 | *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; | | 917 | *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; |
918 | | | 918 | |
919 | vlan_macip_lens |= ip_hlen; | | 919 | vlan_macip_lens |= ip_hlen; |
920 | | | 920 | |
921 | /* No support for offloads for non-L4 next headers */ | | 921 | /* No support for offloads for non-L4 next headers */ |
922 | switch (ipproto) { | | 922 | switch (ipproto) { |
923 | case IPPROTO_TCP: | | 923 | case IPPROTO_TCP: |
924 | if (mp->m_pkthdr.csum_flags & | | 924 | if (mp->m_pkthdr.csum_flags & |
925 | (M_CSUM_TCPv4 | M_CSUM_TCPv6)) | | 925 | (M_CSUM_TCPv4 | M_CSUM_TCPv6)) |
926 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; | | 926 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; |
927 | else | | 927 | else |
928 | offload = false; | | 928 | offload = false; |
929 | break; | | 929 | break; |
930 | case IPPROTO_UDP: | | 930 | case IPPROTO_UDP: |
931 | if (mp->m_pkthdr.csum_flags & | | 931 | if (mp->m_pkthdr.csum_flags & |
932 | (M_CSUM_UDPv4 | M_CSUM_UDPv6)) | | 932 | (M_CSUM_UDPv4 | M_CSUM_UDPv6)) |
933 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; | | 933 | type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; |
934 | else | | 934 | else |
935 | offload = false; | | 935 | offload = false; |
936 | break; | | 936 | break; |
937 | default: | | 937 | default: |
938 | offload = false; | | 938 | offload = false; |
939 | break; | | 939 | break; |
940 | } | | 940 | } |
941 | | | 941 | |
942 | if (offload) /* Insert L4 checksum into data descriptors */ | | 942 | if (offload) /* Insert L4 checksum into data descriptors */ |
943 | *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; | | 943 | *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; |
944 | | | 944 | |
945 | no_offloads: | | 945 | no_offloads: |
946 | type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; | | 946 | type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; |
947 | | | 947 | |
948 | /* Now copy bits into descriptor */ | | 948 | /* Now copy bits into descriptor */ |
949 | TXD->vlan_macip_lens = htole32(vlan_macip_lens); | | 949 | TXD->vlan_macip_lens = htole32(vlan_macip_lens); |
950 | TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); | | 950 | TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); |
951 | TXD->seqnum_seed = htole32(0); | | 951 | TXD->seqnum_seed = htole32(0); |
952 | TXD->mss_l4len_idx = htole32(0); | | 952 | TXD->mss_l4len_idx = htole32(0); |
953 | | | 953 | |
954 | /* We've consumed the first desc, adjust counters */ | | 954 | /* We've consumed the first desc, adjust counters */ |
955 | if (++ctxd == txr->num_desc) | | 955 | if (++ctxd == txr->num_desc) |
956 | ctxd = 0; | | 956 | ctxd = 0; |
957 | txr->next_avail_desc = ctxd; | | 957 | txr->next_avail_desc = ctxd; |
958 | --txr->tx_avail; | | 958 | --txr->tx_avail; |
959 | | | 959 | |
960 | return (0); | | 960 | return (0); |
961 | } /* ixgbe_tx_ctx_setup */ | | 961 | } /* ixgbe_tx_ctx_setup */ |
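To make the bit packing above concrete, here is a minimal standalone sketch (not driver code) of how vlan_macip_lens is composed for a VLAN-tagged TCP/IPv4 frame. The shift values are assumptions modeled on the usual ixgbe definitions (VLAN tag in the upper half, MAC header length at bit 9, IP header length in the low bits); ixgbe_type.h has the authoritative constants.

#include <stdio.h>
#include <stdint.h>

#define ADVTXD_VLAN_SHIFT	16	/* assumed value of IXGBE_ADVTXD_VLAN_SHIFT */
#define ADVTXD_MACLEN_SHIFT	9	/* assumed value of IXGBE_ADVTXD_MACLEN_SHIFT */

int
main(void)
{
	uint32_t vtag = 100;		/* example VLAN tag */
	uint32_t ehdrlen = 14 + 4;	/* ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN */
	uint32_t ip_hlen = 20;		/* IPv4 header without options */
	uint32_t vlan_macip_lens = 0;

	/* Same composition order as ixgbe_tx_ctx_setup() above */
	vlan_macip_lens |= vtag << ADVTXD_VLAN_SHIFT;
	vlan_macip_lens |= ehdrlen << ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;

	/* Prints 0x00642414: tag 0x64, maclen 18, iphlen 20 */
	printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens);
	return 0;
}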
962 | | | 962 | |
963 | /************************************************************************ | | 963 | /************************************************************************ |
964 | * ixgbe_tso_setup | | 964 | * ixgbe_tso_setup |
965 | * | | 965 | * |
966 | * Setup work for hardware segmentation offload (TSO) on | | 966 | * Setup work for hardware segmentation offload (TSO) on |
967 | * adapters using advanced tx descriptors | | 967 | * adapters using advanced tx descriptors |
968 | ************************************************************************/ | | 968 | ************************************************************************/ |
969 | static int | | 969 | static int |
970 | ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len, | | 970 | ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len, |
971 | u32 *olinfo_status) | | 971 | u32 *olinfo_status) |
972 | { | | 972 | { |
973 | struct ixgbe_adv_tx_context_desc *TXD; | | 973 | struct ixgbe_adv_tx_context_desc *TXD; |
974 | struct ether_vlan_header *eh; | | 974 | struct ether_vlan_header *eh; |
975 | #ifdef INET6 | | 975 | #ifdef INET6 |
976 | struct ip6_hdr *ip6; | | 976 | struct ip6_hdr *ip6; |
977 | #endif | | 977 | #endif |
978 | #ifdef INET | | 978 | #ifdef INET |
979 | struct ip *ip; | | 979 | struct ip *ip; |
980 | #endif | | 980 | #endif |
981 | struct tcphdr *th; | | 981 | struct tcphdr *th; |
982 | int ctxd, ehdrlen, ip_hlen, tcp_hlen; | | 982 | int ctxd, ehdrlen, ip_hlen, tcp_hlen; |
983 | u32 vlan_macip_lens = 0; | | 983 | u32 vlan_macip_lens = 0; |
984 | u32 type_tucmd_mlhl = 0; | | 984 | u32 type_tucmd_mlhl = 0; |
985 | u32 mss_l4len_idx = 0, paylen; | | 985 | u32 mss_l4len_idx = 0, paylen; |
986 | u16 vtag = 0, eh_type; | | 986 | u16 vtag = 0, eh_type; |
987 | | | 987 | |
988 | /* | | 988 | /* |
989 | * Determine where frame payload starts. | | 989 | * Determine where frame payload starts. |
990 | * Jump over vlan headers if already present | | 990 | * Jump over vlan headers if already present |
991 | */ | | 991 | */ |
992 | eh = mtod(mp, struct ether_vlan_header *); | | 992 | eh = mtod(mp, struct ether_vlan_header *); |
993 | if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { | | 993 | if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { |
994 | ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; | | 994 | ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; |
995 | eh_type = eh->evl_proto; | | 995 | eh_type = eh->evl_proto; |
996 | } else { | | 996 | } else { |
997 | ehdrlen = ETHER_HDR_LEN; | | 997 | ehdrlen = ETHER_HDR_LEN; |
998 | eh_type = eh->evl_encap_proto; | | 998 | eh_type = eh->evl_encap_proto; |
999 | } | | 999 | } |
1000 | | | 1000 | |
| @@ -1083,1339 +1083,1337 @@ ixgbe_tso_setup(struct tx_ring *txr, str | | | @@ -1083,1339 +1083,1337 @@ ixgbe_tso_setup(struct tx_ring *txr, str |
1083 | * | | 1083 | * |
1084 | * Examine each tx_buffer in the used queue. If the hardware is done | | 1084 | * Examine each tx_buffer in the used queue. If the hardware is done |
1085 | * processing the packet then free associated resources. The | | 1085 | * processing the packet then free associated resources. The |
1086 | * tx_buffer is put back on the free queue. | | 1086 | * tx_buffer is put back on the free queue. |
1087 | ************************************************************************/ | | 1087 | ************************************************************************/ |
1088 | bool | | 1088 | bool |
1089 | ixgbe_txeof(struct tx_ring *txr) | | 1089 | ixgbe_txeof(struct tx_ring *txr) |
1090 | { | | 1090 | { |
1091 | struct adapter *adapter = txr->adapter; | | 1091 | struct adapter *adapter = txr->adapter; |
1092 | struct ifnet *ifp = adapter->ifp; | | 1092 | struct ifnet *ifp = adapter->ifp; |
1093 | struct ixgbe_tx_buf *buf; | | 1093 | struct ixgbe_tx_buf *buf; |
1094 | union ixgbe_adv_tx_desc *txd; | | 1094 | union ixgbe_adv_tx_desc *txd; |
1095 | u32 work, processed = 0; | | 1095 | u32 work, processed = 0; |
1096 | u32 limit = adapter->tx_process_limit; | | 1096 | u32 limit = adapter->tx_process_limit; |
1097 | | | 1097 | |
1098 | KASSERT(mutex_owned(&txr->tx_mtx)); | | 1098 | KASSERT(mutex_owned(&txr->tx_mtx)); |
1099 | | | 1099 | |
1100 | #ifdef DEV_NETMAP | | 1100 | #ifdef DEV_NETMAP |
1101 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && | | 1101 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && |
1102 | (adapter->ifp->if_capenable & IFCAP_NETMAP)) { | | 1102 | (adapter->ifp->if_capenable & IFCAP_NETMAP)) { |
1103 | struct netmap_adapter *na = NA(adapter->ifp); | | 1103 | struct netmap_adapter *na = NA(adapter->ifp); |
1104 | struct netmap_kring *kring = na->tx_rings[txr->me]; | | 1104 | struct netmap_kring *kring = na->tx_rings[txr->me]; |
1105 | txd = txr->tx_base; | | 1105 | txd = txr->tx_base; |
1106 | bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 1106 | bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
1107 | BUS_DMASYNC_POSTREAD); | | 1107 | BUS_DMASYNC_POSTREAD); |
1108 | /* | | 1108 | /* |
1109 | * In netmap mode, all the work is done in the context | | 1109 | * In netmap mode, all the work is done in the context |
1110 | * of the client thread. Interrupt handlers only wake up | | 1110 | * of the client thread. Interrupt handlers only wake up |
1111 | * clients, which may be sleeping on individual rings | | 1111 | * clients, which may be sleeping on individual rings |
1112 | * or on a global resource for all rings. | | 1112 | * or on a global resource for all rings. |
1113 | * To implement tx interrupt mitigation, we wake up the client | | 1113 | * To implement tx interrupt mitigation, we wake up the client |
1114 | * thread roughly every half ring, even if the NIC interrupts | | 1114 | * thread roughly every half ring, even if the NIC interrupts |
1115 | * more frequently. This is implemented as follows: | | 1115 | * more frequently. This is implemented as follows: |
1116 | * - ixgbe_txsync() sets kring->nr_kflags with the index of | | 1116 | * - ixgbe_txsync() sets kring->nr_kflags with the index of |
1117 | * the slot that should wake up the thread (nkr_num_slots | | 1117 | * the slot that should wake up the thread (nkr_num_slots |
1118 | * means the user thread should not be woken up); | | 1118 | * means the user thread should not be woken up); |
1119 | * - the driver ignores tx interrupts unless netmap_mitigate=0 | | 1119 | * - the driver ignores tx interrupts unless netmap_mitigate=0 |
1120 | * or the slot has the DD bit set. | | 1120 | * or the slot has the DD bit set. |
1121 | */ | | 1121 | */ |
1122 | if (kring->nr_kflags < kring->nkr_num_slots && | | 1122 | if (kring->nr_kflags < kring->nkr_num_slots && |
1123 | txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD) { | | 1123 | txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD) { |
1124 | netmap_tx_irq(ifp, txr->me); | | 1124 | netmap_tx_irq(ifp, txr->me); |
1125 | } | | 1125 | } |
1126 | return false; | | 1126 | return false; |
1127 | } | | 1127 | } |
1128 | #endif /* DEV_NETMAP */ | | 1128 | #endif /* DEV_NETMAP */ |
1129 | | | 1129 | |
1130 | if (txr->tx_avail == txr->num_desc) { | | 1130 | if (txr->tx_avail == txr->num_desc) { |
1131 | txr->busy = 0; | | 1131 | txr->busy = 0; |
1132 | return false; | | 1132 | return false; |
1133 | } | | 1133 | } |
1134 | | | 1134 | |
1135 | /* Get work starting point */ | | 1135 | /* Get work starting point */ |
1136 | work = txr->next_to_clean; | | 1136 | work = txr->next_to_clean; |
1137 | buf = &txr->tx_buffers[work]; | | 1137 | buf = &txr->tx_buffers[work]; |
1138 | txd = &txr->tx_base[work]; | | 1138 | txd = &txr->tx_base[work]; |
1139 | work -= txr->num_desc; /* The distance to ring end */ | | 1139 | work -= txr->num_desc; /* The distance to ring end */ |
1140 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 1140 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
1141 | BUS_DMASYNC_POSTREAD); | | 1141 | BUS_DMASYNC_POSTREAD); |
1142 | | | 1142 | |
1143 | do { | | 1143 | do { |
1144 | union ixgbe_adv_tx_desc *eop = buf->eop; | | 1144 | union ixgbe_adv_tx_desc *eop = buf->eop; |
1145 | if (eop == NULL) /* No work */ | | 1145 | if (eop == NULL) /* No work */ |
1146 | break; | | 1146 | break; |
1147 | | | 1147 | |
1148 | if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0) | | 1148 | if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0) |
1149 | break; /* I/O not complete */ | | 1149 | break; /* I/O not complete */ |
1150 | | | 1150 | |
1151 | if (buf->m_head) { | | 1151 | if (buf->m_head) { |
1152 | txr->bytes += buf->m_head->m_pkthdr.len; | | 1152 | txr->bytes += buf->m_head->m_pkthdr.len; |
1153 | bus_dmamap_sync(txr->txtag->dt_dmat, buf->map, | | 1153 | bus_dmamap_sync(txr->txtag->dt_dmat, buf->map, |
1154 | 0, buf->m_head->m_pkthdr.len, | | 1154 | 0, buf->m_head->m_pkthdr.len, |
1155 | BUS_DMASYNC_POSTWRITE); | | 1155 | BUS_DMASYNC_POSTWRITE); |
1156 | ixgbe_dmamap_unload(txr->txtag, buf->map); | | 1156 | ixgbe_dmamap_unload(txr->txtag, buf->map); |
1157 | m_freem(buf->m_head); | | 1157 | m_freem(buf->m_head); |
1158 | buf->m_head = NULL; | | 1158 | buf->m_head = NULL; |
1159 | } | | 1159 | } |
1160 | buf->eop = NULL; | | 1160 | buf->eop = NULL; |
1161 | txr->txr_no_space = false; | | 1161 | txr->txr_no_space = false; |
1162 | ++txr->tx_avail; | | 1162 | ++txr->tx_avail; |
1163 | | | 1163 | |
1164 | /* We clean the whole range if multi-segment */ | | 1164 | /* We clean the whole range if multi-segment */ |
1165 | while (txd != eop) { | | 1165 | while (txd != eop) { |
1166 | ++txd; | | 1166 | ++txd; |
1167 | ++buf; | | 1167 | ++buf; |
1168 | ++work; | | 1168 | ++work; |
1169 | /* wrap the ring? */ | | 1169 | /* wrap the ring? */ |
1170 | if (__predict_false(!work)) { | | 1170 | if (__predict_false(!work)) { |
1171 | work -= txr->num_desc; | | 1171 | work -= txr->num_desc; |
1172 | buf = txr->tx_buffers; | | 1172 | buf = txr->tx_buffers; |
1173 | txd = txr->tx_base; | | 1173 | txd = txr->tx_base; |
1174 | } | | 1174 | } |
1175 | if (buf->m_head) { | | 1175 | if (buf->m_head) { |
1176 | txr->bytes += | | 1176 | txr->bytes += |
1177 | buf->m_head->m_pkthdr.len; | | 1177 | buf->m_head->m_pkthdr.len; |
1178 | bus_dmamap_sync(txr->txtag->dt_dmat, | | 1178 | bus_dmamap_sync(txr->txtag->dt_dmat, |
1179 | buf->map, | | 1179 | buf->map, |
1180 | 0, buf->m_head->m_pkthdr.len, | | 1180 | 0, buf->m_head->m_pkthdr.len, |
1181 | BUS_DMASYNC_POSTWRITE); | | 1181 | BUS_DMASYNC_POSTWRITE); |
1182 | ixgbe_dmamap_unload(txr->txtag, | | 1182 | ixgbe_dmamap_unload(txr->txtag, |
1183 | buf->map); | | 1183 | buf->map); |
1184 | m_freem(buf->m_head); | | 1184 | m_freem(buf->m_head); |
1185 | buf->m_head = NULL; | | 1185 | buf->m_head = NULL; |
1186 | } | | 1186 | } |
1187 | ++txr->tx_avail; | | 1187 | ++txr->tx_avail; |
1188 | buf->eop = NULL; | | 1188 | buf->eop = NULL; |
1189 | | | 1189 | |
1190 | } | | 1190 | } |
1191 | ++txr->packets; | | 1191 | ++txr->packets; |
1192 | ++processed; | | 1192 | ++processed; |
1193 | if_statinc(ifp, if_opackets); | | 1193 | if_statinc(ifp, if_opackets); |
1194 | | | 1194 | |
1195 | /* Try the next packet */ | | 1195 | /* Try the next packet */ |
1196 | ++txd; | | 1196 | ++txd; |
1197 | ++buf; | | 1197 | ++buf; |
1198 | ++work; | | 1198 | ++work; |
1199 | /* reset with a wrap */ | | 1199 | /* reset with a wrap */ |
1200 | if (__predict_false(!work)) { | | 1200 | if (__predict_false(!work)) { |
1201 | work -= txr->num_desc; | | 1201 | work -= txr->num_desc; |
1202 | buf = txr->tx_buffers; | | 1202 | buf = txr->tx_buffers; |
1203 | txd = txr->tx_base; | | 1203 | txd = txr->tx_base; |
1204 | } | | 1204 | } |
1205 | prefetch(txd); | | 1205 | prefetch(txd); |
1206 | } while (__predict_true(--limit)); | | 1206 | } while (__predict_true(--limit)); |
1207 | | | 1207 | |
1208 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 1208 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
1209 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1209 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1210 | | | 1210 | |
1211 | work += txr->num_desc; | | 1211 | work += txr->num_desc; |
1212 | txr->next_to_clean = work; | | 1212 | txr->next_to_clean = work; |
1213 | | | 1213 | |
1214 | /* | | 1214 | /* |
1215 | * Queue hang detection: we know there's | | 1215 | * Queue hang detection: we know there's |
1216 | * work outstanding, or the first return | | 1216 | * work outstanding, or the first return |
1217 | * above would have been taken, so increment | | 1217 | * above would have been taken, so increment |
1218 | * busy if nothing managed to get cleaned; | | 1218 | * busy if nothing managed to get cleaned; |
1219 | * local_timer will then check the count and | | 1219 | * local_timer will then check the count and |
1220 | * mark the queue HUNG if it exceeds MAX attempts. | | 1220 | * mark the queue HUNG if it exceeds MAX attempts. |
1221 | */ | | 1221 | */ |
1222 | if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG)) | | 1222 | if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG)) |
1223 | ++txr->busy; | | 1223 | ++txr->busy; |
1224 | /* | | 1224 | /* |
1225 | * If anything gets cleaned we reset state to 1, | | 1225 | * If anything gets cleaned we reset state to 1, |
1226 | * note this will turn off HUNG if it's set. | | 1226 | * note this will turn off HUNG if it's set. |
1227 | */ | | 1227 | */ |
1228 | if (processed) | | 1228 | if (processed) |
1229 | txr->busy = 1; | | 1229 | txr->busy = 1; |
1230 | | | 1230 | |
1231 | if (txr->tx_avail == txr->num_desc) | | 1231 | if (txr->tx_avail == txr->num_desc) |
1232 | txr->busy = 0; | | 1232 | txr->busy = 0; |
1233 | | | 1233 | |
1234 | return (limit == 0); | | 1234 | return (limit == 0); |
1235 | } /* ixgbe_txeof */ | | 1235 | } /* ixgbe_txeof */ |
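The cleanup loop above avoids a modulo per descriptor with a small trick: work starts as next_to_clean minus num_desc, a negative offset whose distance to zero is the distance to the ring end, so the wrap is caught by a plain "!work" and the final index is recovered by adding num_desc back. A standalone sketch of the same technique, with illustrative values (not driver code):

#include <stdio.h>

int
main(void)
{
	const int num_desc = 8;			/* ring size */
	int next_to_clean = 6;			/* example starting slot */
	int work = next_to_clean - num_desc;	/* -2: two slots from the end */

	for (int n = 0; n < 4; n++) {
		/* work + num_desc is the real ring index */
		printf("cleaning slot %d\n", work + num_desc);
		++work;
		if (!work)			/* hit the ring end: wrap */
			work -= num_desc;
	}
	/* cleans slots 6, 7, 0, 1; next_to_clean ends up at 2 */
	printf("next_to_clean = %d\n", work + num_desc);
	return 0;
}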
1236 | | | 1236 | |
1237 | /************************************************************************ | | 1237 | /************************************************************************ |
1238 | * ixgbe_rsc_count | | 1238 | * ixgbe_rsc_count |
1239 | * | | 1239 | * |
1240 | * Used to detect a descriptor that has been merged by Hardware RSC. | | 1240 | * Used to detect a descriptor that has been merged by Hardware RSC. |
1241 | ************************************************************************/ | | 1241 | ************************************************************************/ |
1242 | static inline u32 | | 1242 | static inline u32 |
1243 | ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) | | 1243 | ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) |
1244 | { | | 1244 | { |
1245 | return (le32toh(rx->wb.lower.lo_dword.data) & | | 1245 | return (le32toh(rx->wb.lower.lo_dword.data) & |
1246 | IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; | | 1246 | IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; |
1247 | } /* ixgbe_rsc_count */ | | 1247 | } /* ixgbe_rsc_count */ |
1248 | | | 1248 | |
1249 | /************************************************************************ | | 1249 | /************************************************************************ |
1250 | * ixgbe_setup_hw_rsc | | 1250 | * ixgbe_setup_hw_rsc |
1251 | * | | 1251 | * |
1252 | * Initialize Hardware RSC (LRO) feature on 82599 | | 1252 | * Initialize Hardware RSC (LRO) feature on 82599 |
1253 | * for an RX ring; this is toggled by the LRO capability | | 1253 | * for an RX ring; this is toggled by the LRO capability |
1254 | * even though it is transparent to the stack. | | 1254 | * even though it is transparent to the stack. |
1255 | * | | 1255 | * |
1256 | * NOTE: Since this HW feature only works with IPv4 and | | 1256 | * NOTE: Since this HW feature only works with IPv4 and |
1257 | * testing has shown soft LRO to be as effective, | | 1257 | * testing has shown soft LRO to be as effective, |
1258 | * this feature will be disabled by default. | | 1258 | * this feature will be disabled by default. |
1259 | ************************************************************************/ | | 1259 | ************************************************************************/ |
1260 | static void | | 1260 | static void |
1261 | ixgbe_setup_hw_rsc(struct rx_ring *rxr) | | 1261 | ixgbe_setup_hw_rsc(struct rx_ring *rxr) |
1262 | { | | 1262 | { |
1263 | struct adapter *adapter = rxr->adapter; | | 1263 | struct adapter *adapter = rxr->adapter; |
1264 | struct ixgbe_hw *hw = &adapter->hw; | | 1264 | struct ixgbe_hw *hw = &adapter->hw; |
1265 | u32 rscctrl, rdrxctl; | | 1265 | u32 rscctrl, rdrxctl; |
1266 | | | 1266 | |
1267 | /* If turning LRO/RSC off we need to disable it */ | | 1267 | /* If turning LRO/RSC off we need to disable it */ |
1268 | if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) { | | 1268 | if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) { |
1269 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); | | 1269 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); |
1270 | rscctrl &= ~IXGBE_RSCCTL_RSCEN; | | 1270 | rscctrl &= ~IXGBE_RSCCTL_RSCEN; |
     | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); | |      | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); |
1271 | return; | | 1271 | return; |
1272 | } | | 1272 | } |
1273 | | | 1273 | |
1274 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); | | 1274 | rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); |
1275 | rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; | | 1275 | rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; |
1276 | #ifdef DEV_NETMAP | | 1276 | #ifdef DEV_NETMAP |
1277 | /* Always strip CRC unless Netmap disabled it */ | | 1277 | /* Always strip CRC unless Netmap disabled it */ |
1278 | if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) || | | 1278 | if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) || |
1279 | !(adapter->ifp->if_capenable & IFCAP_NETMAP) || | | 1279 | !(adapter->ifp->if_capenable & IFCAP_NETMAP) || |
1280 | ix_crcstrip) | | 1280 | ix_crcstrip) |
1281 | #endif /* DEV_NETMAP */ | | 1281 | #endif /* DEV_NETMAP */ |
1282 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; | | 1282 | rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; |
1283 | rdrxctl |= IXGBE_RDRXCTL_RSCACKC; | | 1283 | rdrxctl |= IXGBE_RDRXCTL_RSCACKC; |
1284 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); | | 1284 | IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); |
1285 | | | 1285 | |
1286 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); | | 1286 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); |
1287 | rscctrl |= IXGBE_RSCCTL_RSCEN; | | 1287 | rscctrl |= IXGBE_RSCCTL_RSCEN; |
1288 | /* | | 1288 | /* |
1289 | * Limit the total number of descriptors that | | 1289 | * Limit the total number of descriptors that |
1290 | * can be combined, so the merged size does not exceed 64KB | | 1290 | * can be combined, so the merged size does not exceed 64KB |
1291 | */ | | 1291 | */ |
1292 | if (rxr->mbuf_sz == MCLBYTES) | | 1292 | if (rxr->mbuf_sz == MCLBYTES) |
1293 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | | 1293 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
1294 | else if (rxr->mbuf_sz == MJUMPAGESIZE) | | 1294 | else if (rxr->mbuf_sz == MJUMPAGESIZE) |
1295 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; | | 1295 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; |
1296 | else if (rxr->mbuf_sz == MJUM9BYTES) | | 1296 | else if (rxr->mbuf_sz == MJUM9BYTES) |
1297 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | | 1297 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; |
1298 | else /* Using 16K cluster */ | | 1298 | else /* Using 16K cluster */ |
1299 | rscctrl |= IXGBE_RSCCTL_MAXDESC_1; | | 1299 | rscctrl |= IXGBE_RSCCTL_MAXDESC_1; |
1300 | | | 1300 | |
1301 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); | | 1301 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); |
1302 | | | 1302 | |
1303 | /* Enable TCP header recognition */ | | 1303 | /* Enable TCP header recognition */ |
1304 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), | | 1304 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), |
1305 | (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR)); | | 1305 | (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR)); |
1306 | | | 1306 | |
1307 | /* Disable RSC for ACK packets */ | | 1307 | /* Disable RSC for ACK packets */ |
1308 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, | | 1308 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, |
1309 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); | | 1309 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); |
1310 | | | 1310 | |
1311 | rxr->hw_rsc = TRUE; | | 1311 | rxr->hw_rsc = TRUE; |
1312 | } /* ixgbe_setup_hw_rsc */ | | 1312 | } /* ixgbe_setup_hw_rsc */ |
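A short note on the MAXDESC arithmetic above, assuming the usual NetBSD cluster sizes:

/*
 * Worst-case merged RSC sizes, all under the 64KB limit:
 *   MCLBYTES     (2KB)       x 16 descriptors = 32KB
 *   MJUMPAGESIZE (4KB page)  x 8              = 32KB
 *   MJUM9BYTES   (9KB)       x 4              = 36KB
 *   16KB cluster             x 1              = 16KB
 */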
1313 | | | 1313 | |
1314 | /************************************************************************ | | 1314 | /************************************************************************ |
1315 | * ixgbe_refresh_mbufs | | 1315 | * ixgbe_refresh_mbufs |
1316 | * | | 1316 | * |
1317 | * Refresh mbuf buffers for RX descriptor rings | | 1317 | * Refresh mbuf buffers for RX descriptor rings |
1318 | * - now keeps its own state, so discards due to resource | | 1318 | * - now keeps its own state, so discards due to resource |
1319 | * exhaustion are unnecessary: if an mbuf cannot be obtained | | 1319 | * exhaustion are unnecessary: if an mbuf cannot be obtained |
1320 | * it just returns, keeping its placeholder, and can simply | | 1320 | * it just returns, keeping its placeholder, and can simply |
1321 | * be recalled to try again. | | 1321 | * be recalled to try again. |
1322 | * | | 1322 | * |
1323 | * XXX NetBSD TODO: | | 1323 | * XXX NetBSD TODO: |
1324 | * - The ixgbe_rxeof() function always preallocates mbuf cluster (jcl), | | 1324 | * - The ixgbe_rxeof() function always preallocates mbuf cluster (jcl), |
1325 | * so the ixgbe_refresh_mbufs() function can be simplified. | | 1325 | * so the ixgbe_refresh_mbufs() function can be simplified. |
1326 | * | | 1326 | * |
1327 | ************************************************************************/ | | 1327 | ************************************************************************/ |
1328 | static void | | 1328 | static void |
1329 | ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) | | 1329 | ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) |
1330 | { | | 1330 | { |
1331 | struct adapter *adapter = rxr->adapter; | | 1331 | struct adapter *adapter = rxr->adapter; |
1332 | struct ixgbe_rx_buf *rxbuf; | | 1332 | struct ixgbe_rx_buf *rxbuf; |
1333 | struct mbuf *mp; | | 1333 | struct mbuf *mp; |
1334 | int i, j, error; | | 1334 | int i, j, error; |
1335 | bool refreshed = false; | | 1335 | bool refreshed = false; |
1336 | | | 1336 | |
1337 | i = j = rxr->next_to_refresh; | | 1337 | i = j = rxr->next_to_refresh; |
1338 | /* Control the loop with one beyond */ | | 1338 | /* Control the loop with one beyond */ |
1339 | if (++j == rxr->num_desc) | | 1339 | if (++j == rxr->num_desc) |
1340 | j = 0; | | 1340 | j = 0; |
1341 | | | 1341 | |
1342 | while (j != limit) { | | 1342 | while (j != limit) { |
1343 | rxbuf = &rxr->rx_buffers[i]; | | 1343 | rxbuf = &rxr->rx_buffers[i]; |
1344 | if (rxbuf->buf == NULL) { | | 1344 | if (rxbuf->buf == NULL) { |
1345 | mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, | | 1345 | mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, |
1346 | MT_DATA, M_PKTHDR, rxr->mbuf_sz); | | 1346 | MT_DATA, M_PKTHDR, rxr->mbuf_sz); |
1347 | if (mp == NULL) { | | 1347 | if (mp == NULL) { |
1348 | rxr->no_jmbuf.ev_count++; | | 1348 | rxr->no_jmbuf.ev_count++; |
1349 | goto update; | | 1349 | goto update; |
1350 | } | | 1350 | } |
1351 | if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) | | 1351 | if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) |
1352 | m_adj(mp, ETHER_ALIGN); | | 1352 | m_adj(mp, ETHER_ALIGN); |
1353 | } else | | 1353 | } else |
1354 | mp = rxbuf->buf; | | 1354 | mp = rxbuf->buf; |
1355 | | | 1355 | |
1356 | mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; | | 1356 | mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; |
1357 | | | 1357 | |
1358 | /* If we're dealing with an mbuf that was copied rather | | 1358 | /* If we're dealing with an mbuf that was copied rather |
1359 | * than replaced, there's no need to go through busdma. | | 1359 | * than replaced, there's no need to go through busdma. |
1360 | */ | | 1360 | */ |
1361 | if ((rxbuf->flags & IXGBE_RX_COPY) == 0) { | | 1361 | if ((rxbuf->flags & IXGBE_RX_COPY) == 0) { |
1362 | /* Get the memory mapping */ | | 1362 | /* Get the memory mapping */ |
1363 | ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap); | | 1363 | ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap); |
1364 | error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, | | 1364 | error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, |
1365 | rxbuf->pmap, mp, BUS_DMA_NOWAIT); | | 1365 | rxbuf->pmap, mp, BUS_DMA_NOWAIT); |
1366 | if (error != 0) { | | 1366 | if (error != 0) { |
1367 | device_printf(adapter->dev, "Refresh mbufs: " | | 1367 | device_printf(adapter->dev, "Refresh mbufs: " |
1368 | "payload dmamap load failure - %d\n", | | 1368 | "payload dmamap load failure - %d\n", |
1369 | error); | | 1369 | error); |
1370 | m_free(mp); | | 1370 | m_free(mp); |
1371 | rxbuf->buf = NULL; | | 1371 | rxbuf->buf = NULL; |
1372 | goto update; | | 1372 | goto update; |
1373 | } | | 1373 | } |
1374 | rxbuf->buf = mp; | | 1374 | rxbuf->buf = mp; |
1375 | bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, | | 1375 | bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, |
1376 | 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD); | | 1376 | 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD); |
1377 | rxbuf->addr = rxr->rx_base[i].read.pkt_addr = | | 1377 | rxbuf->addr = rxr->rx_base[i].read.pkt_addr = |
1378 | htole64(rxbuf->pmap->dm_segs[0].ds_addr); | | 1378 | htole64(rxbuf->pmap->dm_segs[0].ds_addr); |
1379 | } else { | | 1379 | } else { |
1380 | rxr->rx_base[i].read.pkt_addr = rxbuf->addr; | | 1380 | rxr->rx_base[i].read.pkt_addr = rxbuf->addr; |
1381 | rxbuf->flags &= ~IXGBE_RX_COPY; | | 1381 | rxbuf->flags &= ~IXGBE_RX_COPY; |
1382 | } | | 1382 | } |
1383 | | | 1383 | |
1384 | refreshed = true; | | 1384 | refreshed = true; |
1385 | /* Next is precalculated */ | | 1385 | /* Next is precalculated */ |
1386 | i = j; | | 1386 | i = j; |
1387 | rxr->next_to_refresh = i; | | 1387 | rxr->next_to_refresh = i; |
1388 | if (++j == rxr->num_desc) | | 1388 | if (++j == rxr->num_desc) |
1389 | j = 0; | | 1389 | j = 0; |
1390 | } | | 1390 | } |
1391 | | | 1391 | |
1392 | update: | | 1392 | update: |
1393 | if (refreshed) /* Update hardware tail index */ | | 1393 | if (refreshed) /* Update hardware tail index */ |
1394 | IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh); | | 1394 | IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh); |
1395 | | | 1395 | |
1396 | return; | | 1396 | return; |
1397 | } /* ixgbe_refresh_mbufs */ | | 1397 | } /* ixgbe_refresh_mbufs */ |
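The refresh loop above is worth a standalone sketch (illustrative values, not driver code): j is a lookahead kept one slot ahead of the refresh index i, so iteration stops before the lookahead reaches the caller's limit, and the tail index written back (next_to_refresh) never advances onto it.

#include <stdio.h>

int
main(void)
{
	const int num_desc = 8;		/* ring size */
	int limit = 3;			/* caller's stop position */
	int i, j;

	i = j = 6;			/* next_to_refresh */
	/* Control the loop with one beyond */
	if (++j == num_desc)
		j = 0;
	while (j != limit) {
		printf("refreshing slot %d\n", i);
		i = j;			/* next is precalculated */
		if (++j == num_desc)
			j = 0;
	}
	/* refreshes slots 6, 7, 0, 1; stops one short of limit */
	printf("next_to_refresh = %d\n", i);
	return 0;
}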
1398 | | | 1398 | |
1399 | /************************************************************************ | | 1399 | /************************************************************************ |
1400 | * ixgbe_allocate_receive_buffers | | 1400 | * ixgbe_allocate_receive_buffers |
1401 | * | | 1401 | * |
1402 | * Allocate memory for rx_buffer structures. Since we use one | | 1402 | * Allocate memory for rx_buffer structures. Since we use one |
1403 | * rx_buffer per received packet, the maximum number of rx_buffers | | 1403 | * rx_buffer per received packet, the maximum number of rx_buffers |
1404 | * that we'll need is equal to the number of receive descriptors | | 1404 | * that we'll need is equal to the number of receive descriptors |
1405 | * that we've allocated. | | 1405 | * that we've allocated. |
1406 | ************************************************************************/ | | 1406 | ************************************************************************/ |
1407 | static int | | 1407 | static int |
1408 | ixgbe_allocate_receive_buffers(struct rx_ring *rxr) | | 1408 | ixgbe_allocate_receive_buffers(struct rx_ring *rxr) |
1409 | { | | 1409 | { |
1410 | struct adapter *adapter = rxr->adapter; | | 1410 | struct adapter *adapter = rxr->adapter; |
1411 | device_t dev = adapter->dev; | | 1411 | device_t dev = adapter->dev; |
1412 | struct ixgbe_rx_buf *rxbuf; | | 1412 | struct ixgbe_rx_buf *rxbuf; |
1413 | int bsize, error; | | 1413 | int bsize, error; |
1414 | | | 1414 | |
1415 | bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; | | 1415 | bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; |
1416 | rxr->rx_buffers = malloc(bsize, M_DEVBUF, M_WAITOK | M_ZERO); | | 1416 | rxr->rx_buffers = malloc(bsize, M_DEVBUF, M_WAITOK | M_ZERO); |
1417 | | | 1417 | |
1418 | error = ixgbe_dma_tag_create( | | 1418 | error = ixgbe_dma_tag_create( |
1419 | /* parent */ adapter->osdep.dmat, | | 1419 | /* parent */ adapter->osdep.dmat, |
1420 | /* alignment */ 1, | | 1420 | /* alignment */ 1, |
1421 | /* bounds */ 0, | | 1421 | /* bounds */ 0, |
1422 | /* maxsize */ MJUM16BYTES, | | 1422 | /* maxsize */ MJUM16BYTES, |
1423 | /* nsegments */ 1, | | 1423 | /* nsegments */ 1, |
1424 | /* maxsegsize */ MJUM16BYTES, | | 1424 | /* maxsegsize */ MJUM16BYTES, |
1425 | /* flags */ 0, | | 1425 | /* flags */ 0, |
1426 | &rxr->ptag); | | 1426 | &rxr->ptag); |
1427 | if (error != 0) { | | 1427 | if (error != 0) { |
1428 | aprint_error_dev(dev, "Unable to create RX DMA tag\n"); | | 1428 | aprint_error_dev(dev, "Unable to create RX DMA tag\n"); |
1429 | goto fail; | | 1429 | goto fail; |
1430 | } | | 1430 | } |
1431 | | | 1431 | |
1432 | for (int i = 0; i < rxr->num_desc; i++) { | | 1432 | for (int i = 0; i < rxr->num_desc; i++) { |
1433 | rxbuf = &rxr->rx_buffers[i]; | | 1433 | rxbuf = &rxr->rx_buffers[i]; |
1434 | error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap); | | 1434 | error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap); |
1435 | if (error) { | | 1435 | if (error) { |
1436 | aprint_error_dev(dev, "Unable to create RX dma map\n"); | | 1436 | aprint_error_dev(dev, "Unable to create RX dma map\n"); |
1437 | goto fail; | | 1437 | goto fail; |
1438 | } | | 1438 | } |
1439 | } | | 1439 | } |
1440 | | | 1440 | |
1441 | return (0); | | 1441 | return (0); |
1442 | | | 1442 | |
1443 | fail: | | 1443 | fail: |
1444 | /* Frees all, but can handle partial completion */ | | 1444 | /* Frees all, but can handle partial completion */ |
1445 | ixgbe_free_receive_structures(adapter); | | 1445 | ixgbe_free_receive_structures(adapter); |
1446 | | | 1446 | |
1447 | return (error); | | 1447 | return (error); |
1448 | } /* ixgbe_allocate_receive_buffers */ | | 1448 | } /* ixgbe_allocate_receive_buffers */ |
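Note that the DMA tag created above maps each receive buffer as a single contiguous segment (nsegments 1) of at most MJUM16BYTES, which matches the largest (16KB) receive cluster size the RSC setup above accounts for.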
1449 | | | 1449 | |
1450 | /************************************************************************ | | 1450 | /************************************************************************ |
1451 | * ixgbe_free_receive_ring | | 1451 | * ixgbe_free_receive_ring |
1452 | ************************************************************************/ | | 1452 | ************************************************************************/ |
1453 | static void | | 1453 | static void |
1454 | ixgbe_free_receive_ring(struct rx_ring *rxr) | | 1454 | ixgbe_free_receive_ring(struct rx_ring *rxr) |
1455 | { | | 1455 | { |
1456 | for (int i = 0; i < rxr->num_desc; i++) { | | 1456 | for (int i = 0; i < rxr->num_desc; i++) { |
1457 | ixgbe_rx_discard(rxr, i); | | 1457 | ixgbe_rx_discard(rxr, i); |
1458 | } | | 1458 | } |
1459 | } /* ixgbe_free_receive_ring */ | | 1459 | } /* ixgbe_free_receive_ring */ |
1460 | | | 1460 | |
1461 | /************************************************************************ | | 1461 | /************************************************************************ |
1462 | * ixgbe_setup_receive_ring | | 1462 | * ixgbe_setup_receive_ring |
1463 | * | | 1463 | * |
1464 | * Initialize a receive ring and its buffers. | | 1464 | * Initialize a receive ring and its buffers. |
1465 | ************************************************************************/ | | 1465 | ************************************************************************/ |
1466 | static int | | 1466 | static int |
1467 | ixgbe_setup_receive_ring(struct rx_ring *rxr) | | 1467 | ixgbe_setup_receive_ring(struct rx_ring *rxr) |
1468 | { | | 1468 | { |
1469 | struct adapter *adapter; | | 1469 | struct adapter *adapter; |
1470 | struct ixgbe_rx_buf *rxbuf; | | 1470 | struct ixgbe_rx_buf *rxbuf; |
1471 | #ifdef LRO | | 1471 | #ifdef LRO |
1472 | struct ifnet *ifp; | | 1472 | struct ifnet *ifp; |
1473 | struct lro_ctrl *lro = &rxr->lro; | | 1473 | struct lro_ctrl *lro = &rxr->lro; |
1474 | #endif /* LRO */ | | 1474 | #endif /* LRO */ |
1475 | #ifdef DEV_NETMAP | | 1475 | #ifdef DEV_NETMAP |
1476 | struct netmap_adapter *na = NA(rxr->adapter->ifp); | | 1476 | struct netmap_adapter *na = NA(rxr->adapter->ifp); |
1477 | struct netmap_slot *slot; | | 1477 | struct netmap_slot *slot; |
1478 | #endif /* DEV_NETMAP */ | | 1478 | #endif /* DEV_NETMAP */ |
1479 | int rsize, error = 0; | | 1479 | int rsize, error = 0; |
1480 | | | 1480 | |
1481 | adapter = rxr->adapter; | | 1481 | adapter = rxr->adapter; |
1482 | #ifdef LRO | | 1482 | #ifdef LRO |
1483 | ifp = adapter->ifp; | | 1483 | ifp = adapter->ifp; |
1484 | #endif /* LRO */ | | 1484 | #endif /* LRO */ |
1485 | | | 1485 | |
1486 | /* Clear the ring contents */ | | 1486 | /* Clear the ring contents */ |
1487 | IXGBE_RX_LOCK(rxr); | | 1487 | IXGBE_RX_LOCK(rxr); |
1488 | | | 1488 | |
1489 | #ifdef DEV_NETMAP | | 1489 | #ifdef DEV_NETMAP |
1490 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) | | 1490 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) |
1491 | slot = netmap_reset(na, NR_RX, rxr->me, 0); | | 1491 | slot = netmap_reset(na, NR_RX, rxr->me, 0); |
1492 | #endif /* DEV_NETMAP */ | | 1492 | #endif /* DEV_NETMAP */ |
1493 | | | 1493 | |
1494 | rsize = roundup2(adapter->num_rx_desc * | | 1494 | rsize = roundup2(adapter->num_rx_desc * |
1495 | sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); | | 1495 | sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); |
1496 | bzero((void *)rxr->rx_base, rsize); | | 1496 | bzero((void *)rxr->rx_base, rsize); |
1497 | /* Cache the size */ | | 1497 | /* Cache the size */ |
1498 | rxr->mbuf_sz = adapter->rx_mbuf_sz; | | 1498 | rxr->mbuf_sz = adapter->rx_mbuf_sz; |
1499 | | | 1499 | |
1500 | /* Free current RX buffer structs and their mbufs */ | | 1500 | /* Free current RX buffer structs and their mbufs */ |
1501 | ixgbe_free_receive_ring(rxr); | | 1501 | ixgbe_free_receive_ring(rxr); |
1502 | | | 1502 | |
1503 | IXGBE_RX_UNLOCK(rxr); | | 1503 | IXGBE_RX_UNLOCK(rxr); |
1504 | /* | | 1504 | /* |
1505 | * Now reinitialize our supply of jumbo mbufs. The number | | 1505 | * Now reinitialize our supply of jumbo mbufs. The number |
1506 | * or size of jumbo mbufs may have changed. | | 1506 | * or size of jumbo mbufs may have changed. |
1507 | * Assume all rxr->ptag instances are the same. | | 1507 | * Assume all rxr->ptag instances are the same. |
1508 | */ | | 1508 | */ |
1509 | ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr, | | 1509 | ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr, |
1510 | adapter->num_jcl, adapter->rx_mbuf_sz); | | 1510 | adapter->num_jcl, adapter->rx_mbuf_sz); |
1511 | | | 1511 | |
1512 | IXGBE_RX_LOCK(rxr); | | 1512 | IXGBE_RX_LOCK(rxr); |
1513 | | | 1513 | |
1514 | /* Now replenish the mbufs */ | | 1514 | /* Now replenish the mbufs */ |
1515 | for (int j = 0; j != rxr->num_desc; ++j) { | | 1515 | for (int j = 0; j != rxr->num_desc; ++j) { |
1516 | struct mbuf *mp; | | 1516 | struct mbuf *mp; |
1517 | | | 1517 | |
1518 | rxbuf = &rxr->rx_buffers[j]; | | 1518 | rxbuf = &rxr->rx_buffers[j]; |
1519 | | | 1519 | |
1520 | #ifdef DEV_NETMAP | | 1520 | #ifdef DEV_NETMAP |
1521 | /* | | 1521 | /* |
1522 | * In netmap mode, fill the map and set the buffer | | 1522 | * In netmap mode, fill the map and set the buffer |
1523 | * address in the NIC ring, considering the offset | | 1523 | * address in the NIC ring, considering the offset |
1524 | * between the netmap and NIC rings (see comment in | | 1524 | * between the netmap and NIC rings (see comment in |
1525 | * ixgbe_setup_transmit_ring() ). No need to allocate | | 1525 | * ixgbe_setup_transmit_ring() ). No need to allocate |
1526 | * an mbuf, so end the block with a continue; | | 1526 | * an mbuf, so end the block with a continue; |
1527 | */ | | 1527 | */ |
1528 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) { | | 1528 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) { |
1529 | int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j); | | 1529 | int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j); |
1530 | uint64_t paddr; | | 1530 | uint64_t paddr; |
1531 | void *addr; | | 1531 | void *addr; |
1532 | | | 1532 | |
1533 | addr = PNMB(na, slot + sj, &paddr); | | 1533 | addr = PNMB(na, slot + sj, &paddr); |
1534 | netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); | | 1534 | netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); |
1535 | /* Update descriptor and the cached value */ | | 1535 | /* Update descriptor and the cached value */ |
1536 | rxr->rx_base[j].read.pkt_addr = htole64(paddr); | | 1536 | rxr->rx_base[j].read.pkt_addr = htole64(paddr); |
1537 | rxbuf->addr = htole64(paddr); | | 1537 | rxbuf->addr = htole64(paddr); |
1538 | continue; | | 1538 | continue; |
1539 | } | | 1539 | } |
1540 | #endif /* DEV_NETMAP */ | | 1540 | #endif /* DEV_NETMAP */ |
1541 | | | 1541 | |
1542 | rxbuf->flags = 0; | | 1542 | rxbuf->flags = 0; |
1543 | rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, | | 1543 | rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, |
1544 | MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); | | 1544 | MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); |
1545 | if (rxbuf->buf == NULL) { | | 1545 | if (rxbuf->buf == NULL) { |
1546 | error = ENOBUFS; | | 1546 | error = ENOBUFS; |
1547 | goto fail; | | 1547 | goto fail; |
1548 | } | | 1548 | } |
1549 | mp = rxbuf->buf; | | 1549 | mp = rxbuf->buf; |
1550 | mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; | | 1550 | mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; |
1551 | /* Get the memory mapping */ | | 1551 | /* Get the memory mapping */ |
1552 | error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap, | | 1552 | error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap, |
1553 | mp, BUS_DMA_NOWAIT); | | 1553 | mp, BUS_DMA_NOWAIT); |
1554 | if (error != 0) | | 1554 | if (error != 0) |
1555 | goto fail; | | 1555 | goto fail; |
1556 | bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, | | 1556 | bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, |
1557 | 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD); | | 1557 | 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD); |
1558 | /* Update the descriptor and the cached value */ | | 1558 | /* Update the descriptor and the cached value */ |
1559 | rxr->rx_base[j].read.pkt_addr = | | 1559 | rxr->rx_base[j].read.pkt_addr = |
1560 | htole64(rxbuf->pmap->dm_segs[0].ds_addr); | | 1560 | htole64(rxbuf->pmap->dm_segs[0].ds_addr); |
1561 | rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr); | | 1561 | rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr); |
1562 | } | | 1562 | } |
1563 | | | 1563 | |
1564 | /* Setup our descriptor indices */ | | 1564 | /* Setup our descriptor indices */ |
1565 | rxr->next_to_check = 0; | | 1565 | rxr->next_to_check = 0; |
1566 | rxr->next_to_refresh = 0; | | 1566 | rxr->next_to_refresh = 0; |
1567 | rxr->lro_enabled = FALSE; | | 1567 | rxr->lro_enabled = FALSE; |
1568 | rxr->rx_copies.ev_count = 0; | | 1568 | rxr->rx_copies.ev_count = 0; |
1569 | #if 0 /* NetBSD */ | | 1569 | #if 0 /* NetBSD */ |
1570 | rxr->rx_bytes.ev_count = 0; | | 1570 | rxr->rx_bytes.ev_count = 0; |
1571 | #if 1 /* Fix inconsistency */ | | 1571 | #if 1 /* Fix inconsistency */ |
1572 | rxr->rx_packets.ev_count = 0; | | 1572 | rxr->rx_packets.ev_count = 0; |
1573 | #endif | | 1573 | #endif |
1574 | #endif | | 1574 | #endif |
1575 | rxr->vtag_strip = FALSE; | | 1575 | rxr->vtag_strip = FALSE; |
1576 | | | 1576 | |
1577 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | | 1577 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, |
1578 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 1578 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1579 | | | 1579 | |
1580 | /* | | 1580 | /* |
1581 | * Now set up the LRO interface | | 1581 | * Now set up the LRO interface |
1582 | */ | | 1582 | */ |
1583 | if (ixgbe_rsc_enable) | | 1583 | if (ixgbe_rsc_enable) |
1584 | ixgbe_setup_hw_rsc(rxr); | | 1584 | ixgbe_setup_hw_rsc(rxr); |
1585 | #ifdef LRO | | 1585 | #ifdef LRO |
1586 | else if (ifp->if_capenable & IFCAP_LRO) { | | 1586 | else if (ifp->if_capenable & IFCAP_LRO) { |
1587 | device_t dev = adapter->dev; | | 1587 | device_t dev = adapter->dev; |
1588 | int err = tcp_lro_init(lro); | | 1588 | int err = tcp_lro_init(lro); |
1589 | if (err) { | | 1589 | if (err) { |
1590 | device_printf(dev, "LRO Initialization failed!\n"); | | 1590 | device_printf(dev, "LRO Initialization failed!\n"); |
1591 | goto fail; | | 1591 | goto fail; |
1592 | } | | 1592 | } |
1593 | INIT_DEBUGOUT("RX Soft LRO Initialized\n"); | | 1593 | INIT_DEBUGOUT("RX Soft LRO Initialized\n"); |
1594 | rxr->lro_enabled = TRUE; | | 1594 | rxr->lro_enabled = TRUE; |
1595 | lro->ifp = adapter->ifp; | | 1595 | lro->ifp = adapter->ifp; |
1596 | } | | 1596 | } |
1597 | #endif /* LRO */ | | 1597 | #endif /* LRO */ |
1598 | | | 1598 | |
1599 | IXGBE_RX_UNLOCK(rxr); | | 1599 | IXGBE_RX_UNLOCK(rxr); |
1600 | | | 1600 | |
1601 | return (0); | | 1601 | return (0); |
1602 | | | 1602 | |
1603 | fail: | | 1603 | fail: |
1604 | ixgbe_free_receive_ring(rxr); | | 1604 | ixgbe_free_receive_ring(rxr); |
1605 | IXGBE_RX_UNLOCK(rxr); | | 1605 | IXGBE_RX_UNLOCK(rxr); |
1606 | | | 1606 | |
1607 | return (error); | | 1607 | return (error); |
1608 | } /* ixgbe_setup_receive_ring */ | | 1608 | } /* ixgbe_setup_receive_ring */ |
1609 | | | 1609 | |
1610 | /************************************************************************ | | 1610 | /************************************************************************ |
1611 | * ixgbe_setup_receive_structures - Initialize all receive rings. | | 1611 | * ixgbe_setup_receive_structures - Initialize all receive rings. |
1612 | ************************************************************************/ | | 1612 | ************************************************************************/ |
1613 | int | | 1613 | int |
1614 | ixgbe_setup_receive_structures(struct adapter *adapter) | | 1614 | ixgbe_setup_receive_structures(struct adapter *adapter) |
1615 | { | | 1615 | { |
1616 | struct rx_ring *rxr = adapter->rx_rings; | | 1616 | struct rx_ring *rxr = adapter->rx_rings; |
1617 | int j; | | 1617 | int j; |
1618 | | | 1618 | |
1619 | INIT_DEBUGOUT("ixgbe_setup_receive_structures"); | | 1619 | INIT_DEBUGOUT("ixgbe_setup_receive_structures"); |
1620 | for (j = 0; j < adapter->num_queues; j++, rxr++) | | 1620 | for (j = 0; j < adapter->num_queues; j++, rxr++) |
1621 | if (ixgbe_setup_receive_ring(rxr)) | | 1621 | if (ixgbe_setup_receive_ring(rxr)) |
1622 | goto fail; | | 1622 | goto fail; |
1623 | | | 1623 | |
1624 | return (0); | | 1624 | return (0); |
1625 | fail: | | 1625 | fail: |
1626 | /* | | 1626 | /* |
1627 | * Free the RX buffers allocated so far. We only handle | | 1627 | * Free the RX buffers allocated so far. We only handle |
1628 | * the rings that completed; the failing ring will have | | 1628 | * the rings that completed; the failing ring will have |
1629 | * cleaned up after itself. Ring 'j' failed, so it's the terminus. | | 1629 | * cleaned up after itself. Ring 'j' failed, so it's the terminus. |
1630 | */ | | 1630 | */ |
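 | /* | |  | /* |
 | * For example (illustration only): if ring 2 fails, rings 0 and 1 | |  | * For example (illustration only): if ring 2 fails, rings 0 and 1 |
 | * are torn down by this loop, while ring 2 has already been freed | |  | * are torn down by this loop, while ring 2 has already been freed |
 | * by the fail path inside ixgbe_setup_receive_ring() itself. | |  | * by the fail path inside ixgbe_setup_receive_ring() itself. |
 | */ | |  | */ |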
1631 | for (int i = 0; i < j; ++i) { | | 1631 | for (int i = 0; i < j; ++i) { |
1632 | rxr = &adapter->rx_rings[i]; | | 1632 | rxr = &adapter->rx_rings[i]; |
1633 | IXGBE_RX_LOCK(rxr); | | 1633 | IXGBE_RX_LOCK(rxr); |
1634 | ixgbe_free_receive_ring(rxr); | | 1634 | ixgbe_free_receive_ring(rxr); |
1635 | IXGBE_RX_UNLOCK(rxr); | | 1635 | IXGBE_RX_UNLOCK(rxr); |
1636 | } | | 1636 | } |
1637 | | | 1637 | |
1638 | return (ENOBUFS); | | 1638 | return (ENOBUFS); |
1639 | } /* ixgbe_setup_receive_structures */ | | 1639 | } /* ixgbe_setup_receive_structures */ |
1640 | | | 1640 | |
1641 | | | 1641 | |
1642 | /************************************************************************ | | 1642 | /************************************************************************ |
1643 | * ixgbe_free_receive_structures - Free all receive rings. | | 1643 | * ixgbe_free_receive_structures - Free all receive rings. |
1644 | ************************************************************************/ | | 1644 | ************************************************************************/ |
1645 | void | | 1645 | void |
1646 | ixgbe_free_receive_structures(struct adapter *adapter) | | 1646 | ixgbe_free_receive_structures(struct adapter *adapter) |
1647 | { | | 1647 | { |
1648 | struct rx_ring *rxr = adapter->rx_rings; | | 1648 | struct rx_ring *rxr = adapter->rx_rings; |
1649 | | | 1649 | |
1650 | INIT_DEBUGOUT("ixgbe_free_receive_structures: begin"); | | 1650 | INIT_DEBUGOUT("ixgbe_free_receive_structures: begin"); |
1651 | | | 1651 | |
1652 | for (int i = 0; i < adapter->num_queues; i++, rxr++) { | | 1652 | for (int i = 0; i < adapter->num_queues; i++, rxr++) { |
1653 | ixgbe_free_receive_buffers(rxr); | | 1653 | ixgbe_free_receive_buffers(rxr); |
1654 | #ifdef LRO | | 1654 | #ifdef LRO |
1655 | /* Free LRO memory */ | | 1655 | /* Free LRO memory */ |
1656 | tcp_lro_free(&rxr->lro); | | 1656 | tcp_lro_free(&rxr->lro); |
1657 | #endif /* LRO */ | | 1657 | #endif /* LRO */ |
1658 | /* Free the ring memory as well */ | | 1658 | /* Free the ring memory as well */ |
1659 | ixgbe_dma_free(adapter, &rxr->rxdma); | | 1659 | ixgbe_dma_free(adapter, &rxr->rxdma); |
1660 | IXGBE_RX_LOCK_DESTROY(rxr); | | 1660 | IXGBE_RX_LOCK_DESTROY(rxr); |
1661 | } | | 1661 | } |
1662 | | | 1662 | |
1663 | free(adapter->rx_rings, M_DEVBUF); | | 1663 | free(adapter->rx_rings, M_DEVBUF); |
1664 | } /* ixgbe_free_receive_structures */ | | 1664 | } /* ixgbe_free_receive_structures */ |
1665 | | | 1665 | |
1666 | | | 1666 | |
1667 | /************************************************************************ | | 1667 | /************************************************************************ |
1668 | * ixgbe_free_receive_buffers - Free receive ring data structures | | 1668 | * ixgbe_free_receive_buffers - Free receive ring data structures |
1669 | ************************************************************************/ | | 1669 | ************************************************************************/ |
1670 | static void | | 1670 | static void |
1671 | ixgbe_free_receive_buffers(struct rx_ring *rxr) | | 1671 | ixgbe_free_receive_buffers(struct rx_ring *rxr) |
1672 | { | | 1672 | { |
1673 | struct adapter *adapter = rxr->adapter; | | 1673 | struct adapter *adapter = rxr->adapter; |
1674 | struct ixgbe_rx_buf *rxbuf; | | 1674 | struct ixgbe_rx_buf *rxbuf; |
1675 | | | 1675 | |
1676 | INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin"); | | 1676 | INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin"); |
1677 | | | 1677 | |
1678 | /* Cleanup any existing buffers */ | | 1678 | /* Cleanup any existing buffers */ |
1679 | if (rxr->rx_buffers != NULL) { | | 1679 | if (rxr->rx_buffers != NULL) { |
1680 | for (int i = 0; i < adapter->num_rx_desc; i++) { | | 1680 | for (int i = 0; i < adapter->num_rx_desc; i++) { |
1681 | rxbuf = &rxr->rx_buffers[i]; | | 1681 | rxbuf = &rxr->rx_buffers[i]; |
1682 | ixgbe_rx_discard(rxr, i); | | 1682 | ixgbe_rx_discard(rxr, i); |
1683 | if (rxbuf->pmap != NULL) { | | 1683 | if (rxbuf->pmap != NULL) { |
1684 | ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap); | | 1684 | ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap); |
1685 | rxbuf->pmap = NULL; | | 1685 | rxbuf->pmap = NULL; |
1686 | } | | 1686 | } |
1687 | } | | 1687 | } |
1688 | | | 1688 | |
1689 | /* NetBSD specific. See ixgbe_netbsd.c */ | | 1689 | /* NetBSD specific. See ixgbe_netbsd.c */ |
1690 | ixgbe_jcl_destroy(adapter, rxr); | | 1690 | ixgbe_jcl_destroy(adapter, rxr); |
1691 | | | 1691 | |
1692 | if (rxr->rx_buffers != NULL) { | | 1692 | if (rxr->rx_buffers != NULL) { |
1693 | free(rxr->rx_buffers, M_DEVBUF); | | 1693 | free(rxr->rx_buffers, M_DEVBUF); |
1694 | rxr->rx_buffers = NULL; | | 1694 | rxr->rx_buffers = NULL; |
1695 | } | | 1695 | } |
1696 | } | | 1696 | } |
1697 | | | 1697 | |
1698 | if (rxr->ptag != NULL) { | | 1698 | if (rxr->ptag != NULL) { |
1699 | ixgbe_dma_tag_destroy(rxr->ptag); | | 1699 | ixgbe_dma_tag_destroy(rxr->ptag); |
1700 | rxr->ptag = NULL; | | 1700 | rxr->ptag = NULL; |
1701 | } | | 1701 | } |
1702 | | | 1702 | |
1703 | return; | | 1703 | return; |
1704 | } /* ixgbe_free_receive_buffers */ | | 1704 | } /* ixgbe_free_receive_buffers */ |
1705 | | | 1705 | |
1706 | /************************************************************************ | | 1706 | /************************************************************************ |
1707 | * ixgbe_rx_input | | 1707 | * ixgbe_rx_input |
1708 | ************************************************************************/ | | 1708 | ************************************************************************/ |
1709 | static __inline void | | 1709 | static __inline void |
1710 | ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, | | 1710 | ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, |
1711 | u32 ptype) | | 1711 | u32 ptype) |
1712 | { | | 1712 | { |
1713 | struct adapter *adapter = ifp->if_softc; | | 1713 | struct adapter *adapter = ifp->if_softc; |
1714 | | | 1714 | |
1715 | #ifdef LRO | | 1715 | #ifdef LRO |
1716 | struct ethercom *ec = &adapter->osdep.ec; | | 1716 | struct ethercom *ec = &adapter->osdep.ec; |
1717 | | | 1717 | |
1718 | /* | | 1718 | /* |
1719 | * At the moment, LRO is only for IP/TCP packets whose TCP checksum | | 1719 | * At the moment, LRO is only for IP/TCP packets whose TCP checksum |
1720 | * was verified by hardware, and which carry no VLAN tag in the | | 1720 | * was verified by hardware, and which carry no VLAN tag in the |
1721 | * Ethernet header. For IPv6 we do not yet support extension headers. | | 1721 | * Ethernet header. For IPv6 we do not yet support extension headers. |
1722 | */ | | 1722 | */ |
1723 | if (rxr->lro_enabled && | | 1723 | if (rxr->lro_enabled && |
1724 | (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 && | | 1724 | (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 && |
1725 | (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && | | 1725 | (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && |
1726 | ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == | | 1726 | ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == |
1727 | (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) || | | 1727 | (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) || |
1728 | (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) == | | 1728 | (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) == |
1729 | (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) && | | 1729 | (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) && |
1730 | (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == | | 1730 | (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == |
1731 | (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { | | 1731 | (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { |
1732 | /* | | 1732 | /* |
1733 | * Send to the stack if: | | 1733 | * Send to the stack if: |
1734 | * - LRO not enabled, or | | 1734 | * - LRO not enabled, or |
1735 | * - no LRO resources, or | | 1735 | * - no LRO resources, or |
1736 | * - LRO enqueue fails | | 1736 | * - LRO enqueue fails |
1737 | */ | | 1737 | */ |
1738 | if (rxr->lro.lro_cnt != 0) | | 1738 | if (rxr->lro.lro_cnt != 0) |
1739 | if (tcp_lro_rx(&rxr->lro, m, 0) == 0) | | 1739 | if (tcp_lro_rx(&rxr->lro, m, 0) == 0) |
1740 | return; | | 1740 | return; |
1741 | } | | 1741 | } |
1742 | #endif /* LRO */ | | 1742 | #endif /* LRO */ |
1743 | | | 1743 | |
1744 | if_percpuq_enqueue(adapter->ipq, m); | | 1744 | if_percpuq_enqueue(adapter->ipq, m); |
1745 | } /* ixgbe_rx_input */ | | 1745 | } /* ixgbe_rx_input */ |
1746 | | | 1746 | |
1747 | /************************************************************************ | | 1747 | /************************************************************************ |
1748 | * ixgbe_rx_discard | | 1748 | * ixgbe_rx_discard |
1749 | ************************************************************************/ | | 1749 | ************************************************************************/ |
1750 | static __inline void | | 1750 | static __inline void |
1751 | ixgbe_rx_discard(struct rx_ring *rxr, int i) | | 1751 | ixgbe_rx_discard(struct rx_ring *rxr, int i) |
1752 | { | | 1752 | { |
1753 | struct ixgbe_rx_buf *rbuf; | | 1753 | struct ixgbe_rx_buf *rbuf; |
1754 | | | 1754 | |
1755 | rbuf = &rxr->rx_buffers[i]; | | 1755 | rbuf = &rxr->rx_buffers[i]; |
1756 | | | 1756 | |
1757 | /* | | 1757 | /* |
1758 | * With advanced descriptors the writeback | | 1758 | * With advanced descriptors the writeback |
1759 | * clobbers the buffer addresses, so it's easier | | 1759 | * clobbers the buffer addresses, so it's easier |
1760 | * to just free the existing mbufs and take | | 1760 | * to just free the existing mbufs and take |
1761 | * the normal refresh path to get new buffers | | 1761 | * the normal refresh path to get new buffers |
1762 | * and mappings. | | 1762 | * and mappings. |
1763 | */ | | 1763 | */ |
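 | /* | |  | /* |
 | * Worked example (hypothetical indices): if one frame spans | |  | * Worked example (hypothetical indices): if one frame spans |
 | * descriptors 5..7, each descriptor's fmp carries the chain head | |  | * descriptors 5..7, each descriptor's fmp carries the chain head |
 | * forward, so discarding at 7 frees the whole chain via | |  | * forward, so discarding at 7 frees the whole chain via |
 | * m_freem(rbuf->fmp); rbuf->buf is a member of that chain and | |  | * m_freem(rbuf->fmp); rbuf->buf is a member of that chain and |
 | * must not be freed separately. | |  | * must not be freed separately. |
 | */ | |  | */ |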
1764 | | | 1764 | |
1765 | if (rbuf->fmp != NULL) { /* Partial chain? */ | | 1765 | if (rbuf->fmp != NULL) { /* Partial chain? */ |
1766 | bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, | | 1766 | bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, |
1767 | rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); | | 1767 | rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); |
1768 | m_freem(rbuf->fmp); | | 1768 | m_freem(rbuf->fmp); |
1769 | rbuf->fmp = NULL; | | 1769 | rbuf->fmp = NULL; |
1770 | rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */ | | 1770 | rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */ |
1771 | } else if (rbuf->buf) { | | 1771 | } else if (rbuf->buf) { |
1772 | bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, | | 1772 | bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, |
1773 | rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); | | 1773 | rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); |
1774 | m_free(rbuf->buf); | | 1774 | m_free(rbuf->buf); |
1775 | rbuf->buf = NULL; | | 1775 | rbuf->buf = NULL; |
1776 | } | | 1776 | } |
1777 | ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap); | | 1777 | ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap); |
1778 | | | 1778 | |
1779 | rbuf->flags = 0; | | 1779 | rbuf->flags = 0; |
1780 | | | 1780 | |
1781 | return; | | 1781 | return; |
1782 | } /* ixgbe_rx_discard */ | | 1782 | } /* ixgbe_rx_discard */ |
1783 | | | 1783 | |
1784 | | | 1784 | |
1785 | /************************************************************************ | | 1785 | /************************************************************************ |
1786 | * ixgbe_rxeof | | 1786 | * ixgbe_rxeof |
1787 | * | | 1787 | * |
1788 | * Executes in interrupt context. It replenishes the | | 1788 | * Executes in interrupt context. It replenishes the |
1789 | * mbufs in the descriptor ring and sends data which has | | 1789 | * mbufs in the descriptor ring and sends data which has |
1790 | * been DMA'ed into host memory to the upper layer. | | 1790 | * been DMA'ed into host memory to the upper layer. |
1791 | * | | 1791 | * |
1792 | * Return TRUE for more work, FALSE for all clean. | | 1792 | * Return TRUE for more work, FALSE for all clean. |
1793 | ************************************************************************/ | | 1793 | ************************************************************************/ |
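 | /* | |  | /* |
 | * Usage sketch (illustrative assumption, not taken from this file): | |  | * Usage sketch (illustrative assumption, not taken from this file): |
 | * a queue interrupt or softint handler re-invokes or reschedules | |  | * a queue interrupt or softint handler re-invokes or reschedules |
 | * itself while this reports more work: | |  | * itself while this reports more work: |
 | * | |  | * |
 | *	bool more = ixgbe_rxeof(que); | |  | *	bool more = ixgbe_rxeof(que); |
 | *	if (more) | |  | *	if (more) |
 | *		... reschedule the handler ... | |  | *		... reschedule the handler ... |
 | * | |  | * |
 | * Note the RX lock is taken internally, so the caller must not | |  | * Note the RX lock is taken internally, so the caller must not |
 | * hold rxr->rx_mtx. | |  | * hold rxr->rx_mtx. |
 | */ | |  | */ |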
1794 | bool | | 1794 | bool |
1795 | ixgbe_rxeof(struct ix_queue *que) | | 1795 | ixgbe_rxeof(struct ix_queue *que) |
1796 | { | | 1796 | { |
1797 | struct adapter *adapter = que->adapter; | | 1797 | struct adapter *adapter = que->adapter; |
1798 | struct rx_ring *rxr = que->rxr; | | 1798 | struct rx_ring *rxr = que->rxr; |
1799 | struct ifnet *ifp = adapter->ifp; | | 1799 | struct ifnet *ifp = adapter->ifp; |
1800 | #ifdef LRO | | 1800 | #ifdef LRO |
1801 | struct lro_ctrl *lro = &rxr->lro; | | 1801 | struct lro_ctrl *lro = &rxr->lro; |
1802 | #endif /* LRO */ | | 1802 | #endif /* LRO */ |
1803 | union ixgbe_adv_rx_desc *cur; | | 1803 | union ixgbe_adv_rx_desc *cur; |
1804 | struct ixgbe_rx_buf *rbuf, *nbuf; | | 1804 | struct ixgbe_rx_buf *rbuf, *nbuf; |
1805 | int i, nextp, processed = 0; | | 1805 | int i, nextp, processed = 0; |
1806 | u32 staterr = 0; | | 1806 | u32 staterr = 0; |
1807 | u32 count = 0; | | 1807 | u32 count = 0; |
1808 | u32 limit = adapter->rx_process_limit; | | 1808 | u32 limit = adapter->rx_process_limit; |
1809 | bool discard_multidesc = false; | | 1809 | bool discard_multidesc = false; |
1810 | #ifdef RSS | | 1810 | #ifdef RSS |
1811 | u16 pkt_info; | | 1811 | u16 pkt_info; |
1812 | #endif | | 1812 | #endif |
1813 | | | 1813 | |
1814 | IXGBE_RX_LOCK(rxr); | | 1814 | IXGBE_RX_LOCK(rxr); |
1815 | | | 1815 | |
1816 | #ifdef DEV_NETMAP | | 1816 | #ifdef DEV_NETMAP |
1817 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) { | | 1817 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) { |
1818 | /* Same as the txeof routine: wake up clients on interrupt. */ | | 1818 | /* Same as the txeof routine: wake up clients on interrupt. */ |
1819 | if (netmap_rx_irq(ifp, rxr->me, &processed)) { | | 1819 | if (netmap_rx_irq(ifp, rxr->me, &processed)) { |
1820 | IXGBE_RX_UNLOCK(rxr); | | 1820 | IXGBE_RX_UNLOCK(rxr); |
1821 | return (FALSE); | | 1821 | return (FALSE); |
1822 | } | | 1822 | } |
1823 | } | | 1823 | } |
1824 | #endif /* DEV_NETMAP */ | | 1824 | #endif /* DEV_NETMAP */ |
1825 | | | 1825 | |
1826 | /* | | 1826 | /* |
1827 | * The maximum number of loop iterations is rx_process_limit. If | | 1827 | * The maximum number of loop iterations is rx_process_limit. If |
1828 | * discard_multidesc is true, keep processing so that a broken packet | | 1828 | * discard_multidesc is true, keep processing so that a broken packet |
1829 | * is not handed to the upper layer. | | 1829 | * is not handed to the upper layer. |
1830 | */ | | 1830 | */ |
1831 | for (i = rxr->next_to_check; | | 1831 | for (i = rxr->next_to_check; |
1832 | (count < limit) || (discard_multidesc == true);) { | | 1832 | (count < limit) || (discard_multidesc == true);) { |
1833 | | | 1833 | |
1834 | struct mbuf *sendmp, *mp; | | 1834 | struct mbuf *sendmp, *mp; |
1835 | struct mbuf *newmp; | | 1835 | struct mbuf *newmp; |
1836 | u32 rsc, ptype; | | 1836 | u32 rsc, ptype; |
1837 | u16 len; | | 1837 | u16 len; |
1838 | u16 vtag = 0; | | 1838 | u16 vtag = 0; |
1839 | bool eop; | | 1839 | bool eop; |
1840 | | | 1840 | |
1841 | /* Sync the ring. */ | | 1841 | /* Sync the ring. */ |
1842 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | | 1842 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, |
1843 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | | 1843 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1844 | | | 1844 | |
1845 | cur = &rxr->rx_base[i]; | | 1845 | cur = &rxr->rx_base[i]; |
1846 | staterr = le32toh(cur->wb.upper.status_error); | | 1846 | staterr = le32toh(cur->wb.upper.status_error); |
1847 | #ifdef RSS | | 1847 | #ifdef RSS |
1848 | pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info); | | 1848 | pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info); |
1849 | #endif | | 1849 | #endif |
1850 | | | 1850 | |
1851 | if ((staterr & IXGBE_RXD_STAT_DD) == 0) | | 1851 | if ((staterr & IXGBE_RXD_STAT_DD) == 0) |
1852 | break; | | 1852 | break; |
1853 | | | 1853 | |
1854 | count++; | | 1854 | count++; |
1855 | sendmp = NULL; | | 1855 | sendmp = NULL; |
1856 | nbuf = NULL; | | 1856 | nbuf = NULL; |
1857 | rsc = 0; | | 1857 | rsc = 0; |
1858 | cur->wb.upper.status_error = 0; | | 1858 | cur->wb.upper.status_error = 0; |
1859 | rbuf = &rxr->rx_buffers[i]; | | 1859 | rbuf = &rxr->rx_buffers[i]; |
1860 | mp = rbuf->buf; | | 1860 | mp = rbuf->buf; |
1861 | | | 1861 | |
1862 | len = le16toh(cur->wb.upper.length); | | 1862 | len = le16toh(cur->wb.upper.length); |
1863 | ptype = le32toh(cur->wb.lower.lo_dword.data) & | | 1863 | ptype = le32toh(cur->wb.lower.lo_dword.data) & |
1864 | IXGBE_RXDADV_PKTTYPE_MASK; | | 1864 | IXGBE_RXDADV_PKTTYPE_MASK; |
1865 | eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); | | 1865 | eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); |
1866 | | | 1866 | |
1867 | /* Make sure bad packets are discarded */ | | 1867 | /* Make sure bad packets are discarded */ |
1868 | if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { | | 1868 | if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { |
1869 | #if __FreeBSD_version >= 1100036 | | 1869 | #if __FreeBSD_version >= 1100036 |
1870 | if (adapter->feat_en & IXGBE_FEATURE_VF) | | 1870 | if (adapter->feat_en & IXGBE_FEATURE_VF) |
1871 | if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); | | 1871 | if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); |
1872 | #endif | | 1872 | #endif |
1873 | rxr->rx_discarded.ev_count++; | | 1873 | rxr->rx_discarded.ev_count++; |
1874 | ixgbe_rx_discard(rxr, i); | | 1874 | ixgbe_rx_discard(rxr, i); |
1875 | discard_multidesc = false; | | 1875 | discard_multidesc = false; |
1876 | goto next_desc; | | 1876 | goto next_desc; |
1877 | } | | 1877 | } |
1878 | | | 1878 | |
1879 | /* pre-alloc new mbuf */ | | 1879 | /* pre-alloc new mbuf */ |
1880 | if (!discard_multidesc) | | 1880 | if (!discard_multidesc) |
1881 | newmp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, MT_DATA, | | 1881 | newmp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, MT_DATA, |
1882 | M_PKTHDR, rxr->mbuf_sz); | | 1882 | M_PKTHDR, rxr->mbuf_sz); |
1883 | else | | 1883 | else |
1884 | newmp = NULL; | | 1884 | newmp = NULL; |
1885 | if (newmp == NULL) { | | 1885 | if (newmp == NULL) { |
1886 | rxr->no_jmbuf.ev_count++; | | 1886 | rxr->no_jmbuf.ev_count++; |
1887 | /* | | 1887 | /* |
1888 | * Descriptor initialization is already done by the | | 1888 | * Descriptor initialization is already done by the |
1889 | * above code (cur->wb.upper.status_error = 0). | | 1889 | * above code (cur->wb.upper.status_error = 0). |
1890 | * So, we can reuse the current rbuf->buf for the new packet. | | 1890 | * So, we can reuse the current rbuf->buf for the new packet. |
1891 | * | | 1891 | * |
1892 | * Rewrite the buffer address; see the comment in | | 1892 | * Rewrite the buffer address; see the comment in |
1893 | * ixgbe_rx_discard(). | | 1893 | * ixgbe_rx_discard(). |
1894 | */ | | 1894 | */ |
1895 | cur->read.pkt_addr = rbuf->addr; | | 1895 | cur->read.pkt_addr = rbuf->addr; |
1896 | m_freem(rbuf->fmp); | | 1896 | m_freem(rbuf->fmp); |
1897 | rbuf->fmp = NULL; | | 1897 | rbuf->fmp = NULL; |
1898 | if (!eop) { | | 1898 | if (!eop) { |
1899 | /* Discard the entire packet. */ | | 1899 | /* Discard the entire packet. */ |
1900 | discard_multidesc = true; | | 1900 | discard_multidesc = true; |
1901 | } else | | 1901 | } else |
1902 | discard_multidesc = false; | | 1902 | discard_multidesc = false; |
1903 | goto next_desc; | | 1903 | goto next_desc; |
1904 | } | | 1904 | } |
1905 | discard_multidesc = false; | | 1905 | discard_multidesc = false; |
1906 | | | 1906 | |
1907 | bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, | | 1907 | bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, |
1908 | rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); | | 1908 | rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); |
1909 | | | 1909 | |
1910 | /* | | 1910 | /* |
1911 | * On the 82599, which supports a hardware | | 1911 | * On the 82599, which supports a hardware |
1912 | * LRO (called HW RSC), packets need | | 1912 | * LRO (called HW RSC), packets need |
1913 | * not be fragmented across sequential | | 1913 | * not be fragmented across sequential |
1914 | * descriptors; rather, the next descriptor | | 1914 | * descriptors; rather, the next descriptor |
1915 | * of a frame is indicated in bits of the | | 1915 | * of a frame is indicated in bits of the |
1916 | * current descriptor. This also means we | | 1916 | * current descriptor. This also means we |
1917 | * might process more than one packet at a | | 1917 | * might process more than one packet at a |
1918 | * time, something that was never true | | 1918 | * time, something that was never true |
1919 | * before; it required eliminating global | | 1919 | * before; it required eliminating global |
1920 | * chain pointers in favor of what we do here. -jfv | | 1920 | * chain pointers in favor of what we do here. -jfv |
1921 | */ | | 1921 | */ |
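 | /* | |  | /* |
 | * Illustration (hypothetical values): with HW RSC active, the | |  | * Illustration (hypothetical values): with HW RSC active, the |
 | * current index might be 12 while the NEXTP field of staterr says | |  | * current index might be 12 while the NEXTP field of staterr says |
 | * the frame continues at descriptor 37; the chain is then followed | |  | * the frame continues at descriptor 37; the chain is then followed |
 | * via rx_buffers[37] rather than rx_buffers[13]. | |  | * via rx_buffers[37] rather than rx_buffers[13]. |
 | */ | |  | */ |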
1922 | if (!eop) { | | 1922 | if (!eop) { |
1923 | /* | | 1923 | /* |
1924 | * Figure out the next descriptor | | 1924 | * Figure out the next descriptor |
1925 | * of this frame. | | 1925 | * of this frame. |
1926 | */ | | 1926 | */ |
1927 | if (rxr->hw_rsc == TRUE) { | | 1927 | if (rxr->hw_rsc == TRUE) { |
1928 | rsc = ixgbe_rsc_count(cur); | | 1928 | rsc = ixgbe_rsc_count(cur); |
1929 | rxr->rsc_num += (rsc - 1); | | 1929 | rxr->rsc_num += (rsc - 1); |
1930 | } | | 1930 | } |
1931 | if (rsc) { /* Get hardware index */ | | 1931 | if (rsc) { /* Get hardware index */ |
1932 | nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >> | | 1932 | nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >> |
1933 | IXGBE_RXDADV_NEXTP_SHIFT); | | 1933 | IXGBE_RXDADV_NEXTP_SHIFT); |
1934 | } else { /* Just sequential */ | | 1934 | } else { /* Just sequential */ |
1935 | nextp = i + 1; | | 1935 | nextp = i + 1; |
1936 | if (nextp == adapter->num_rx_desc) | | 1936 | if (nextp == adapter->num_rx_desc) |
1937 | nextp = 0; | | 1937 | nextp = 0; |
1938 | } | | 1938 | } |
1939 | nbuf = &rxr->rx_buffers[nextp]; | | 1939 | nbuf = &rxr->rx_buffers[nextp]; |
1940 | prefetch(nbuf); | | 1940 | prefetch(nbuf); |
1941 | } | | 1941 | } |
1942 | /* | | 1942 | /* |
1943 | * Rather than using the fmp/lmp global pointers | | 1943 | * Rather than using the fmp/lmp global pointers |
1944 | * we now keep the head of a packet chain in the | | 1944 | * we now keep the head of a packet chain in the |
1945 | * buffer struct and pass this along from one | | 1945 | * buffer struct and pass this along from one |
1946 | * descriptor to the next, until we get EOP. | | 1946 | * descriptor to the next, until we get EOP. |
1947 | */ | | 1947 | */ |
1948 | mp->m_len = len; | | 1948 | mp->m_len = len; |
1949 | /* | | 1949 | /* |
1950 | * See if there is a stored head from a previous descriptor; | | 1950 | * See if there is a stored head from a previous descriptor; |
1951 | * it determines whether this is a continuation or a new frame. | | 1951 | * it determines whether this is a continuation or a new frame. |
1952 | */ | | 1952 | */ |
1953 | sendmp = rbuf->fmp; | | 1953 | sendmp = rbuf->fmp; |
1954 | if (sendmp != NULL) { /* secondary frag */ | | 1954 | if (sendmp != NULL) { /* secondary frag */ |
1955 | rbuf->buf = newmp; | | 1955 | rbuf->buf = newmp; |
1956 | rbuf->fmp = NULL; | | 1956 | rbuf->fmp = NULL; |
1957 | mp->m_flags &= ~M_PKTHDR; | | 1957 | mp->m_flags &= ~M_PKTHDR; |
1958 | sendmp->m_pkthdr.len += mp->m_len; | | 1958 | sendmp->m_pkthdr.len += mp->m_len; |
1959 | } else { | | 1959 | } else { |
1960 | /* | | 1960 | /* |
1961 | * Optimize. This might be a small packet, | | 1961 | * Optimize. This might be a small packet, |
1962 | * maybe just a TCP ACK. Do a fast, cache-aligned | | 1962 | * maybe just a TCP ACK. Do a fast, cache-aligned |
1963 | * copy into a new mbuf, and | | 1963 | * copy into a new mbuf, and |
1964 | * leave the old mbuf+cluster for re-use. | | 1964 | * leave the old mbuf+cluster for re-use. |
1965 | */ | | 1965 | */ |
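 | /* | |  | /* |
 | * Design note (our reading of the code): copying keeps the jumbo | |  | * Design note (our reading of the code): copying keeps the jumbo |
 | * cluster attached to the ring, and the IXGBE_RX_COPY flag set | |  | * cluster attached to the ring, and the IXGBE_RX_COPY flag set |
 | * below lets the refresh path reuse the existing DMA mapping | |  | * below lets the refresh path reuse the existing DMA mapping |
 | * rather than unloading and reloading it. | |  | * rather than unloading and reloading it. |
 | */ | |  | */ |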
1966 | if (eop && len <= IXGBE_RX_COPY_LEN) { | | 1966 | if (eop && len <= IXGBE_RX_COPY_LEN) { |
1967 | sendmp = m_gethdr(M_NOWAIT, MT_DATA); | | 1967 | sendmp = m_gethdr(M_NOWAIT, MT_DATA); |
1968 | if (sendmp != NULL) { | | 1968 | if (sendmp != NULL) { |
1969 | sendmp->m_data += IXGBE_RX_COPY_ALIGN; | | 1969 | sendmp->m_data += IXGBE_RX_COPY_ALIGN; |
1970 | ixgbe_bcopy(mp->m_data, sendmp->m_data, | | 1970 | ixgbe_bcopy(mp->m_data, sendmp->m_data, |
1971 | len); | | 1971 | len); |
1972 | sendmp->m_len = len; | | 1972 | sendmp->m_len = len; |
1973 | rxr->rx_copies.ev_count++; | | 1973 | rxr->rx_copies.ev_count++; |
1974 | rbuf->flags |= IXGBE_RX_COPY; | | 1974 | rbuf->flags |= IXGBE_RX_COPY; |
1975 | | | 1975 | |
1976 | m_freem(newmp); | | 1976 | m_freem(newmp); |
1977 | } | | 1977 | } |
1978 | } | | 1978 | } |
1979 | if (sendmp == NULL) { | | 1979 | if (sendmp == NULL) { |
1980 | rbuf->buf = newmp; | | 1980 | rbuf->buf = newmp; |
1981 | rbuf->fmp = NULL; | | 1981 | rbuf->fmp = NULL; |
1982 | sendmp = mp; | | 1982 | sendmp = mp; |
1983 | } | | 1983 | } |
1984 | | | 1984 | |
1985 | /* First descriptor of a non-packet-split chain */ | | 1985 | /* First descriptor of a non-packet-split chain */ |
1986 | sendmp->m_flags |= M_PKTHDR; | | 1986 | sendmp->m_flags |= M_PKTHDR; |
1987 | sendmp->m_pkthdr.len = mp->m_len; | | 1987 | sendmp->m_pkthdr.len = mp->m_len; |
1988 | } | | 1988 | } |
1989 | ++processed; | | 1989 | ++processed; |
1990 | | | 1990 | |
1991 | /* Pass the head pointer on */ | | 1991 | /* Pass the head pointer on */ |
1992 | if (eop == 0) { | | 1992 | if (eop == 0) { |
1993 | nbuf->fmp = sendmp; | | 1993 | nbuf->fmp = sendmp; |
1994 | sendmp = NULL; | | 1994 | sendmp = NULL; |
1995 | mp->m_next = nbuf->buf; | | 1995 | mp->m_next = nbuf->buf; |
1996 | } else { /* Sending this frame */ | | 1996 | } else { /* Sending this frame */ |
1997 | m_set_rcvif(sendmp, ifp); | | 1997 | m_set_rcvif(sendmp, ifp); |
1998 | ++rxr->packets; | | 1998 | ++rxr->packets; |
1999 | rxr->rx_packets.ev_count++; | | 1999 | rxr->rx_packets.ev_count++; |
2000 | /* Capture data for AIM (adaptive interrupt moderation) */ | | 2000 | /* Capture data for AIM (adaptive interrupt moderation) */ |
2001 | rxr->bytes += sendmp->m_pkthdr.len; | | 2001 | rxr->bytes += sendmp->m_pkthdr.len; |
2002 | rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len; | | 2002 | rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len; |
2003 | /* Process vlan info */ | | 2003 | /* Process vlan info */ |
2004 | if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP)) | | 2004 | if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP)) |
2005 | vtag = le16toh(cur->wb.upper.vlan); | | 2005 | vtag = le16toh(cur->wb.upper.vlan); |
2006 | if (vtag) { | | 2006 | if (vtag) { |
2007 | vlan_set_tag(sendmp, vtag); | | 2007 | vlan_set_tag(sendmp, vtag); |
2008 | } | | 2008 | } |
2009 | if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { | | 2009 | if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { |
2010 | ixgbe_rx_checksum(staterr, sendmp, ptype, | | 2010 | ixgbe_rx_checksum(staterr, sendmp, ptype, |
2011 | &adapter->stats.pf); | | 2011 | &adapter->stats.pf); |
2012 | } | | 2012 | } |
2013 | | | 2013 | |
2014 | #if 0 /* FreeBSD */ | | 2014 | #if 0 /* FreeBSD */ |
2015 | /* | | 2015 | /* |
2016 | * In case of multiqueue, we have RXCSUM.PCSD bit set | | 2016 | * In case of multiqueue, we have RXCSUM.PCSD bit set |
2017 | * and never cleared. This means we have RSS hash | | 2017 | * and never cleared. This means we have RSS hash |
2018 | * available to be used. | | 2018 | * available to be used. |
2019 | */ | | 2019 | */ |
2020 | if (adapter->num_queues > 1) { | | 2020 | if (adapter->num_queues > 1) { |
2021 | sendmp->m_pkthdr.flowid = | | 2021 | sendmp->m_pkthdr.flowid = |
2022 | le32toh(cur->wb.lower.hi_dword.rss); | | 2022 | le32toh(cur->wb.lower.hi_dword.rss); |
2023 | switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) { | | 2023 | switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) { |
2024 | case IXGBE_RXDADV_RSSTYPE_IPV4: | | 2024 | case IXGBE_RXDADV_RSSTYPE_IPV4: |
2025 | M_HASHTYPE_SET(sendmp, | | 2025 | M_HASHTYPE_SET(sendmp, |
2026 | M_HASHTYPE_RSS_IPV4); | | 2026 | M_HASHTYPE_RSS_IPV4); |
2027 | break; | | 2027 | break; |
2028 | case IXGBE_RXDADV_RSSTYPE_IPV4_TCP: | | 2028 | case IXGBE_RXDADV_RSSTYPE_IPV4_TCP: |
2029 | M_HASHTYPE_SET(sendmp, | | 2029 | M_HASHTYPE_SET(sendmp, |
2030 | M_HASHTYPE_RSS_TCP_IPV4); | | 2030 | M_HASHTYPE_RSS_TCP_IPV4); |
2031 | break; | | 2031 | break; |
2032 | case IXGBE_RXDADV_RSSTYPE_IPV6: | | 2032 | case IXGBE_RXDADV_RSSTYPE_IPV6: |
2033 | M_HASHTYPE_SET(sendmp, | | 2033 | M_HASHTYPE_SET(sendmp, |
2034 | M_HASHTYPE_RSS_IPV6); | | 2034 | M_HASHTYPE_RSS_IPV6); |
2035 | break; | | 2035 | break; |
2036 | case IXGBE_RXDADV_RSSTYPE_IPV6_TCP: | | 2036 | case IXGBE_RXDADV_RSSTYPE_IPV6_TCP: |
2037 | M_HASHTYPE_SET(sendmp, | | 2037 | M_HASHTYPE_SET(sendmp, |
2038 | M_HASHTYPE_RSS_TCP_IPV6); | | 2038 | M_HASHTYPE_RSS_TCP_IPV6); |
2039 | break; | | 2039 | break; |
2040 | case IXGBE_RXDADV_RSSTYPE_IPV6_EX: | | 2040 | case IXGBE_RXDADV_RSSTYPE_IPV6_EX: |
2041 | M_HASHTYPE_SET(sendmp, | | 2041 | M_HASHTYPE_SET(sendmp, |
2042 | M_HASHTYPE_RSS_IPV6_EX); | | 2042 | M_HASHTYPE_RSS_IPV6_EX); |
2043 | break; | | 2043 | break; |
2044 | case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX: | | 2044 | case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX: |
2045 | M_HASHTYPE_SET(sendmp, | | 2045 | M_HASHTYPE_SET(sendmp, |
2046 | M_HASHTYPE_RSS_TCP_IPV6_EX); | | 2046 | M_HASHTYPE_RSS_TCP_IPV6_EX); |
2047 | break; | | 2047 | break; |
2048 | #if __FreeBSD_version > 1100000 | | 2048 | #if __FreeBSD_version > 1100000 |
2049 | case IXGBE_RXDADV_RSSTYPE_IPV4_UDP: | | 2049 | case IXGBE_RXDADV_RSSTYPE_IPV4_UDP: |
2050 | M_HASHTYPE_SET(sendmp, | | 2050 | M_HASHTYPE_SET(sendmp, |
2051 | M_HASHTYPE_RSS_UDP_IPV4); | | 2051 | M_HASHTYPE_RSS_UDP_IPV4); |
2052 | break; | | 2052 | break; |
2053 | case IXGBE_RXDADV_RSSTYPE_IPV6_UDP: | | 2053 | case IXGBE_RXDADV_RSSTYPE_IPV6_UDP: |
2054 | M_HASHTYPE_SET(sendmp, | | 2054 | M_HASHTYPE_SET(sendmp, |
2055 | M_HASHTYPE_RSS_UDP_IPV6); | | 2055 | M_HASHTYPE_RSS_UDP_IPV6); |
2056 | break; | | 2056 | break; |
2057 | case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX: | | 2057 | case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX: |
2058 | M_HASHTYPE_SET(sendmp, | | 2058 | M_HASHTYPE_SET(sendmp, |
2059 | M_HASHTYPE_RSS_UDP_IPV6_EX); | | 2059 | M_HASHTYPE_RSS_UDP_IPV6_EX); |
2060 | break; | | 2060 | break; |
2061 | #endif | | 2061 | #endif |
2062 | default: | | 2062 | default: |
2063 | M_HASHTYPE_SET(sendmp, | | 2063 | M_HASHTYPE_SET(sendmp, |
2064 | M_HASHTYPE_OPAQUE_HASH); | | 2064 | M_HASHTYPE_OPAQUE_HASH); |
2065 | } | | 2065 | } |
2066 | } else { | | 2066 | } else { |
2067 | sendmp->m_pkthdr.flowid = que->msix; | | 2067 | sendmp->m_pkthdr.flowid = que->msix; |
2068 | M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); | | 2068 | M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); |
2069 | } | | 2069 | } |
2070 | #endif | | 2070 | #endif |
2071 | } | | 2071 | } |
2072 | next_desc: | | 2072 | next_desc: |
2073 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, | | 2073 | ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, |
2074 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 2074 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2075 | | | 2075 | |
2076 | /* Advance our pointers to the next descriptor. */ | | 2076 | /* Advance our pointers to the next descriptor. */ |
2077 | if (++i == rxr->num_desc) | | 2077 | if (++i == rxr->num_desc) |
2078 | i = 0; | | 2078 | i = 0; |
2079 | | | 2079 | |
2080 | /* Now send to the stack or do LRO */ | | 2080 | /* Now send to the stack or do LRO */ |
2081 | if (sendmp != NULL) { | | 2081 | if (sendmp != NULL) { |
2082 | rxr->next_to_check = i; | | | |
2083 | ixgbe_rx_input(rxr, ifp, sendmp, ptype); | | 2082 | ixgbe_rx_input(rxr, ifp, sendmp, ptype); |
2084 | i = rxr->next_to_check; | | | |
2085 | } | | 2083 | } |
2086 | | | 2084 | |
2087 | /* Every eight processed descriptors, refresh the mbufs */ | | 2085 | /* Every eight processed descriptors, refresh the mbufs */ |
2088 | if (processed == 8) { | | 2086 | if (processed == 8) { |
2089 | ixgbe_refresh_mbufs(rxr, i); | | 2087 | ixgbe_refresh_mbufs(rxr, i); |
2090 | processed = 0; | | 2088 | processed = 0; |
2091 | } | | 2089 | } |
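 | /* | |  | /* |
 | * Refreshing in batches of eight is a throughput heuristic (as we | |  | * Refreshing in batches of eight is a throughput heuristic (as we |
 | * read it): it amortizes the descriptor-ring sync and the RDT | |  | * read it): it amortizes the descriptor-ring sync and the RDT |
 | * tail-register update over several packets instead of paying | |  | * tail-register update over several packets instead of paying |
 | * that cost for every received packet. | |  | * that cost for every received packet. |
 | */ | |  | */ |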
2092 | } | | 2090 | } |
2093 | | | 2091 | |
2094 | /* Refresh any remaining buf structs */ | | 2092 | /* Refresh any remaining buf structs */ |
2095 | if (ixgbe_rx_unrefreshed(rxr)) | | 2093 | if (ixgbe_rx_unrefreshed(rxr)) |
2096 | ixgbe_refresh_mbufs(rxr, i); | | 2094 | ixgbe_refresh_mbufs(rxr, i); |
2097 | | | 2095 | |
2098 | rxr->next_to_check = i; | | 2096 | rxr->next_to_check = i; |
2099 | | | 2097 | |
2100 | IXGBE_RX_UNLOCK(rxr); | | 2098 | IXGBE_RX_UNLOCK(rxr); |
2101 | | | 2099 | |
2102 | #ifdef LRO | | 2100 | #ifdef LRO |
2103 | /* | | 2101 | /* |
2104 | * Flush any outstanding LRO work | | 2102 | * Flush any outstanding LRO work |
2105 | */ | | 2103 | */ |
2106 | tcp_lro_flush_all(lro); | | 2104 | tcp_lro_flush_all(lro); |
2107 | #endif /* LRO */ | | 2105 | #endif /* LRO */ |
2108 | | | 2106 | |
2109 | /* | | 2107 | /* |
2110 | * Still have cleaning to do? | | 2108 | * Still have cleaning to do? |
2111 | */ | | 2109 | */ |
2112 | if ((staterr & IXGBE_RXD_STAT_DD) != 0) | | 2110 | if ((staterr & IXGBE_RXD_STAT_DD) != 0) |
2113 | return (TRUE); | | 2111 | return (TRUE); |
2114 | | | 2112 | |
2115 | return (FALSE); | | 2113 | return (FALSE); |
2116 | } /* ixgbe_rxeof */ | | 2114 | } /* ixgbe_rxeof */ |
2117 | | | 2115 | |
2118 | | | 2116 | |
2119 | /************************************************************************ | | 2117 | /************************************************************************ |
2120 | * ixgbe_rx_checksum | | 2118 | * ixgbe_rx_checksum |
2121 | * | | 2119 | * |
2122 | * Verify that the hardware indicated that the checksum is valid. | | 2120 | * Verify that the hardware indicated that the checksum is valid. |
2123 | * Inform the stack about the status of the checksum so that the | | 2121 | * Inform the stack about the status of the checksum so that the |
2124 | * stack doesn't spend time verifying it. | | 2122 | * stack doesn't spend time verifying it. |
2125 | ************************************************************************/ | | 2123 | ************************************************************************/ |
2126 | static void | | 2124 | static void |
2127 | ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype, | | 2125 | ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype, |
2128 | struct ixgbe_hw_stats *stats) | | 2126 | struct ixgbe_hw_stats *stats) |
2129 | { | | 2127 | { |
2130 | u16 status = (u16)staterr; | | 2128 | u16 status = (u16)staterr; |
2131 | u8 errors = (u8)(staterr >> 24); | | 2129 | u8 errors = (u8)(staterr >> 24); |
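 | /* | |  | /* |
 | * Example decode (a sketch assuming the usual layout, with the | |  | * Example decode (a sketch assuming the usual layout, with the |
 | * IXGBE_RXD_STAT_* bits in the low word and the IXGBE_RXD_ERR_* | |  | * IXGBE_RXD_STAT_* bits in the low word and the IXGBE_RXD_ERR_* |
 | * bits in the top byte): staterr == 0x00000061 has the DD, L4CS | |  | * bits in the top byte): staterr == 0x00000061 has the DD, L4CS |
 | * and IPCS bits set and no error bits, so both the IPv4 and the | |  | * and IPCS bits set and no error bits, so both the IPv4 and the |
 | * L4 checksums were verified good by the hardware. | |  | * L4 checksums were verified good by the hardware. |
 | */ | |  | */ |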
2132 | #if 0 | | 2130 | #if 0 |
2133 | bool sctp = false; | | 2131 | bool sctp = false; |
2134 | | | 2132 | |
2135 | if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && | | 2133 | if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && |
2136 | (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) | | 2134 | (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) |
2137 | sctp = true; | | 2135 | sctp = true; |
2138 | #endif | | 2136 | #endif |
2139 | | | 2137 | |
2140 | /* IPv4 checksum */ | | 2138 | /* IPv4 checksum */ |
2141 | if (status & IXGBE_RXD_STAT_IPCS) { | | 2139 | if (status & IXGBE_RXD_STAT_IPCS) { |
2142 | stats->ipcs.ev_count++; | | 2140 | stats->ipcs.ev_count++; |
2143 | if (!(errors & IXGBE_RXD_ERR_IPE)) { | | 2141 | if (!(errors & IXGBE_RXD_ERR_IPE)) { |
2144 | /* IP Checksum Good */ | | 2142 | /* IP Checksum Good */ |
2145 | mp->m_pkthdr.csum_flags = M_CSUM_IPv4; | | 2143 | mp->m_pkthdr.csum_flags = M_CSUM_IPv4; |
2146 | } else { | | 2144 | } else { |
2147 | stats->ipcs_bad.ev_count++; | | 2145 | stats->ipcs_bad.ev_count++; |
2148 | mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD; | | 2146 | mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD; |
2149 | } | | 2147 | } |
2150 | } | | 2148 | } |
2151 | /* TCP/UDP/SCTP checksum */ | | 2149 | /* TCP/UDP/SCTP checksum */ |
2152 | if (status & IXGBE_RXD_STAT_L4CS) { | | 2150 | if (status & IXGBE_RXD_STAT_L4CS) { |
2153 | stats->l4cs.ev_count++; | | 2151 | stats->l4cs.ev_count++; |
2154 | int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6; | | 2152 | int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6; |
2155 | if (!(errors & IXGBE_RXD_ERR_TCPE)) { | | 2153 | if (!(errors & IXGBE_RXD_ERR_TCPE)) { |
2156 | mp->m_pkthdr.csum_flags |= type; | | 2154 | mp->m_pkthdr.csum_flags |= type; |
2157 | } else { | | 2155 | } else { |
2158 | stats->l4cs_bad.ev_count++; | | 2156 | stats->l4cs_bad.ev_count++; |
2159 | mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD; | | 2157 | mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD; |
2160 | } | | 2158 | } |
2161 | } | | 2159 | } |
2162 | } /* ixgbe_rx_checksum */ | | 2160 | } /* ixgbe_rx_checksum */ |
2163 | | | 2161 | |
2164 | /************************************************************************ | | 2162 | /************************************************************************ |
2165 | * ixgbe_dma_malloc | | 2163 | * ixgbe_dma_malloc |
2166 | ************************************************************************/ | | 2164 | ************************************************************************/ |
2167 | int | | 2165 | int |
2168 | ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size, | | 2166 | ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size, |
2169 | struct ixgbe_dma_alloc *dma, const int mapflags) | | 2167 | struct ixgbe_dma_alloc *dma, const int mapflags) |
2170 | { | | 2168 | { |
2171 | device_t dev = adapter->dev; | | 2169 | device_t dev = adapter->dev; |
2172 | int r, rsegs; | | 2170 | int r, rsegs; |
2173 | | | 2171 | |
2174 | r = ixgbe_dma_tag_create( | | 2172 | r = ixgbe_dma_tag_create( |
2175 | /* parent */ adapter->osdep.dmat, | | 2173 | /* parent */ adapter->osdep.dmat, |
2176 | /* alignment */ DBA_ALIGN, | | 2174 | /* alignment */ DBA_ALIGN, |
2177 | /* bounds */ 0, | | 2175 | /* bounds */ 0, |
2178 | /* maxsize */ size, | | 2176 | /* maxsize */ size, |
2179 | /* nsegments */ 1, | | 2177 | /* nsegments */ 1, |
2180 | /* maxsegsize */ size, | | 2178 | /* maxsegsize */ size, |
2181 | /* flags */ BUS_DMA_ALLOCNOW, | | 2179 | /* flags */ BUS_DMA_ALLOCNOW, |
2182 | &dma->dma_tag); | | 2180 | &dma->dma_tag); |
2183 | if (r != 0) { | | 2181 | if (r != 0) { |
2184 | aprint_error_dev(dev, | | 2182 | aprint_error_dev(dev, |
2185 | "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, | | 2183 | "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, |
2186 | r); | | 2184 | r); |
2187 | goto fail_0; | | 2185 | goto fail_0; |
2188 | } | | 2186 | } |
2189 | | | 2187 | |
2190 | r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size, | | 2188 | r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size, |
2191 | dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary, | | 2189 | dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary, |
2192 | &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT); | | 2190 | &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT); |
2193 | if (r != 0) { | | 2191 | if (r != 0) { |
2194 | aprint_error_dev(dev, | | 2192 | aprint_error_dev(dev, |
2195 | "%s: bus_dmamem_alloc failed; error %d\n", __func__, r); | | 2193 | "%s: bus_dmamem_alloc failed; error %d\n", __func__, r); |
2196 | goto fail_1; | | 2194 | goto fail_1; |
2197 | } | | 2195 | } |
2198 | | | 2196 | |
2199 | r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs, | | 2197 | r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs, |
2200 | size, &dma->dma_vaddr, BUS_DMA_NOWAIT); | | 2198 | size, &dma->dma_vaddr, BUS_DMA_NOWAIT); |
2201 | if (r != 0) { | | 2199 | if (r != 0) { |
2202 | aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n", | | 2200 | aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n", |
2203 | __func__, r); | | 2201 | __func__, r); |
2204 | goto fail_2; | | 2202 | goto fail_2; |
2205 | } | | 2203 | } |
2206 | | | 2204 | |
2207 | r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map); | | 2205 | r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map); |
2208 | if (r != 0) { | | 2206 | if (r != 0) { |
2209 | aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n", | | 2207 | aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n", |
2210 | __func__, r); | | 2208 | __func__, r); |
2211 | goto fail_3; | | 2209 | goto fail_3; |
2212 | } | | 2210 | } |
2213 | | | 2211 | |
2214 | r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, | | 2212 | r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, |
2215 | dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT); | | 2213 | dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT); |
2216 | if (r != 0) { | | 2214 | if (r != 0) { |
2217 | aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n", | | 2215 | aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n", |
2218 | __func__, r); | | 2216 | __func__, r); |
2219 | goto fail_4; | | 2217 | goto fail_4; |
2220 | } | | 2218 | } |
2221 | dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; | | 2219 | dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; |
2222 | dma->dma_size = size; | | 2220 | dma->dma_size = size; |
2223 | return 0; | | 2221 | return 0; |
2224 | fail_4: | | 2222 | fail_4: |
2225 | ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map); | | 2223 | ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map); |
2226 | fail_3: | | 2224 | fail_3: |
2227 | bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size); | | 2225 | bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size); |
2228 | fail_2: | | 2226 | fail_2: |
2229 | bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs); | | 2227 | bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs); |
2230 | fail_1: | | 2228 | fail_1: |
2231 | ixgbe_dma_tag_destroy(dma->dma_tag); | | 2229 | ixgbe_dma_tag_destroy(dma->dma_tag); |
2232 | fail_0: | | 2230 | fail_0: |
2233 | | | 2231 | |
2234 | return (r); | | 2232 | return (r); |
2235 | } /* ixgbe_dma_malloc */ | | 2233 | } /* ixgbe_dma_malloc */ |
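 | /* | |  | /* |
 | * Usage sketch (hypothetical caller, not part of this file): | |  | * Usage sketch (hypothetical caller, not part of this file): |
 | * allocate a small DMA area and obtain its bus address: | |  | * allocate a small DMA area and obtain its bus address: |
 | * | |  | * |
 | *	struct ixgbe_dma_alloc dma; | |  | *	struct ixgbe_dma_alloc dma; |
 | *	if (ixgbe_dma_malloc(adapter, 4096, &dma, BUS_DMA_NOWAIT) == 0) { | |  | *	if (ixgbe_dma_malloc(adapter, 4096, &dma, BUS_DMA_NOWAIT) == 0) { |
 | *		bus_addr_t busaddr = dma.dma_paddr; | |  | *		bus_addr_t busaddr = dma.dma_paddr; |
 | *		... use busaddr, then release ... | |  | *		... use busaddr, then release ... |
 | *		ixgbe_dma_free(adapter, &dma); | |  | *		ixgbe_dma_free(adapter, &dma); |
 | *	} | |  | *	} |
 | */ | |  | */ |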
2236 | | | 2234 | |
2237 | /************************************************************************ | | 2235 | /************************************************************************ |
2238 | * ixgbe_dma_free | | 2236 | * ixgbe_dma_free |
2239 | ************************************************************************/ | | 2237 | ************************************************************************/ |
2240 | void | | 2238 | void |
2241 | ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma) | | 2239 | ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma) |
2242 | { | | 2240 | { |
2243 | bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size, | | 2241 | bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size, |
2244 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); | | 2242 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
2245 | ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map); | | 2243 | ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map); |
2246 | bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1); | | 2244 | bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1); |
2247 | ixgbe_dma_tag_destroy(dma->dma_tag); | | 2245 | ixgbe_dma_tag_destroy(dma->dma_tag); |
2248 | } /* ixgbe_dma_free */ | | 2246 | } /* ixgbe_dma_free */ |
2249 | | | 2247 | |
2250 | | | 2248 | |
2251 | /************************************************************************ | | 2249 | /************************************************************************ |
2252 | * ixgbe_allocate_queues | | 2250 | * ixgbe_allocate_queues |
2253 | * | | 2251 | * |
2254 | * Allocate memory for the transmit and receive rings, and then | | 2252 | * Allocate memory for the transmit and receive rings, and then |
2255 | * the descriptors associated with each, called only once at attach. | | 2253 | * the descriptors associated with each, called only once at attach. |
2256 | ************************************************************************/ | | 2254 | ************************************************************************/ |
2257 | int | | 2255 | int |
2258 | ixgbe_allocate_queues(struct adapter *adapter) | | 2256 | ixgbe_allocate_queues(struct adapter *adapter) |
2259 | { | | 2257 | { |
2260 | device_t dev = adapter->dev; | | 2258 | device_t dev = adapter->dev; |
2261 | struct ix_queue *que; | | 2259 | struct ix_queue *que; |
2262 | struct tx_ring *txr; | | 2260 | struct tx_ring *txr; |
2263 | struct rx_ring *rxr; | | 2261 | struct rx_ring *rxr; |
2264 | int rsize, tsize, error = IXGBE_SUCCESS; | | 2262 | int rsize, tsize, error = IXGBE_SUCCESS; |
2265 | int txconf = 0, rxconf = 0; | | 2263 | int txconf = 0, rxconf = 0; |
2266 | | | 2264 | |
2267 | /* First, allocate the top level queue structs */ | | 2265 | /* First, allocate the top level queue structs */ |
2268 | adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) * | | 2266 | adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) * |
2269 | adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); | | 2267 | adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); |
2270 | | | 2268 | |
2271 | /* Second, allocate the TX ring struct memory */ | | 2269 | /* Second, allocate the TX ring struct memory */ |
2272 | adapter->tx_rings = malloc(sizeof(struct tx_ring) * | | 2270 | adapter->tx_rings = malloc(sizeof(struct tx_ring) * |
2273 | adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); | | 2271 | adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); |
2274 | | | 2272 | |
2275 | /* Third, allocate the RX ring */ | | 2273 | /* Third, allocate the RX ring */ |
2276 | adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) * | | 2274 | adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) * |
2277 | adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); | | 2275 | adapter->num_queues, M_DEVBUF, M_WAITOK | M_ZERO); |
2278 | | | 2276 | |
2279 | /* For the ring itself */ | | 2277 | /* For the ring itself */ |
2280 | tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc), | | 2278 | tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc), |
2281 | DBA_ALIGN); | | 2279 | DBA_ALIGN); |
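 | /* | |  | /* |
 | * Worked example (descriptor count and alignment are assumptions): | |  | * Worked example (descriptor count and alignment are assumptions): |
 | * 1024 TX descriptors of 16 bytes each give 16384 bytes, and | |  | * 1024 TX descriptors of 16 bytes each give 16384 bytes, and |
 | * roundup2(16384, DBA_ALIGN) stays 16384 when DBA_ALIGN is 128. | |  | * roundup2(16384, DBA_ALIGN) stays 16384 when DBA_ALIGN is 128. |
 | */ | |  | */ |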
2282 | | | 2280 | |
2283 | /* | | 2281 | /* |
2284 | * Now set up the TX queues. txconf is needed to handle the | | 2282 | * Now set up the TX queues. txconf is needed to handle the |
2285 | * possibility that things fail midcourse, in which case we | | 2283 | * possibility that things fail midcourse, in which case we |
2286 | * need to unwind the allocated memory gracefully. | | 2284 | * need to unwind the allocated memory gracefully. |
2287 | */ | | 2285 | */ |
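 | /* | |  | /* |
 | * For example (illustration only): if ixgbe_dma_malloc() fails | |  | * For example (illustration only): if ixgbe_dma_malloc() fails |
 | * for ring 3, txconf is 3 at the goto, and the err_tx_desc | |  | * for ring 3, txconf is 3 at the goto, and the err_tx_desc |
 | * unwind frees exactly the descriptor areas of rings 0..2. | |  | * unwind frees exactly the descriptor areas of rings 0..2. |
 | */ | |  | */ |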
2288 | for (int i = 0; i < adapter->num_queues; i++, txconf++) { | | 2286 | for (int i = 0; i < adapter->num_queues; i++, txconf++) { |
2289 | /* Set up some basics */ | | 2287 | /* Set up some basics */ |
2290 | txr = &adapter->tx_rings[i]; | | 2288 | txr = &adapter->tx_rings[i]; |
2291 | txr->adapter = adapter; | | 2289 | txr->adapter = adapter; |
2292 | txr->txr_interq = NULL; | | 2290 | txr->txr_interq = NULL; |
2293 | /* In case SR-IOV is enabled, align the index properly */ | | 2291 | /* In case SR-IOV is enabled, align the index properly */ |
2294 | #ifdef PCI_IOV | | 2292 | #ifdef PCI_IOV |
2295 | txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, | | 2293 | txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, |
2296 | i); | | 2294 | i); |
2297 | #else | | 2295 | #else |
2298 | txr->me = i; | | 2296 | txr->me = i; |
2299 | #endif | | 2297 | #endif |
2300 | txr->num_desc = adapter->num_tx_desc; | | 2298 | txr->num_desc = adapter->num_tx_desc; |
2301 | | | 2299 | |
2302 | /* Initialize the TX side lock */ | | 2300 | /* Initialize the TX side lock */ |
2303 | mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET); | | 2301 | mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET); |
2304 | | | 2302 | |
2305 | if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma, | | 2303 | if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma, |
2306 | BUS_DMA_NOWAIT)) { | | 2304 | BUS_DMA_NOWAIT)) { |
2307 | aprint_error_dev(dev, | | 2305 | aprint_error_dev(dev, |
2308 | "Unable to allocate TX Descriptor memory\n"); | | 2306 | "Unable to allocate TX Descriptor memory\n"); |
2309 | error = ENOMEM; | | 2307 | error = ENOMEM; |
2310 | goto err_tx_desc; | | 2308 | goto err_tx_desc; |
2311 | } | | 2309 | } |
2312 | txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; | | 2310 | txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; |
2313 | bzero((void *)txr->tx_base, tsize); | | 2311 | bzero((void *)txr->tx_base, tsize); |
2314 | | | 2312 | |
2315 | /* Now allocate transmit buffers for the ring */ | | 2313 | /* Now allocate transmit buffers for the ring */ |
2316 | if (ixgbe_allocate_transmit_buffers(txr)) { | | 2314 | if (ixgbe_allocate_transmit_buffers(txr)) { |
2317 | aprint_error_dev(dev, | | 2315 | aprint_error_dev(dev, |
2318 | "Critical Failure setting up transmit buffers\n"); | | 2316 | "Critical Failure setting up transmit buffers\n"); |
2319 | error = ENOMEM; | | 2317 | error = ENOMEM; |
2320 | goto err_tx_desc; | | 2318 | goto err_tx_desc; |
2321 | } | | 2319 | } |
2322 | if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) { | | 2320 | if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) { |
2323 | /* Allocate a buf ring */ | | 2321 | /* Allocate a buf ring */ |
2324 | txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP); | | 2322 | txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP); |
2325 | if (txr->txr_interq == NULL) { | | 2323 | if (txr->txr_interq == NULL) { |
2326 | aprint_error_dev(dev, | | 2324 | aprint_error_dev(dev, |
2327 | "Critical Failure setting up buf ring\n"); | | 2325 | "Critical Failure setting up buf ring\n"); |
2328 | error = ENOMEM; | | 2326 | error = ENOMEM; |
2329 | goto err_tx_desc; | | 2327 | goto err_tx_desc; |
2330 | } | | 2328 | } |
2331 | } | | 2329 | } |
2332 | } | | 2330 | } |
2333 | | | 2331 | |
2334 | /* | | 2332 | /* |
2335 | * Next the RX queues... | | 2333 | * Next the RX queues... |
2336 | */ | | 2334 | */ |
2337 | rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc), | | 2335 | rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc), |
2338 | DBA_ALIGN); | | 2336 | DBA_ALIGN); |
2339 | for (int i = 0; i < adapter->num_queues; i++, rxconf++) { | | 2337 | for (int i = 0; i < adapter->num_queues; i++, rxconf++) { |
2340 | rxr = &adapter->rx_rings[i]; | | 2338 | rxr = &adapter->rx_rings[i]; |
2341 | /* Set up some basics */ | | 2339 | /* Set up some basics */ |
2342 | rxr->adapter = adapter; | | 2340 | rxr->adapter = adapter; |
2343 | #ifdef PCI_IOV | | 2341 | #ifdef PCI_IOV |
2344 | /* In case SR-IOV is enabled, align the index properly */ | | 2342 | /* In case SR-IOV is enabled, align the index properly */ |
2345 | rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, | | 2343 | rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, |
2346 | i); | | 2344 | i); |
2347 | #else | | 2345 | #else |
2348 | rxr->me = i; | | 2346 | rxr->me = i; |
2349 | #endif | | 2347 | #endif |
2350 | rxr->num_desc = adapter->num_rx_desc; | | 2348 | rxr->num_desc = adapter->num_rx_desc; |
2351 | | | 2349 | |
2352 | /* Initialize the RX side lock */ | | 2350 | /* Initialize the RX side lock */ |
2353 | mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET); | | 2351 | mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET); |
2354 | | | 2352 | |
2355 | if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma, | | 2353 | if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma, |
2356 | BUS_DMA_NOWAIT)) { | | 2354 | BUS_DMA_NOWAIT)) { |
2357 | aprint_error_dev(dev, | | 2355 | aprint_error_dev(dev, |
2358 | "Unable to allocate RxDescriptor memory\n"); | | 2356 | "Unable to allocate RxDescriptor memory\n"); |
2359 | error = ENOMEM; | | 2357 | error = ENOMEM; |
2360 | goto err_rx_desc; | | 2358 | goto err_rx_desc; |
2361 | } | | 2359 | } |
2362 | rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; | | 2360 | rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; |
2363 | bzero((void *)rxr->rx_base, rsize); | | 2361 | bzero((void *)rxr->rx_base, rsize); |
2364 | | | 2362 | |
2365 | /* Allocate receive buffers for the ring */ | | 2363 | /* Allocate receive buffers for the ring */ |
2366 | if (ixgbe_allocate_receive_buffers(rxr)) { | | 2364 | if (ixgbe_allocate_receive_buffers(rxr)) { |
2367 | aprint_error_dev(dev, | | 2365 | aprint_error_dev(dev, |
2368 | "Critical Failure setting up receive buffers\n"); | | 2366 | "Critical Failure setting up receive buffers\n"); |
2369 | error = ENOMEM; | | 2367 | error = ENOMEM; |
2370 | goto err_rx_desc; | | 2368 | goto err_rx_desc; |
2371 | } | | 2369 | } |
2372 | } | | 2370 | } |
2373 | | | 2371 | |
2374 | /* | | 2372 | /* |
2375 | * Finally set up the queue holding structs | | 2373 | * Finally set up the queue holding structs |
2376 | */ | | 2374 | */ |
2377 | for (int i = 0; i < adapter->num_queues; i++) { | | 2375 | for (int i = 0; i < adapter->num_queues; i++) { |
2378 | que = &adapter->queues[i]; | | 2376 | que = &adapter->queues[i]; |
2379 | que->adapter = adapter; | | 2377 | que->adapter = adapter; |
2380 | que->me = i; | | 2378 | que->me = i; |
2381 | que->txr = &adapter->tx_rings[i]; | | 2379 | que->txr = &adapter->tx_rings[i]; |
2382 | que->rxr = &adapter->rx_rings[i]; | | 2380 | que->rxr = &adapter->rx_rings[i]; |
2383 | | | 2381 | |
2384 | mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET); | | 2382 | mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET); |
2385 | que->disabled_count = 0; | | 2383 | que->disabled_count = 0; |
2386 | } | | 2384 | } |
2387 | | | 2385 | |
2388 | return (0); | | 2386 | return (0); |
2389 | | | 2387 | |
2390 | err_rx_desc: | | 2388 | err_rx_desc: |
2391 | for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) | | 2389 | for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) |
2392 | ixgbe_dma_free(adapter, &rxr->rxdma); | | 2390 | ixgbe_dma_free(adapter, &rxr->rxdma); |
2393 | err_tx_desc: | | 2391 | err_tx_desc: |
2394 | for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) | | 2392 | for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) |
2395 | ixgbe_dma_free(adapter, &txr->txdma); | | 2393 | ixgbe_dma_free(adapter, &txr->txdma); |
2396 | free(adapter->rx_rings, M_DEVBUF); | | 2394 | free(adapter->rx_rings, M_DEVBUF); |
2397 | free(adapter->tx_rings, M_DEVBUF); | | 2395 | free(adapter->tx_rings, M_DEVBUF); |
2398 | free(adapter->queues, M_DEVBUF); | | 2396 | free(adapter->queues, M_DEVBUF); |
2399 | return (error); | | 2397 | return (error); |
2400 | } /* ixgbe_allocate_queues */ | | 2398 | } /* ixgbe_allocate_queues */ |
2401 | | | 2399 | |
2402 | /************************************************************************ | | 2400 | /************************************************************************ |
2403 | * ixgbe_free_queues | | 2401 | * ixgbe_free_queues |
2404 | * | | 2402 | * |
2405 | * Free descriptors for the transmit and receive rings, and then | | 2403 | * Free descriptors for the transmit and receive rings, and then |
2406 | * the memory associated with each. | | 2404 | * the memory associated with each. |
2407 | ************************************************************************/ | | 2405 | ************************************************************************/ |
2408 | void | | 2406 | void |
2409 | ixgbe_free_queues(struct adapter *adapter) | | 2407 | ixgbe_free_queues(struct adapter *adapter) |
2410 | { | | 2408 | { |
2411 | struct ix_queue *que; | | 2409 | struct ix_queue *que; |
2412 | int i; | | 2410 | int i; |
2413 | | | 2411 | |
2414 | ixgbe_free_transmit_structures(adapter); | | 2412 | ixgbe_free_transmit_structures(adapter); |
2415 | ixgbe_free_receive_structures(adapter); | | 2413 | ixgbe_free_receive_structures(adapter); |
2416 | for (i = 0; i < adapter->num_queues; i++) { | | 2414 | for (i = 0; i < adapter->num_queues; i++) { |
2417 | que = &adapter->queues[i]; | | 2415 | que = &adapter->queues[i]; |
2418 | mutex_destroy(&que->dc_mtx); | | 2416 | mutex_destroy(&que->dc_mtx); |
2419 | } | | 2417 | } |
2420 | free(adapter->queues, M_DEVBUF); | | 2418 | free(adapter->queues, M_DEVBUF); |
2421 | } /* ixgbe_free_queues */ | | 2419 | } /* ixgbe_free_queues */ |