| @@ -1,1232 +1,1232 @@ | | | @@ -1,1232 +1,1232 @@ |
1 | /* $NetBSD: ix_txrx.c,v 1.24.2.13 2018/09/07 12:37:20 martin Exp $ */ | | 1 | /* $NetBSD: ix_txrx.c,v 1.24.2.14 2018/12/20 11:34:33 martin Exp $ */ |
2 | | | 2 | |
3 | /****************************************************************************** | | 3 | /****************************************************************************** |
4 | | | 4 | |
5 | Copyright (c) 2001-2017, Intel Corporation | | 5 | Copyright (c) 2001-2017, Intel Corporation |
6 | All rights reserved. | | 6 | All rights reserved. |
7 | | | 7 | |
8 | Redistribution and use in source and binary forms, with or without | | 8 | Redistribution and use in source and binary forms, with or without |
9 | modification, are permitted provided that the following conditions are met: | | 9 | modification, are permitted provided that the following conditions are met: |
10 | | | 10 | |
11 | 1. Redistributions of source code must retain the above copyright notice, | | 11 | 1. Redistributions of source code must retain the above copyright notice, |
12 | this list of conditions and the following disclaimer. | | 12 | this list of conditions and the following disclaimer. |
13 | | | 13 | |
14 | 2. Redistributions in binary form must reproduce the above copyright | | 14 | 2. Redistributions in binary form must reproduce the above copyright |
15 | notice, this list of conditions and the following disclaimer in the | | 15 | notice, this list of conditions and the following disclaimer in the |
16 | documentation and/or other materials provided with the distribution. | | 16 | documentation and/or other materials provided with the distribution. |
17 | | | 17 | |
18 | 3. Neither the name of the Intel Corporation nor the names of its | | 18 | 3. Neither the name of the Intel Corporation nor the names of its |
19 | contributors may be used to endorse or promote products derived from | | 19 | contributors may be used to endorse or promote products derived from |
20 | this software without specific prior written permission. | | 20 | this software without specific prior written permission. |
21 | | | 21 | |
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
32 | POSSIBILITY OF SUCH DAMAGE. | | 32 | POSSIBILITY OF SUCH DAMAGE. |
33 | | | 33 | |
34 | ******************************************************************************/ | | 34 | ******************************************************************************/ |
35 | /*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/ | | 35 | /*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/ |
36 | | | 36 | |
37 | /* | | 37 | /* |
38 | * Copyright (c) 2011 The NetBSD Foundation, Inc. | | 38 | * Copyright (c) 2011 The NetBSD Foundation, Inc. |
39 | * All rights reserved. | | 39 | * All rights reserved. |
40 | * | | 40 | * |
41 | * This code is derived from software contributed to The NetBSD Foundation | | 41 | * This code is derived from software contributed to The NetBSD Foundation |
42 | * by Coyote Point Systems, Inc. | | 42 | * by Coyote Point Systems, Inc. |
43 | * | | 43 | * |
44 | * Redistribution and use in source and binary forms, with or without | | 44 | * Redistribution and use in source and binary forms, with or without |
45 | * modification, are permitted provided that the following conditions | | 45 | * modification, are permitted provided that the following conditions |
46 | * are met: | | 46 | * are met: |
47 | * 1. Redistributions of source code must retain the above copyright | | 47 | * 1. Redistributions of source code must retain the above copyright |
48 | * notice, this list of conditions and the following disclaimer. | | 48 | * notice, this list of conditions and the following disclaimer. |
49 | * 2. Redistributions in binary form must reproduce the above copyright | | 49 | * 2. Redistributions in binary form must reproduce the above copyright |
50 | * notice, this list of conditions and the following disclaimer in the | | 50 | * notice, this list of conditions and the following disclaimer in the |
51 | * documentation and/or other materials provided with the distribution. | | 51 | * documentation and/or other materials provided with the distribution. |
52 | * | | 52 | * |
53 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 53 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
54 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 54 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
55 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 55 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
56 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 56 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
57 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 57 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
58 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 58 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
59 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 59 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
60 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 60 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
61 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 61 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
62 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 62 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
63 | * POSSIBILITY OF SUCH DAMAGE. | | 63 | * POSSIBILITY OF SUCH DAMAGE. |
64 | */ | | 64 | */ |
65 | | | 65 | |
66 | #include "opt_inet.h" | | 66 | #include "opt_inet.h" |
67 | #include "opt_inet6.h" | | 67 | #include "opt_inet6.h" |
68 | | | 68 | |
69 | #include "ixgbe.h" | | 69 | #include "ixgbe.h" |
70 | | | 70 | |
71 | /* | | 71 | /* |
72 | * HW RSC control: | | 72 | * HW RSC control: |
73 | * this feature only works with | | 73 | * this feature only works with |
74 | * IPv4, and only on 82599 and later. | | 74 | * IPv4, and only on 82599 and later. |
75 | * Also this will cause IP forwarding to | | 75 | * Also this will cause IP forwarding to |
76 | * fail and, unlike LRO, that cannot be | | 76 | * fail and, unlike LRO, that cannot be |
77 | * disabled by the stack. For all these | | 77 | * disabled by the stack. For all these |
78 | * reasons I've deemed it best to leave | | 78 | * reasons I've deemed it best to leave |
79 | * this off and not bother with a tunable | | 79 | * this off and not bother with a tunable |
80 | * interface; enabling it would require | | 80 | * interface; enabling it would require |
81 | * recompiling. | | 81 | * recompiling. |
82 | */ | | 82 | */ |
83 | static bool ixgbe_rsc_enable = FALSE; | | 83 | static bool ixgbe_rsc_enable = FALSE; |
84 | | | 84 | |
85 | /* | | 85 | /* |
86 | * For Flow Director: this is the | | 86 | * For Flow Director: this is the |
87 | * number of TX packets we sample | | 87 | * number of TX packets we sample |
88 | * for the filter pool; this means | | 88 | * for the filter pool; this means |
89 | * every 20th packet will be probed. | | 89 | * every 20th packet will be probed. |
90 | * | | 90 | * |
91 | * This feature can be disabled by | | 91 | * This feature can be disabled by |
92 | * setting this to 0. | | 92 | * setting this to 0. |
93 | */ | | 93 | */ |
94 | static int atr_sample_rate = 20; | | 94 | static int atr_sample_rate = 20; |
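
The comment above describes plain 1-in-N counter sampling: ixgbe_xmit() below increments a per-ring counter for each packet and submits a flow-director sample once the counter reaches atr_sample_rate, then resets it. A minimal standalone sketch of that pattern (names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative 1-in-N sampler; rate == 0 disables sampling,
 * mirroring how atr_sample_rate = 0 turns the feature off. */
struct sampler {
        uint32_t count;
        uint32_t rate;
};

static bool
sampler_take(struct sampler *s)
{
        if (s->rate == 0)
                return false;           /* sampling disabled */
        if (++s->count >= s->rate) {
                s->count = 0;           /* sample this packet, restart */
                return true;
        }
        return false;
}
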
95 | | | 95 | |
96 | /************************************************************************ | | 96 | /************************************************************************ |
97 | * Local Function prototypes | | 97 | * Local Function prototypes |
98 | ************************************************************************/ | | 98 | ************************************************************************/ |
99 | static void ixgbe_setup_transmit_ring(struct tx_ring *); | | 99 | static void ixgbe_setup_transmit_ring(struct tx_ring *); |
100 | static void ixgbe_free_transmit_buffers(struct tx_ring *); | | 100 | static void ixgbe_free_transmit_buffers(struct tx_ring *); |
101 | static int ixgbe_setup_receive_ring(struct rx_ring *); | | 101 | static int ixgbe_setup_receive_ring(struct rx_ring *); |
102 | static void ixgbe_free_receive_buffers(struct rx_ring *); | | 102 | static void ixgbe_free_receive_buffers(struct rx_ring *); |
103 | static void ixgbe_rx_checksum(u32, struct mbuf *, u32, | | 103 | static void ixgbe_rx_checksum(u32, struct mbuf *, u32, |
104 | struct ixgbe_hw_stats *); | | 104 | struct ixgbe_hw_stats *); |
105 | static void ixgbe_refresh_mbufs(struct rx_ring *, int); | | 105 | static void ixgbe_refresh_mbufs(struct rx_ring *, int); |
106 | static void ixgbe_drain(struct ifnet *, struct tx_ring *); | | 106 | static void ixgbe_drain(struct ifnet *, struct tx_ring *); |
107 | static int ixgbe_xmit(struct tx_ring *, struct mbuf *); | | 107 | static int ixgbe_xmit(struct tx_ring *, struct mbuf *); |
108 | static int ixgbe_tx_ctx_setup(struct tx_ring *, | | 108 | static int ixgbe_tx_ctx_setup(struct tx_ring *, |
109 | struct mbuf *, u32 *, u32 *); | | 109 | struct mbuf *, u32 *, u32 *); |
110 | static int ixgbe_tso_setup(struct tx_ring *, | | 110 | static int ixgbe_tso_setup(struct tx_ring *, |
111 | struct mbuf *, u32 *, u32 *); | | 111 | struct mbuf *, u32 *, u32 *); |
112 | static __inline void ixgbe_rx_discard(struct rx_ring *, int); | | 112 | static __inline void ixgbe_rx_discard(struct rx_ring *, int); |
113 | static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, | | 113 | static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, |
114 | struct mbuf *, u32); | | 114 | struct mbuf *, u32); |
115 | static int ixgbe_dma_malloc(struct adapter *, bus_size_t, | | 115 | static int ixgbe_dma_malloc(struct adapter *, bus_size_t, |
116 | struct ixgbe_dma_alloc *, int); | | 116 | struct ixgbe_dma_alloc *, int); |
117 | static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); | | 117 | static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); |
118 | | | 118 | |
119 | static void ixgbe_setup_hw_rsc(struct rx_ring *); | | 119 | static void ixgbe_setup_hw_rsc(struct rx_ring *); |
120 | | | 120 | |
121 | /************************************************************************ | | 121 | /************************************************************************ |
122 | * ixgbe_legacy_start_locked - Transmit entry point | | 122 | * ixgbe_legacy_start_locked - Transmit entry point |
123 | * | | 123 | * |
124 | * Called by the stack to initiate a transmit. | | 124 | * Called by the stack to initiate a transmit. |
125 | * The driver will remain in this routine as long as there are | | 125 | * The driver will remain in this routine as long as there are |
126 | * packets to transmit and transmit resources are available. | | 126 | * packets to transmit and transmit resources are available. |
127 | * In case resources are not available, the stack is notified | | 127 | * In case resources are not available, the stack is notified |
128 | * and the packet is requeued. | | 128 | * and the packet is requeued. |
129 | ************************************************************************/ | | 129 | ************************************************************************/ |
130 | int | | 130 | int |
131 | ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr) | | 131 | ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr) |
132 | { | | 132 | { |
133 | int rc; | | 133 | int rc; |
134 | struct mbuf *m_head; | | 134 | struct mbuf *m_head; |
135 | struct adapter *adapter = txr->adapter; | | 135 | struct adapter *adapter = txr->adapter; |
136 | | | 136 | |
137 | IXGBE_TX_LOCK_ASSERT(txr); | | 137 | IXGBE_TX_LOCK_ASSERT(txr); |
138 | | | 138 | |
139 | if (!adapter->link_active) { | | 139 | if (!adapter->link_active) { |
140 | /* | | 140 | /* |
141 | * discard all packets buffered in IFQ to avoid | | 141 | * discard all packets buffered in IFQ to avoid |
142 | * sending old packets at the next link up. | | 142 | * sending old packets at the next link up. |
143 | */ | | 143 | */ |
144 | ixgbe_drain(ifp, txr); | | 144 | ixgbe_drain(ifp, txr); |
145 | return (ENETDOWN); | | 145 | return (ENETDOWN); |
146 | } | | 146 | } |
147 | if ((ifp->if_flags & IFF_RUNNING) == 0) | | 147 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
148 | return (ENETDOWN); | | 148 | return (ENETDOWN); |
149 | if (txr->txr_no_space) | | 149 | if (txr->txr_no_space) |
150 | return (ENETDOWN); | | 150 | return (ENETDOWN); |
151 | | | 151 | |
152 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { | | 152 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { |
153 | if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) | | 153 | if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) |
154 | break; | | 154 | break; |
155 | | | 155 | |
156 | IFQ_POLL(&ifp->if_snd, m_head); | | 156 | IFQ_POLL(&ifp->if_snd, m_head); |
157 | if (m_head == NULL) | | 157 | if (m_head == NULL) |
158 | break; | | 158 | break; |
159 | | | 159 | |
160 | if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) { | | 160 | if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) { |
161 | break; | | 161 | break; |
162 | } | | 162 | } |
163 | IFQ_DEQUEUE(&ifp->if_snd, m_head); | | 163 | IFQ_DEQUEUE(&ifp->if_snd, m_head); |
164 | if (rc != 0) { | | 164 | if (rc != 0) { |
165 | m_freem(m_head); | | 165 | m_freem(m_head); |
166 | continue; | | 166 | continue; |
167 | } | | 167 | } |
168 | | | 168 | |
169 | /* Send a copy of the frame to the BPF listener */ | | 169 | /* Send a copy of the frame to the BPF listener */ |
170 | bpf_mtap(ifp, m_head); | | 170 | bpf_mtap(ifp, m_head); |
171 | } | | 171 | } |
172 | | | 172 | |
173 | return IXGBE_SUCCESS; | | 173 | return IXGBE_SUCCESS; |
174 | } /* ixgbe_legacy_start_locked */ | | 174 | } /* ixgbe_legacy_start_locked */ |
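
ixgbe_legacy_start_locked() peeks at the head of if_snd with IFQ_POLL() and only dequeues after ixgbe_xmit() returns, so a packet that fails with a transient EAGAIN stays queued for the next attempt, while hard failures are dequeued and dropped. A minimal sketch of this peek-then-commit pattern, assuming a simple singly linked queue instead of the IFQ macros:

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>

struct pkt { struct pkt *next; };
struct queue { struct pkt *head; };

/* Stand-in for the hardware transmit attempt; returns 0 on
 * success, EAGAIN for "retry later", anything else is fatal. */
static int xmit(struct pkt *p) { (void)p; return 0; }

static void
send_loop(struct queue *q)
{
        struct pkt *p;
        int rc;

        while ((p = q->head) != NULL) {         /* peek, like IFQ_POLL() */
                rc = xmit(p);
                if (rc == EAGAIN)
                        break;                  /* transient: leave p queued */
                q->head = p->next;              /* commit, like IFQ_DEQUEUE() */
                if (rc != 0)
                        free(p);                /* hard error: drop packet */
        }
}
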
175 | | | 175 | |
176 | /************************************************************************ | | 176 | /************************************************************************ |
177 | * ixgbe_legacy_start | | 177 | * ixgbe_legacy_start |
178 | * | | 178 | * |
179 | * Called by the stack, this always uses the first tx ring, | | 179 | * Called by the stack, this always uses the first tx ring, |
180 | * and should not be used with multiqueue tx enabled. | | 180 | * and should not be used with multiqueue tx enabled. |
181 | ************************************************************************/ | | 181 | ************************************************************************/ |
182 | void | | 182 | void |
183 | ixgbe_legacy_start(struct ifnet *ifp) | | 183 | ixgbe_legacy_start(struct ifnet *ifp) |
184 | { | | 184 | { |
185 | struct adapter *adapter = ifp->if_softc; | | 185 | struct adapter *adapter = ifp->if_softc; |
186 | struct tx_ring *txr = adapter->tx_rings; | | 186 | struct tx_ring *txr = adapter->tx_rings; |
187 | | | 187 | |
188 | if (ifp->if_flags & IFF_RUNNING) { | | 188 | if (ifp->if_flags & IFF_RUNNING) { |
189 | IXGBE_TX_LOCK(txr); | | 189 | IXGBE_TX_LOCK(txr); |
190 | ixgbe_legacy_start_locked(ifp, txr); | | 190 | ixgbe_legacy_start_locked(ifp, txr); |
191 | IXGBE_TX_UNLOCK(txr); | | 191 | IXGBE_TX_UNLOCK(txr); |
192 | } | | 192 | } |
193 | } /* ixgbe_legacy_start */ | | 193 | } /* ixgbe_legacy_start */ |
194 | | | 194 | |
195 | /************************************************************************ | | 195 | /************************************************************************ |
196 | * ixgbe_mq_start - Multiqueue Transmit Entry Point | | 196 | * ixgbe_mq_start - Multiqueue Transmit Entry Point |
197 | * | | 197 | * |
198 | * (if_transmit function) | | 198 | * (if_transmit function) |
199 | ************************************************************************/ | | 199 | ************************************************************************/ |
200 | int | | 200 | int |
201 | ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m) | | 201 | ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m) |
202 | { | | 202 | { |
203 | struct adapter *adapter = ifp->if_softc; | | 203 | struct adapter *adapter = ifp->if_softc; |
204 | struct tx_ring *txr; | | 204 | struct tx_ring *txr; |
205 | int i; | | 205 | int i; |
206 | #ifdef RSS | | 206 | #ifdef RSS |
207 | uint32_t bucket_id; | | 207 | uint32_t bucket_id; |
208 | #endif | | 208 | #endif |
209 | | | 209 | |
210 | /* | | 210 | /* |
211 | * When doing RSS, map it to the same outbound queue | | 211 | * When doing RSS, map it to the same outbound queue |
212 | * as the incoming flow would be mapped to. | | 212 | * as the incoming flow would be mapped to. |
213 | * | | 213 | * |
214 | * If everything is set up correctly, it should be the | | 214 | * If everything is set up correctly, it should be the |
215 | * same bucket as the CPU we are currently on. | | 215 | * same bucket as the CPU we are currently on. |
216 | */ | | 216 | */ |
217 | #ifdef RSS | | 217 | #ifdef RSS |
218 | if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { | | 218 | if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { |
219 | if ((adapter->feat_en & IXGBE_FEATURE_RSS) && | | 219 | if ((adapter->feat_en & IXGBE_FEATURE_RSS) && |
220 | (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), | | 220 | (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), |
221 | &bucket_id) == 0)) { | | 221 | &bucket_id) == 0)) { |
222 | i = bucket_id % adapter->num_queues; | | 222 | i = bucket_id % adapter->num_queues; |
223 | #ifdef IXGBE_DEBUG | | 223 | #ifdef IXGBE_DEBUG |
224 | if (bucket_id > adapter->num_queues) | | 224 | if (bucket_id > adapter->num_queues) |
225 | if_printf(ifp, | | 225 | if_printf(ifp, |
226 | "bucket_id (%d) > num_queues (%d)\n", | | 226 | "bucket_id (%d) > num_queues (%d)\n", |
227 | bucket_id, adapter->num_queues); | | 227 | bucket_id, adapter->num_queues); |
228 | #endif | | 228 | #endif |
229 | } else | | 229 | } else |
230 | i = m->m_pkthdr.flowid % adapter->num_queues; | | 230 | i = m->m_pkthdr.flowid % adapter->num_queues; |
231 | } else | | 231 | } else |
232 | #endif /* RSS */ | | 232 | #endif /* RSS */ |
233 | i = cpu_index(curcpu()) % adapter->num_queues; | | 233 | i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues; |
234 | | | 234 | |
235 | /* Check for a hung queue and pick alternative */ | | 235 | /* Check for a hung queue and pick alternative */ |
236 | if (((1 << i) & adapter->active_queues) == 0) | | 236 | if (((1 << i) & adapter->active_queues) == 0) |
237 | i = ffs64(adapter->active_queues); | | 237 | i = ffs64(adapter->active_queues); |
238 | | | 238 | |
239 | txr = &adapter->tx_rings[i]; | | 239 | txr = &adapter->tx_rings[i]; |
240 | | | 240 | |
241 | if (__predict_false(!pcq_put(txr->txr_interq, m))) { | | 241 | if (__predict_false(!pcq_put(txr->txr_interq, m))) { |
242 | m_freem(m); | | 242 | m_freem(m); |
243 | txr->pcq_drops.ev_count++; | | 243 | txr->pcq_drops.ev_count++; |
244 | return ENOBUFS; | | 244 | return ENOBUFS; |
245 | } | | 245 | } |
246 | if (IXGBE_TX_TRYLOCK(txr)) { | | 246 | if (IXGBE_TX_TRYLOCK(txr)) { |
247 | ixgbe_mq_start_locked(ifp, txr); | | 247 | ixgbe_mq_start_locked(ifp, txr); |
248 | IXGBE_TX_UNLOCK(txr); | | 248 | IXGBE_TX_UNLOCK(txr); |
249 | } else { | | 249 | } else { |
250 | if (adapter->txrx_use_workqueue) { | | 250 | if (adapter->txrx_use_workqueue) { |
251 | u_int *enqueued; | | 251 | u_int *enqueued; |
252 | | | 252 | |
253 | /* | | 253 | /* |
254 | * This function itself is not called in interrupt | | 254 | * This function itself is not called in interrupt |
255 | * context; however, it can be called in fast softint | | 255 | * context; however, it can be called in fast softint |
256 | * context right after receiving forwarded packets. | | 256 | * context right after receiving forwarded packets. |
257 | * So the workqueue must be protected against double | | 257 | * So the workqueue must be protected against double |
258 | * enqueuing when the machine handles both locally | | 258 | * enqueuing when the machine handles both locally |
259 | * generated and forwarded packets. | | 259 | * generated and forwarded packets. |
260 | */ | | 260 | */ |
261 | enqueued = percpu_getref(adapter->txr_wq_enqueued); | | 261 | enqueued = percpu_getref(adapter->txr_wq_enqueued); |
262 | if (*enqueued == 0) { | | 262 | if (*enqueued == 0) { |
263 | *enqueued = 1; | | 263 | *enqueued = 1; |
264 | percpu_putref(adapter->txr_wq_enqueued); | | 264 | percpu_putref(adapter->txr_wq_enqueued); |
265 | workqueue_enqueue(adapter->txr_wq, | | 265 | workqueue_enqueue(adapter->txr_wq, |
266 | &txr->wq_cookie, curcpu()); | | 266 | &txr->wq_cookie, curcpu()); |
267 | } else | | 267 | } else |
268 | percpu_putref(adapter->txr_wq_enqueued); | | 268 | percpu_putref(adapter->txr_wq_enqueued); |
269 | } else | | 269 | } else |
270 | softint_schedule(txr->txr_si); | | 270 | softint_schedule(txr->txr_si); |
271 | } | | 271 | } |
272 | | | 272 | |
273 | return (0); | | 273 | return (0); |
274 | } /* ixgbe_mq_start */ | | 274 | } /* ixgbe_mq_start */ |
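
The txr_wq_enqueued flag above is a per-CPU latch: the first contended caller on a CPU sets it and enqueues the work, later callers on the same CPU skip the enqueue, and the worker clears it again before draining (see ixgbe_deferred_mq_start_work() below). A sketch of the latch logic with plain variables in place of NetBSD's percpu(9) and workqueue(9) APIs, shown for a single CPU:

/* One latch per CPU in the real driver; a single flag suffices
 * to illustrate the logic (illustrative names throughout). */
static unsigned int wq_enqueued;

/* Transmit path, TX lock contended: enqueue work at most once. */
static void
schedule_deferred_tx(void (*enqueue)(void))
{
        if (wq_enqueued == 0) {
                wq_enqueued = 1;        /* latch closed: we enqueue */
                enqueue();
        }
        /* else: work already pending on this CPU, nothing to do */
}

/* Worker prologue: re-arm the latch before draining the queue. */
static void
deferred_tx_prologue(void)
{
        wq_enqueued = 0;
}
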
275 | | | 275 | |
276 | /************************************************************************ | | 276 | /************************************************************************ |
277 | * ixgbe_mq_start_locked | | 277 | * ixgbe_mq_start_locked |
278 | ************************************************************************/ | | 278 | ************************************************************************/ |
279 | int | | 279 | int |
280 | ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) | | 280 | ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) |
281 | { | | 281 | { |
282 | struct mbuf *next; | | 282 | struct mbuf *next; |
283 | int enqueued = 0, err = 0; | | 283 | int enqueued = 0, err = 0; |
284 | | | 284 | |
285 | if (!txr->adapter->link_active) { | | 285 | if (!txr->adapter->link_active) { |
286 | /* | | 286 | /* |
287 | * discard all packets buffered in txr_interq to avoid | | 287 | * discard all packets buffered in txr_interq to avoid |
288 | * sending old packets at the next link up. | | 288 | * sending old packets at the next link up. |
289 | */ | | 289 | */ |
290 | ixgbe_drain(ifp, txr); | | 290 | ixgbe_drain(ifp, txr); |
291 | return (ENETDOWN); | | 291 | return (ENETDOWN); |
292 | } | | 292 | } |
293 | if ((ifp->if_flags & IFF_RUNNING) == 0) | | 293 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
294 | return (ENETDOWN); | | 294 | return (ENETDOWN); |
295 | if (txr->txr_no_space) | | 295 | if (txr->txr_no_space) |
296 | return (ENETDOWN); | | 296 | return (ENETDOWN); |
297 | | | 297 | |
298 | /* Process the queue */ | | 298 | /* Process the queue */ |
299 | while ((next = pcq_get(txr->txr_interq)) != NULL) { | | 299 | while ((next = pcq_get(txr->txr_interq)) != NULL) { |
300 | if ((err = ixgbe_xmit(txr, next)) != 0) { | | 300 | if ((err = ixgbe_xmit(txr, next)) != 0) { |
301 | m_freem(next); | | 301 | m_freem(next); |
302 | /* All errors are counted in ixgbe_xmit() */ | | 302 | /* All errors are counted in ixgbe_xmit() */ |
303 | break; | | 303 | break; |
304 | } | | 304 | } |
305 | enqueued++; | | 305 | enqueued++; |
306 | #if __FreeBSD_version >= 1100036 | | 306 | #if __FreeBSD_version >= 1100036 |
307 | /* | | 307 | /* |
308 | * Since we're looking at the tx ring, we can check | | 308 | * Since we're looking at the tx ring, we can check |
309 | * to see if we're a VF by examining our tail register | | 309 | * to see if we're a VF by examining our tail register |
310 | * address. | | 310 | * address. |
311 | */ | | 311 | */ |
312 | if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) && | | 312 | if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) && |
313 | (next->m_flags & M_MCAST)) | | 313 | (next->m_flags & M_MCAST)) |
314 | if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); | | 314 | if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); |
315 | #endif | | 315 | #endif |
316 | /* Send a copy of the frame to the BPF listener */ | | 316 | /* Send a copy of the frame to the BPF listener */ |
317 | bpf_mtap(ifp, next); | | 317 | bpf_mtap(ifp, next); |
318 | if ((ifp->if_flags & IFF_RUNNING) == 0) | | 318 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
319 | break; | | 319 | break; |
320 | } | | 320 | } |
321 | | | 321 | |
322 | if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter)) | | 322 | if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter)) |
323 | ixgbe_txeof(txr); | | 323 | ixgbe_txeof(txr); |
324 | | | 324 | |
325 | return (err); | | 325 | return (err); |
326 | } /* ixgbe_mq_start_locked */ | | 326 | } /* ixgbe_mq_start_locked */ |
327 | | | 327 | |
328 | /************************************************************************ | | 328 | /************************************************************************ |
329 | * ixgbe_deferred_mq_start | | 329 | * ixgbe_deferred_mq_start |
330 | * | | 330 | * |
331 | * Called from a softint and workqueue (indirectly) to drain queued | | 331 | * Called from a softint and workqueue (indirectly) to drain queued |
332 | * transmit packets. | | 332 | * transmit packets. |
333 | ************************************************************************/ | | 333 | ************************************************************************/ |
334 | void | | 334 | void |
335 | ixgbe_deferred_mq_start(void *arg) | | 335 | ixgbe_deferred_mq_start(void *arg) |
336 | { | | 336 | { |
337 | struct tx_ring *txr = arg; | | 337 | struct tx_ring *txr = arg; |
338 | struct adapter *adapter = txr->adapter; | | 338 | struct adapter *adapter = txr->adapter; |
339 | struct ifnet *ifp = adapter->ifp; | | 339 | struct ifnet *ifp = adapter->ifp; |
340 | | | 340 | |
341 | IXGBE_TX_LOCK(txr); | | 341 | IXGBE_TX_LOCK(txr); |
342 | if (pcq_peek(txr->txr_interq) != NULL) | | 342 | if (pcq_peek(txr->txr_interq) != NULL) |
343 | ixgbe_mq_start_locked(ifp, txr); | | 343 | ixgbe_mq_start_locked(ifp, txr); |
344 | IXGBE_TX_UNLOCK(txr); | | 344 | IXGBE_TX_UNLOCK(txr); |
345 | } /* ixgbe_deferred_mq_start */ | | 345 | } /* ixgbe_deferred_mq_start */ |
346 | | | 346 | |
347 | /************************************************************************ | | 347 | /************************************************************************ |
348 | * ixgbe_deferred_mq_start_work | | 348 | * ixgbe_deferred_mq_start_work |
349 | * | | 349 | * |
350 | * Called from a workqueue to drain queued transmit packets. | | 350 | * Called from a workqueue to drain queued transmit packets. |
351 | ************************************************************************/ | | 351 | ************************************************************************/ |
352 | void | | 352 | void |
353 | ixgbe_deferred_mq_start_work(struct work *wk, void *arg) | | 353 | ixgbe_deferred_mq_start_work(struct work *wk, void *arg) |
354 | { | | 354 | { |
355 | struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie); | | 355 | struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie); |
356 | struct adapter *adapter = txr->adapter; | | 356 | struct adapter *adapter = txr->adapter; |
357 | u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued); | | 357 | u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued); |
358 | *enqueued = 0; | | 358 | *enqueued = 0; |
359 | percpu_putref(adapter->txr_wq_enqueued); | | 359 | percpu_putref(adapter->txr_wq_enqueued); |
360 | | | 360 | |
361 | ixgbe_deferred_mq_start(txr); | | 361 | ixgbe_deferred_mq_start(txr); |
362 | } /* ixgbe_deferred_mq_start_work */ | | 362 | } /* ixgbe_deferred_mq_start_work */ |
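
The container_of() step above recovers the tx_ring from the struct work embedded in it: the workqueue hands back a pointer to the wq_cookie member, and subtracting that member's offset yields the enclosing structure. A standalone illustration using the usual offsetof-based definition (the struct here is a hypothetical stand-in, not the driver's tx_ring):

#include <stddef.h>

struct work { int pending; };

struct ring {                   /* hypothetical stand-in for struct tx_ring */
        int me;
        struct work wq_cookie;
};

/* Typical kernel definition of container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct ring *
ring_from_work(struct work *wk)
{
        return container_of(wk, struct ring, wq_cookie);
}
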
363 | | | 363 | |
364 | /************************************************************************ | | 364 | /************************************************************************ |
365 | * ixgbe_drain_all | | 365 | * ixgbe_drain_all |
366 | ************************************************************************/ | | 366 | ************************************************************************/ |
367 | void | | 367 | void |
368 | ixgbe_drain_all(struct adapter *adapter) | | 368 | ixgbe_drain_all(struct adapter *adapter) |
369 | { | | 369 | { |
370 | struct ifnet *ifp = adapter->ifp; | | 370 | struct ifnet *ifp = adapter->ifp; |
371 | struct ix_queue *que = adapter->queues; | | 371 | struct ix_queue *que = adapter->queues; |
372 | | | 372 | |
373 | for (int i = 0; i < adapter->num_queues; i++, que++) { | | 373 | for (int i = 0; i < adapter->num_queues; i++, que++) { |
374 | struct tx_ring *txr = que->txr; | | 374 | struct tx_ring *txr = que->txr; |
375 | | | 375 | |
376 | IXGBE_TX_LOCK(txr); | | 376 | IXGBE_TX_LOCK(txr); |
377 | ixgbe_drain(ifp, txr); | | 377 | ixgbe_drain(ifp, txr); |
378 | IXGBE_TX_UNLOCK(txr); | | 378 | IXGBE_TX_UNLOCK(txr); |
379 | } | | 379 | } |
380 | } | | 380 | } |
381 | | | 381 | |
382 | /************************************************************************ | | 382 | /************************************************************************ |
383 | * ixgbe_xmit | | 383 | * ixgbe_xmit |
384 | * | | 384 | * |
385 | * Maps the mbufs to tx descriptors, allowing the | | 385 | * Maps the mbufs to tx descriptors, allowing the |
386 | * TX engine to transmit the packets. | | 386 | * TX engine to transmit the packets. |
387 | * | | 387 | * |
388 | * Return 0 on success, positive on failure | | 388 | * Return 0 on success, positive on failure |
389 | ************************************************************************/ | | 389 | ************************************************************************/ |
390 | static int | | 390 | static int |
391 | ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head) | | 391 | ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head) |
392 | { | | 392 | { |
393 | struct adapter *adapter = txr->adapter; | | 393 | struct adapter *adapter = txr->adapter; |
394 | struct ixgbe_tx_buf *txbuf; | | 394 | struct ixgbe_tx_buf *txbuf; |
395 | union ixgbe_adv_tx_desc *txd = NULL; | | 395 | union ixgbe_adv_tx_desc *txd = NULL; |
396 | struct ifnet *ifp = adapter->ifp; | | 396 | struct ifnet *ifp = adapter->ifp; |
397 | int i, j, error; | | 397 | int i, j, error; |
398 | int first; | | 398 | int first; |
399 | u32 olinfo_status = 0, cmd_type_len; | | 399 | u32 olinfo_status = 0, cmd_type_len; |
400 | bool remap = TRUE; | | 400 | bool remap = TRUE; |
401 | bus_dmamap_t map; | | 401 | bus_dmamap_t map; |
402 | | | 402 | |
403 | /* Basic descriptor defines */ | | 403 | /* Basic descriptor defines */ |
404 | cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | | | 404 | cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | |
405 | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); | | 405 | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); |
406 | | | 406 | |
407 | if (vlan_has_tag(m_head)) | | 407 | if (vlan_has_tag(m_head)) |
408 | cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; | | 408 | cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; |
409 | | | 409 | |
410 | /* | | 410 | /* |
411 | * Important to capture the first descriptor | | 411 | * Important to capture the first descriptor |
412 | * used because it will contain the index of | | 412 | * used because it will contain the index of |
413 | * the one we tell the hardware to report back | | 413 | * the one we tell the hardware to report back |
414 | */ | | 414 | */ |
415 | first = txr->next_avail_desc; | | 415 | first = txr->next_avail_desc; |
416 | txbuf = &txr->tx_buffers[first]; | | 416 | txbuf = &txr->tx_buffers[first]; |
417 | map = txbuf->map; | | 417 | map = txbuf->map; |
418 | | | 418 | |
419 | /* | | 419 | /* |
420 | * Map the packet for DMA. | | 420 | * Map the packet for DMA. |
421 | */ | | 421 | */ |
422 | retry: | | 422 | retry: |
423 | error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head, | | 423 | error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head, |
424 | BUS_DMA_NOWAIT); | | 424 | BUS_DMA_NOWAIT); |
425 | | | 425 | |
426 | if (__predict_false(error)) { | | 426 | if (__predict_false(error)) { |
427 | struct mbuf *m; | | 427 | struct mbuf *m; |
428 | | | 428 | |
429 | switch (error) { | | 429 | switch (error) { |
430 | case EAGAIN: | | 430 | case EAGAIN: |
431 | txr->q_eagain_tx_dma_setup++; | | 431 | txr->q_eagain_tx_dma_setup++; |
432 | return EAGAIN; | | 432 | return EAGAIN; |
433 | case ENOMEM: | | 433 | case ENOMEM: |
434 | txr->q_enomem_tx_dma_setup++; | | 434 | txr->q_enomem_tx_dma_setup++; |
435 | return EAGAIN; | | 435 | return EAGAIN; |
436 | case EFBIG: | | 436 | case EFBIG: |
437 | /* Try it again? - one try */ | | 437 | /* Try it again? - one try */ |
438 | if (remap == TRUE) { | | 438 | if (remap == TRUE) { |
439 | remap = FALSE; | | 439 | remap = FALSE; |
440 | /* | | 440 | /* |
441 | * XXX: m_defrag will choke on | | 441 | * XXX: m_defrag will choke on |
442 | * non-MCLBYTES-sized clusters | | 442 | * non-MCLBYTES-sized clusters |
443 | */ | | 443 | */ |
444 | txr->q_efbig_tx_dma_setup++; | | 444 | txr->q_efbig_tx_dma_setup++; |
445 | m = m_defrag(m_head, M_NOWAIT); | | 445 | m = m_defrag(m_head, M_NOWAIT); |
446 | if (m == NULL) { | | 446 | if (m == NULL) { |
447 | txr->q_mbuf_defrag_failed++; | | 447 | txr->q_mbuf_defrag_failed++; |
448 | return ENOBUFS; | | 448 | return ENOBUFS; |
449 | } | | 449 | } |
450 | m_head = m; | | 450 | m_head = m; |
451 | goto retry; | | 451 | goto retry; |
452 | } else { | | 452 | } else { |
453 | txr->q_efbig2_tx_dma_setup++; | | 453 | txr->q_efbig2_tx_dma_setup++; |
454 | return error; | | 454 | return error; |
455 | } | | 455 | } |
456 | case EINVAL: | | 456 | case EINVAL: |
457 | txr->q_einval_tx_dma_setup++; | | 457 | txr->q_einval_tx_dma_setup++; |
458 | return error; | | 458 | return error; |
459 | default: | | 459 | default: |
460 | txr->q_other_tx_dma_setup++; | | 460 | txr->q_other_tx_dma_setup++; |
461 | return error; | | 461 | return error; |
462 | } | | 462 | } |
463 | } | | 463 | } |
464 | | | 464 | |
465 | /* Make certain there are enough descriptors */ | | 465 | /* Make certain there are enough descriptors */ |
466 | if (txr->tx_avail < (map->dm_nsegs + 2)) { | | 466 | if (txr->tx_avail < (map->dm_nsegs + 2)) { |
467 | txr->txr_no_space = true; | | 467 | txr->txr_no_space = true; |
468 | txr->no_desc_avail.ev_count++; | | 468 | txr->no_desc_avail.ev_count++; |
469 | ixgbe_dmamap_unload(txr->txtag, txbuf->map); | | 469 | ixgbe_dmamap_unload(txr->txtag, txbuf->map); |
470 | return EAGAIN; | | 470 | return EAGAIN; |
471 | } | | 471 | } |
472 | | | 472 | |
473 | /* | | 473 | /* |
474 | * Set up the appropriate offload context; | | 474 | * Set up the appropriate offload context; |
475 | * this will consume the first descriptor | | 475 | * this will consume the first descriptor |
476 | */ | | 476 | */ |
477 | error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); | | 477 | error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); |
478 | if (__predict_false(error)) { | | 478 | if (__predict_false(error)) { |
479 | return (error); | | 479 | return (error); |
480 | } | | 480 | } |
481 | | | 481 | |
482 | /* Do the flow director magic */ | | 482 | /* Do the flow director magic */ |
483 | if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && | | 483 | if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && |
484 | (txr->atr_sample) && (!adapter->fdir_reinit)) { | | 484 | (txr->atr_sample) && (!adapter->fdir_reinit)) { |
485 | ++txr->atr_count; | | 485 | ++txr->atr_count; |
486 | if (txr->atr_count >= atr_sample_rate) { | | 486 | if (txr->atr_count >= atr_sample_rate) { |
487 | ixgbe_atr(txr, m_head); | | 487 | ixgbe_atr(txr, m_head); |
488 | txr->atr_count = 0; | | 488 | txr->atr_count = 0; |
489 | } | | 489 | } |
490 | } | | 490 | } |
491 | | | 491 | |
492 | olinfo_status |= IXGBE_ADVTXD_CC; | | 492 | olinfo_status |= IXGBE_ADVTXD_CC; |
493 | i = txr->next_avail_desc; | | 493 | i = txr->next_avail_desc; |
494 | for (j = 0; j < map->dm_nsegs; j++) { | | 494 | for (j = 0; j < map->dm_nsegs; j++) { |
495 | bus_size_t seglen; | | 495 | bus_size_t seglen; |
496 | bus_addr_t segaddr; | | 496 | bus_addr_t segaddr; |
497 | | | 497 | |
498 | txbuf = &txr->tx_buffers[i]; | | 498 | txbuf = &txr->tx_buffers[i]; |
499 | txd = &txr->tx_base[i]; | | 499 | txd = &txr->tx_base[i]; |
500 | seglen = map->dm_segs[j].ds_len; | | 500 | seglen = map->dm_segs[j].ds_len; |
501 | segaddr = htole64(map->dm_segs[j].ds_addr); | | 501 | segaddr = htole64(map->dm_segs[j].ds_addr); |
502 | | | 502 | |
503 | txd->read.buffer_addr = segaddr; | | 503 | txd->read.buffer_addr = segaddr; |
504 | txd->read.cmd_type_len = htole32(cmd_type_len | seglen); | | 504 | txd->read.cmd_type_len = htole32(cmd_type_len | seglen); |
505 | txd->read.olinfo_status = htole32(olinfo_status); | | 505 | txd->read.olinfo_status = htole32(olinfo_status); |
506 | | | 506 | |
507 | if (++i == txr->num_desc) | | 507 | if (++i == txr->num_desc) |
508 | i = 0; | | 508 | i = 0; |
509 | } | | 509 | } |
510 | | | 510 | |
511 | txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); | | 511 | txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); |
512 | txr->tx_avail -= map->dm_nsegs; | | 512 | txr->tx_avail -= map->dm_nsegs; |
513 | txr->next_avail_desc = i; | | 513 | txr->next_avail_desc = i; |
514 | | | 514 | |
515 | txbuf->m_head = m_head; | | 515 | txbuf->m_head = m_head; |
516 | /* | | 516 | /* |
517 | * Here we swap the map so the last descriptor, | | 517 | * Here we swap the map so the last descriptor, |
518 | * which gets the completion interrupt, has the | | 518 | * which gets the completion interrupt, has the |
519 | * real map, and the first descriptor gets the | | 519 | * real map, and the first descriptor gets the |
520 | * unused map from this descriptor. | | 520 | * unused map from this descriptor. |
521 | */ | | 521 | */ |
522 | txr->tx_buffers[first].map = txbuf->map; | | 522 | txr->tx_buffers[first].map = txbuf->map; |
523 | txbuf->map = map; | | 523 | txbuf->map = map; |
524 | bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len, | | 524 | bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len, |
525 | BUS_DMASYNC_PREWRITE); | | 525 | BUS_DMASYNC_PREWRITE); |
526 | | | 526 | |
527 | /* Set the EOP descriptor that will be marked done */ | | 527 | /* Set the EOP descriptor that will be marked done */ |
528 | txbuf = &txr->tx_buffers[first]; | | 528 | txbuf = &txr->tx_buffers[first]; |
529 | txbuf->eop = txd; | | 529 | txbuf->eop = txd; |
530 | | | 530 | |
531 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 531 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
532 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 532 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
533 | /* | | 533 | /* |
534 | * Advance the Transmit Descriptor Tail (TDT); this tells the | | 534 | * Advance the Transmit Descriptor Tail (TDT); this tells the |
535 | * hardware that this frame is available to transmit. | | 535 | * hardware that this frame is available to transmit. |
536 | */ | | 536 | */ |
537 | ++txr->total_packets.ev_count; | | 537 | ++txr->total_packets.ev_count; |
538 | IXGBE_WRITE_REG(&adapter->hw, txr->tail, i); | | 538 | IXGBE_WRITE_REG(&adapter->hw, txr->tail, i); |
539 | | | 539 | |
540 | /* | | 540 | /* |
541 | * XXXX NOMPSAFE: ifp->if_data should be percpu. | | 541 | * XXXX NOMPSAFE: ifp->if_data should be percpu. |
542 | */ | | 542 | */ |
543 | ifp->if_obytes += m_head->m_pkthdr.len; | | 543 | ifp->if_obytes += m_head->m_pkthdr.len; |
544 | if (m_head->m_flags & M_MCAST) | | 544 | if (m_head->m_flags & M_MCAST) |
545 | ifp->if_omcasts++; | | 545 | ifp->if_omcasts++; |
546 | | | 546 | |
547 | /* Mark queue as having work */ | | 547 | /* Mark queue as having work */ |
548 | if (txr->busy == 0) | | 548 | if (txr->busy == 0) |
549 | txr->busy = 1; | | 549 | txr->busy = 1; |
550 | | | 550 | |
551 | return (0); | | 551 | return (0); |
552 | } /* ixgbe_xmit */ | | 552 | } /* ixgbe_xmit */ |
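
The EFBIG path in ixgbe_xmit() is a one-shot retry: when the mbuf chain has more segments than the DMA map accepts, the chain is defragmented into fewer, larger clusters and the load is retried exactly once before giving up. A self-contained sketch of that control flow, with dma_load() and defrag() as stand-ins for bus_dmamap_load_mbuf() and m_defrag():

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-ins for bus_dmamap_load_mbuf() and m_defrag(). */
static int dma_load(void *map, void *chain) { (void)map; (void)chain; return 0; }
static void *defrag(void *chain) { return chain; }

static int
load_with_one_defrag(void *map, void **chainp)
{
        bool retried = false;
        int error;

retry:
        error = dma_load(map, *chainp);
        if (error == EFBIG && !retried) {
                void *m = defrag(*chainp);      /* coalesce segments */
                if (m == NULL)
                        return ENOBUFS;         /* defrag failed: caller drops */
                *chainp = m;
                retried = true;                 /* at most one retry */
                goto retry;
        }
        return error;
}
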
553 | | | 553 | |
554 | /************************************************************************ | | 554 | /************************************************************************ |
555 | * ixgbe_drain | | 555 | * ixgbe_drain |
556 | ************************************************************************/ | | 556 | ************************************************************************/ |
557 | static void | | 557 | static void |
558 | ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr) | | 558 | ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr) |
559 | { | | 559 | { |
560 | struct mbuf *m; | | 560 | struct mbuf *m; |
561 | | | 561 | |
562 | IXGBE_TX_LOCK_ASSERT(txr); | | 562 | IXGBE_TX_LOCK_ASSERT(txr); |
563 | | | 563 | |
564 | if (txr->me == 0) { | | 564 | if (txr->me == 0) { |
565 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { | | 565 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { |
566 | IFQ_DEQUEUE(&ifp->if_snd, m); | | 566 | IFQ_DEQUEUE(&ifp->if_snd, m); |
567 | m_freem(m); | | 567 | m_freem(m); |
568 | IF_DROP(&ifp->if_snd); | | 568 | IF_DROP(&ifp->if_snd); |
569 | } | | 569 | } |
570 | } | | 570 | } |
571 | | | 571 | |
572 | while ((m = pcq_get(txr->txr_interq)) != NULL) { | | 572 | while ((m = pcq_get(txr->txr_interq)) != NULL) { |
573 | m_freem(m); | | 573 | m_freem(m); |
574 | txr->pcq_drops.ev_count++; | | 574 | txr->pcq_drops.ev_count++; |
575 | } | | 575 | } |
576 | } | | 576 | } |
577 | | | 577 | |
578 | /************************************************************************ | | 578 | /************************************************************************ |
579 | * ixgbe_allocate_transmit_buffers | | 579 | * ixgbe_allocate_transmit_buffers |
580 | * | | 580 | * |
581 | * Allocate memory for tx_buffer structures. The tx_buffer stores all | | 581 | * Allocate memory for tx_buffer structures. The tx_buffer stores all |
582 | * the information needed to transmit a packet on the wire. This is | | 582 | * the information needed to transmit a packet on the wire. This is |
583 | * called only once at attach; setup is done on every reset. | | 583 | * called only once at attach; setup is done on every reset. |
584 | ************************************************************************/ | | 584 | ************************************************************************/ |
585 | static int | | 585 | static int |
586 | ixgbe_allocate_transmit_buffers(struct tx_ring *txr) | | 586 | ixgbe_allocate_transmit_buffers(struct tx_ring *txr) |
587 | { | | 587 | { |
588 | struct adapter *adapter = txr->adapter; | | 588 | struct adapter *adapter = txr->adapter; |
589 | device_t dev = adapter->dev; | | 589 | device_t dev = adapter->dev; |
590 | struct ixgbe_tx_buf *txbuf; | | 590 | struct ixgbe_tx_buf *txbuf; |
591 | int error, i; | | 591 | int error, i; |
592 | | | 592 | |
593 | /* | | 593 | /* |
594 | * Setup DMA descriptor areas. | | 594 | * Setup DMA descriptor areas. |
595 | */ | | 595 | */ |
596 | error = ixgbe_dma_tag_create( | | 596 | error = ixgbe_dma_tag_create( |
597 | /* parent */ adapter->osdep.dmat, | | 597 | /* parent */ adapter->osdep.dmat, |
598 | /* alignment */ 1, | | 598 | /* alignment */ 1, |
599 | /* bounds */ 0, | | 599 | /* bounds */ 0, |
600 | /* maxsize */ IXGBE_TSO_SIZE, | | 600 | /* maxsize */ IXGBE_TSO_SIZE, |
601 | /* nsegments */ adapter->num_segs, | | 601 | /* nsegments */ adapter->num_segs, |
602 | /* maxsegsize */ PAGE_SIZE, | | 602 | /* maxsegsize */ PAGE_SIZE, |
603 | /* flags */ 0, | | 603 | /* flags */ 0, |
604 | &txr->txtag); | | 604 | &txr->txtag); |
605 | if (error != 0) { | | 605 | if (error != 0) { |
606 | aprint_error_dev(dev, "Unable to allocate TX DMA tag\n"); | | 606 | aprint_error_dev(dev, "Unable to allocate TX DMA tag\n"); |
607 | goto fail; | | 607 | goto fail; |
608 | } | | 608 | } |
609 | | | 609 | |
610 | txr->tx_buffers = | | 610 | txr->tx_buffers = |
611 | (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) * | | 611 | (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) * |
612 | adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); | | 612 | adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); |
613 | if (txr->tx_buffers == NULL) { | | 613 | if (txr->tx_buffers == NULL) { |
614 | aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n"); | | 614 | aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n"); |
615 | error = ENOMEM; | | 615 | error = ENOMEM; |
616 | goto fail; | | 616 | goto fail; |
617 | } | | 617 | } |
618 | | | 618 | |
619 | /* Create the descriptor buffer dma maps */ | | 619 | /* Create the descriptor buffer dma maps */ |
620 | txbuf = txr->tx_buffers; | | 620 | txbuf = txr->tx_buffers; |
621 | for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { | | 621 | for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { |
622 | error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map); | | 622 | error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map); |
623 | if (error != 0) { | | 623 | if (error != 0) { |
624 | aprint_error_dev(dev, | | 624 | aprint_error_dev(dev, |
625 | "Unable to create TX DMA map (%d)\n", error); | | 625 | "Unable to create TX DMA map (%d)\n", error); |
626 | goto fail; | | 626 | goto fail; |
627 | } | | 627 | } |
628 | } | | 628 | } |
629 | | | 629 | |
630 | return 0; | | 630 | return 0; |
631 | fail: | | 631 | fail: |
632 | /* We free all; this handles the case where we fail in the middle */ | | 632 | /* We free all; this handles the case where we fail in the middle */ |
633 | #if 0 /* XXX was FreeBSD */ | | 633 | #if 0 /* XXX was FreeBSD */ |
634 | ixgbe_free_transmit_structures(adapter); | | 634 | ixgbe_free_transmit_structures(adapter); |
635 | #else | | 635 | #else |
636 | ixgbe_free_transmit_buffers(txr); | | 636 | ixgbe_free_transmit_buffers(txr); |
637 | #endif | | 637 | #endif |
638 | return (error); | | 638 | return (error); |
639 | } /* ixgbe_allocate_transmit_buffers */ | | 639 | } /* ixgbe_allocate_transmit_buffers */ |
640 | | | 640 | |
641 | /************************************************************************ | | 641 | /************************************************************************ |
642 | * ixgbe_setup_transmit_ring - Initialize a transmit ring. | | 642 | * ixgbe_setup_transmit_ring - Initialize a transmit ring. |
643 | ************************************************************************/ | | 643 | ************************************************************************/ |
644 | static void | | 644 | static void |
645 | ixgbe_setup_transmit_ring(struct tx_ring *txr) | | 645 | ixgbe_setup_transmit_ring(struct tx_ring *txr) |
646 | { | | 646 | { |
647 | struct adapter *adapter = txr->adapter; | | 647 | struct adapter *adapter = txr->adapter; |
648 | struct ixgbe_tx_buf *txbuf; | | 648 | struct ixgbe_tx_buf *txbuf; |
649 | #ifdef DEV_NETMAP | | 649 | #ifdef DEV_NETMAP |
650 | struct netmap_adapter *na = NA(adapter->ifp); | | 650 | struct netmap_adapter *na = NA(adapter->ifp); |
651 | struct netmap_slot *slot; | | 651 | struct netmap_slot *slot; |
652 | #endif /* DEV_NETMAP */ | | 652 | #endif /* DEV_NETMAP */ |
653 | | | 653 | |
654 | /* Clear the old ring contents */ | | 654 | /* Clear the old ring contents */ |
655 | IXGBE_TX_LOCK(txr); | | 655 | IXGBE_TX_LOCK(txr); |
656 | | | 656 | |
657 | #ifdef DEV_NETMAP | | 657 | #ifdef DEV_NETMAP |
658 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) { | | 658 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) { |
659 | /* | | 659 | /* |
660 | * (under lock): if in netmap mode, do some consistency | | 660 | * (under lock): if in netmap mode, do some consistency |
661 | * checks and set slot to entry 0 of the netmap ring. | | 661 | * checks and set slot to entry 0 of the netmap ring. |
662 | */ | | 662 | */ |
663 | slot = netmap_reset(na, NR_TX, txr->me, 0); | | 663 | slot = netmap_reset(na, NR_TX, txr->me, 0); |
664 | } | | 664 | } |
665 | #endif /* DEV_NETMAP */ | | 665 | #endif /* DEV_NETMAP */ |
666 | | | 666 | |
667 | bzero((void *)txr->tx_base, | | 667 | bzero((void *)txr->tx_base, |
668 | (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); | | 668 | (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); |
669 | /* Reset indices */ | | 669 | /* Reset indices */ |
670 | txr->next_avail_desc = 0; | | 670 | txr->next_avail_desc = 0; |
671 | txr->next_to_clean = 0; | | 671 | txr->next_to_clean = 0; |
672 | | | 672 | |
673 | /* Free any existing tx buffers. */ | | 673 | /* Free any existing tx buffers. */ |
674 | txbuf = txr->tx_buffers; | | 674 | txbuf = txr->tx_buffers; |
675 | for (int i = 0; i < txr->num_desc; i++, txbuf++) { | | 675 | for (int i = 0; i < txr->num_desc; i++, txbuf++) { |
676 | if (txbuf->m_head != NULL) { | | 676 | if (txbuf->m_head != NULL) { |
677 | bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map, | | 677 | bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map, |
678 | 0, txbuf->m_head->m_pkthdr.len, | | 678 | 0, txbuf->m_head->m_pkthdr.len, |
679 | BUS_DMASYNC_POSTWRITE); | | 679 | BUS_DMASYNC_POSTWRITE); |
680 | ixgbe_dmamap_unload(txr->txtag, txbuf->map); | | 680 | ixgbe_dmamap_unload(txr->txtag, txbuf->map); |
681 | m_freem(txbuf->m_head); | | 681 | m_freem(txbuf->m_head); |
682 | txbuf->m_head = NULL; | | 682 | txbuf->m_head = NULL; |
683 | } | | 683 | } |
684 | | | 684 | |
685 | #ifdef DEV_NETMAP | | 685 | #ifdef DEV_NETMAP |
686 | /* | | 686 | /* |
687 | * In netmap mode, set the map for the packet buffer. | | 687 | * In netmap mode, set the map for the packet buffer. |
688 | * NOTE: Some drivers (not this one) also need to set | | 688 | * NOTE: Some drivers (not this one) also need to set |
689 | * the physical buffer address in the NIC ring. | | 689 | * the physical buffer address in the NIC ring. |
690 | * Slots in the netmap ring (indexed by "si") are | | 690 | * Slots in the netmap ring (indexed by "si") are |
691 | * kring->nkr_hwofs positions "ahead" wrt the | | 691 | * kring->nkr_hwofs positions "ahead" wrt the |
692 | * corresponding slot in the NIC ring. In some drivers | | 692 | * corresponding slot in the NIC ring. In some drivers |
693 | * (not here) nkr_hwofs can be negative. Function | | 693 | * (not here) nkr_hwofs can be negative. Function |
694 | * netmap_idx_n2k() handles wraparounds properly. | | 694 | * netmap_idx_n2k() handles wraparounds properly. |
695 | */ | | 695 | */ |
696 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) { | | 696 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) { |
697 | int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); | | 697 | int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); |
698 | netmap_load_map(na, txr->txtag, | | 698 | netmap_load_map(na, txr->txtag, |
699 | txbuf->map, NMB(na, slot + si)); | | 699 | txbuf->map, NMB(na, slot + si)); |
700 | } | | 700 | } |
701 | #endif /* DEV_NETMAP */ | | 701 | #endif /* DEV_NETMAP */ |
702 | | | 702 | |
703 | /* Clear the EOP descriptor pointer */ | | 703 | /* Clear the EOP descriptor pointer */ |
704 | txbuf->eop = NULL; | | 704 | txbuf->eop = NULL; |
705 | } | | 705 | } |
706 | | | 706 | |
707 | /* Set the rate at which we sample packets */ | | 707 | /* Set the rate at which we sample packets */ |
708 | if (adapter->feat_en & IXGBE_FEATURE_FDIR) | | 708 | if (adapter->feat_en & IXGBE_FEATURE_FDIR) |
709 | txr->atr_sample = atr_sample_rate; | | 709 | txr->atr_sample = atr_sample_rate; |
710 | | | 710 | |
711 | /* Set number of descriptors available */ | | 711 | /* Set number of descriptors available */ |
712 | txr->tx_avail = adapter->num_tx_desc; | | 712 | txr->tx_avail = adapter->num_tx_desc; |
713 | | | 713 | |
714 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, | | 714 | ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
715 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); | | 715 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
716 | IXGBE_TX_UNLOCK(txr); | | 716 | IXGBE_TX_UNLOCK(txr); |
717 | } /* ixgbe_setup_transmit_ring */ | | 717 | } /* ixgbe_setup_transmit_ring */ |
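
The indices reset here are the whole of the ring bookkeeping: next_avail_desc is the producer index, next_to_clean the consumer index, and tx_avail the free-descriptor count; the producer wraps modulo num_desc just as the "if (++i == txr->num_desc) i = 0;" step in ixgbe_xmit() does. A minimal standalone model of that arithmetic (illustrative, not driver code):

#include <stdbool.h>

struct ring {
        unsigned next_avail;    /* producer: next descriptor to fill */
        unsigned next_clean;    /* consumer: next descriptor to reclaim */
        unsigned avail;         /* free descriptors */
        unsigned num_desc;      /* ring size */
};

/* Consume nsegs descriptors; mirrors the headroom check
 * "tx_avail < (map->dm_nsegs + 2)" in ixgbe_xmit(). */
static bool
ring_produce(struct ring *r, unsigned nsegs)
{
        if (r->avail < nsegs + 2)
                return false;           /* caller requeues and retries */
        r->next_avail = (r->next_avail + nsegs) % r->num_desc;
        r->avail -= nsegs;
        return true;
}

/* Reclaim ndone descriptors after the hardware completes them. */
static void
ring_clean(struct ring *r, unsigned ndone)
{
        r->next_clean = (r->next_clean + ndone) % r->num_desc;
        r->avail += ndone;
}
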
718 | | | 718 | |
719 | /************************************************************************ | | 719 | /************************************************************************ |
720 | * ixgbe_setup_transmit_structures - Initialize all transmit rings. | | 720 | * ixgbe_setup_transmit_structures - Initialize all transmit rings. |
721 | ************************************************************************/ | | 721 | ************************************************************************/ |
722 | int | | 722 | int |
723 | ixgbe_setup_transmit_structures(struct adapter *adapter) | | 723 | ixgbe_setup_transmit_structures(struct adapter *adapter) |
724 | { | | 724 | { |
725 | struct tx_ring *txr = adapter->tx_rings; | | 725 | struct tx_ring *txr = adapter->tx_rings; |
726 | | | 726 | |
727 | for (int i = 0; i < adapter->num_queues; i++, txr++) | | 727 | for (int i = 0; i < adapter->num_queues; i++, txr++) |
728 | ixgbe_setup_transmit_ring(txr); | | 728 | ixgbe_setup_transmit_ring(txr); |
729 | | | 729 | |
730 | return (0); | | 730 | return (0); |
731 | } /* ixgbe_setup_transmit_structures */ | | 731 | } /* ixgbe_setup_transmit_structures */ |

/************************************************************************
 * ixgbe_free_transmit_structures - Free all transmit rings.
 ************************************************************************/
void
ixgbe_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ixgbe_free_transmit_buffers(txr);
		ixgbe_dma_free(adapter, &txr->txdma);
		IXGBE_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
} /* ixgbe_free_transmit_structures */

/************************************************************************
 * ixgbe_free_transmit_buffers
 *
 *   Free transmit ring related data structures.
 ************************************************************************/
static void
ixgbe_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
			    0, tx_buffer->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				ixgbe_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
			ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
	if (txr->txr_interq != NULL) {
		struct mbuf *m;

		while ((m = pcq_get(txr->txr_interq)) != NULL)
			m_freem(m);
		pcq_destroy(txr->txr_interq);
	}
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		ixgbe_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
} /* ixgbe_free_transmit_buffers */

/************************************************************************
 * ixgbe_tx_ctx_setup
 *
 *   Advanced Context Descriptor setup for VLAN, CSUM or TSO
 ************************************************************************/
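/*
 * A sketch of the vlan_macip_lens packing assembled below, derived
 * from the IXGBE_ADVTXD_*_SHIFT constants used in this function (the
 * controller datasheet is authoritative):
 *
 *	 31           16 15     9 8       0
 *	+---------------+--------+---------+
 *	|   VLAN tag    | MACLEN |  IPLEN  |
 *	+---------------+--------+---------+
 *
 * MACLEN is ehdrlen and IPLEN is ip_hlen as computed below.
 */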
static int
ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header *eh;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	int ehdrlen, ip_hlen = 0;
	int offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd_mlhl = 0;
	u16 vtag = 0;
	u16 etype;
	u8 ipproto = 0;
	char *l3d;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
		int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);

		if (rv != 0)
			++adapter->tso_err.ev_count;
		return rv;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
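	/*
	 * Note: the PAYLEN written here is the full packet length;
	 * contrast with the TSO path, where ixgbe_tso_setup() reports
	 * only the TCP payload length with all headers excluded.
	 */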

	/* Now ready a context descriptor */
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
	    (offload == FALSE))
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	if (offload == FALSE)
		goto no_offloads;

	/*
	 * If the first mbuf only includes the ethernet header,
	 * jump to the next one
	 * XXX: This assumes the stack splits mbufs containing headers
	 *      on header boundaries
	 * XXX: And assumes the entire IP header is contained in one mbuf
	 */
	if (mp->m_len == ehdrlen && mp->m_next)
		l3d = mtod(mp->m_next, char *);
	else
		l3d = mtod(mp, char *) + ehdrlen;

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(l3d);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
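		/*
		 * The assertion below encodes an assumption: when the
		 * stack requests IPv4 header checksum offload it hands
		 * over a zeroed ip_sum, presumably since the hardware
		 * checksums the header with the field included.
		 */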
		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
		    ip->ip_sum == 0);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(l3d);
		ip_hlen = sizeof(struct ip6_hdr);
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		offload = false;
		break;
	}

	if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;

	vlan_macip_lens |= ip_hlen;

	/* No support for offloads for non-L4 next headers */
	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_TCPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		else
			offload = false;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags &
		    (M_CSUM_UDPv4 | M_CSUM_UDPv6))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
		else
			offload = false;
		break;
	default:
		offload = false;
		break;
	}

	if (offload) /* Insert L4 checksum into data descriptors */
		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

no_offloads:
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
} /* ixgbe_tx_ctx_setup */
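
/*
 * A hedged usage sketch for ixgbe_tx_ctx_setup() (hypothetical
 * caller, modelled on a typical encap path; the error handling is
 * illustrative, not from this file):
 *
 *	u32 cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
 *	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
 *	u32 olinfo_status = 0;
 *
 *	if (ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len,
 *	    &olinfo_status) != 0)
 *		return (EIO);
 *
 * On return the two words carry any TSO/CSUM/VLAN bits and are
 * copied into each data descriptor built for the packet.
 */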

/************************************************************************
 * ixgbe_tso_setup
 *
 *   Setup work for hardware segmentation offload (TSO) on
 *   adapters using advanced tx descriptors
 ************************************************************************/
static int
ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
    u32 *olinfo_status)
{
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ether_vlan_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
#ifdef INET
	struct ip *ip;
#endif
	struct tcphdr *th;
	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, paylen;
	u16 vtag = 0, eh_type;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((char *)ip + ip_hlen);
		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
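		/*
		 * Note that the pseudo-header checksum seeded above
		 * deliberately omits the TCP length: with TSO the
		 * hardware folds in the correct per-segment length as
		 * it splits the packet.
		 */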
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		th = (struct tcphdr *)((char *)ip6 + ip_hlen);
		th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
		    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	ctxd = txr->next_avail_desc;
	TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
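	/*
	 * Worked example: a 9014-byte packet with a 14-byte ethernet
	 * header, a 20-byte IPv4 header and a 20-byte TCP header
	 * yields paylen = 8960, which the hardware slices into
	 * MSS-sized segments.
	 */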

	/* VLAN MACLEN IPLEN */
	if (vlan_has_tag(mp)) {
		vtag = htole16(vlan_get_tag(mp));
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
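	/*
	 * Per the shift constants above (the datasheet is
	 * authoritative): the MSS occupies bits 31:16 of
	 * mss_l4len_idx and the TCP header length (L4LEN) bits 15:8.
	 */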

	TXD->seqnum_seed = htole32(0);

	if (++ctxd == txr->num_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx.ev_count;

	return (0);
} /* ixgbe_tso_setup */

/************************************************************************
 * ixgbe_txeof
 *
 *   Examine each tx_buffer in the used queue. If the hardware is done
 *   processing the packet then free associated resources. The
 *   tx_buffer is put back on the free queue.
 ************************************************************************/
bool
ixgbe_txeof(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_tx_buf *buf;
	union ixgbe_adv_tx_desc *txd;
	u32 work, processed = 0;
	u32 limit = adapter->tx_process_limit;

	KASSERT(mutex_owned(&txr->tx_mtx));

#ifdef DEV_NETMAP
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
		struct netmap_adapter *na = NA(adapter->ifp);
		struct netmap_kring *kring = &na->tx_rings[txr->me];
		txd = txr->tx_base;
		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
		    BUS_DMASYNC_POSTREAD);
		/*
		 * In netmap mode, all the work is done in the context
		 * of the client thread. Interrupt handlers only wake up
		 * clients, which may be sleeping on individual rings
		 * or on a global resource for all rings.
		 * To implement tx interrupt mitigation, we wake up the client
		 * thread roughly every half ring, even if the NIC interrupts
		 * more frequently. This is implemented as follows:
		 * - ixgbe_txsync() sets kring->nr_kflags with the index of
		 *   the slot that should wake up the thread (nkr_num_slots
		 *   means the user thread should not be woken up);
		 * - the driver ignores tx interrupts unless netmap_mitigate=0
		 *   or the slot has the DD bit set.
		 */
		if (!netmap_mitigate ||
		    (kring->nr_kflags < kring->nkr_num_slots &&
		     txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
			netmap_tx_irq(ifp, txr->me);
		}
		return false;
	}
#endif /* DEV_NETMAP */

	if (txr->tx_avail == txr->num_desc) {
		txr->busy = 0;
		return false;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	work -= txr->num_desc; /* The distance to ring end */
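	/*
	 * From here on 'work' is a negative offset: it climbs from
	 * next_to_clean - num_desc towards zero, so the
	 * __predict_false(!work) tests below catch the ring wrap
	 * without a modulo operation.
	 */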
	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	do {
		union ixgbe_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
			break; /* I/O not complete */

		if (buf->m_head) {
			txr->bytes += buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
			    0, buf->m_head->m_pkthdr.len,
			    BUS_DMASYNC_POSTWRITE);
			ixgbe_dmamap_unload(txr->txtag, buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
		buf->eop = NULL;
		txr->txr_no_space = false;
		++txr->tx_avail;

		/* Clean the whole descriptor range of a multi-segment packet */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes +=
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag->dt_dmat,
				    buf->map,
				    0, buf->m_head->m_pkthdr.len,
				    BUS_DMASYNC_POSTWRITE);
				ixgbe_dmamap_unload(txr->txtag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			++txr->tx_avail;
			buf->eop = NULL;
		}
		++txr->packets;
		++processed;
		++ifp->if_opackets;

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));
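	/*
	 * 'limit' (adapter->tx_process_limit) caps how many packets a
	 * single call may reap, bounding the time spent under the TX
	 * lock; anything left over is picked up by a later call.
	 */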

	ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	 * Queue hang detection: we know there's work outstanding or
	 * the first return above would have been taken, so if nothing
	 * got cleaned, increment busy; local_timer checks this count
	 * and marks the queue HUNG once it exceeds a maximum number
	 * of attempts.
	 */
	if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
		++txr->busy;
	/*
	 * If anything was cleaned, reset the state to 1;
	 * note this will turn off HUNG if it's set.
	 */