Thu Jul 16 01:20:38 2020 UTC
Set PCI_COMMAND_MASTER_ENABLE and PCI_COMMAND_MEM_ENABLE
to activate the PCI device.

This configuration is needed when the BIOS or UEFI firmware
has not already set these bits.


(yamaguchi)
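The revision below adds a prototype for a new ixl_pci_csr_setup() helper
(new-file line 768); its body falls outside this excerpt. As a hedged
illustration only — not the committed code — the usual NetBSD idiom for such
a helper is a read-modify-write of the PCI command/status register through
the standard pci_conf_read(9)/pci_conf_write(9) interface. A minimal sketch
(the function name and exact structure here are assumptions):

/*
 * Sketch of a CSR-setup helper, assuming the standard pcireg.h
 * definitions PCI_COMMAND_STATUS_REG, PCI_COMMAND_MASTER_ENABLE and
 * PCI_COMMAND_MEM_ENABLE.  Hypothetical name; not the committed body.
 */
static void
example_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
{
        pcireg_t csr;

        /* read the current PCI command/status register */
        csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);

        /* enable memory-mapped register decoding and DMA (bus mastering) */
        csr |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;

        pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
}

Calling such a helper early in attach makes the driver independent of
whether the firmware left these bits set.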
cvs diff -r1.67 -r1.68 src/sys/dev/pci/if_ixl.c

--- src/sys/dev/pci/if_ixl.c 2020/06/11 02:39:30 1.67
+++ src/sys/dev/pci/if_ixl.c 2020/07/16 01:20:38 1.68
@@ -1,2944 +1,2958 @@
-/* $NetBSD: if_ixl.c,v 1.67 2020/06/11 02:39:30 thorpej Exp $ */
+/* $NetBSD: if_ixl.c,v 1.68 2020/07/16 01:20:38 yamaguchi Exp $ */
 
 /*
  * Copyright (c) 2013-2015, Intel Corporation
  * All rights reserved.
 
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  *
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * 3. Neither the name of the Intel Corporation nor the names of its
  *    contributors may be used to endorse or promote products derived from
  *    this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  * copyright notice and this permission notice appear in all copies.
  *
  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
 /*
  * Copyright (c) 2019 Internet Initiative Japan, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.67 2020/06/11 02:39:30 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.68 2020/07/16 01:20:38 yamaguchi Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #include "opt_if_ixl.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/types.h>
 
 #include <sys/cpu.h>
 #include <sys/device.h>
 #include <sys/evcnt.h>
 #include <sys/interrupt.h>
 #include <sys/kmem.h>
 #include <sys/module.h>
 #include <sys/mutex.h>
 #include <sys/pcq.h>
 #include <sys/syslog.h>
 #include <sys/workqueue.h>
 
 #include <sys/bus.h>
 
 #include <net/bpf.h>
 #include <net/if.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
 #include <net/if_ether.h>
 #include <net/rss_config.h>
 
 #include <netinet/tcp.h>        /* for struct tcphdr */
 #include <netinet/udp.h>        /* for struct udphdr */
 
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcidevs.h>
 
 #include <dev/pci/if_ixlreg.h>
 #include <dev/pci/if_ixlvar.h>
 
 #include <prop/proplib.h>
 
 struct ixl_softc; /* defined */
 
 #define I40E_PF_RESET_WAIT_COUNT        200
 #define I40E_AQ_LARGE_BUF               512
 
 /* bitfields for Tx queue mapping in QTX_CTL */
 #define I40E_QTX_CTL_VF_QUEUE           0x0
 #define I40E_QTX_CTL_VM_QUEUE           0x1
 #define I40E_QTX_CTL_PF_QUEUE           0x2
 
 #define I40E_QUEUE_TYPE_EOL             0x7ff
 #define I40E_INTR_NOTX_QUEUE            0
 
 #define I40E_QUEUE_TYPE_RX              0x0
 #define I40E_QUEUE_TYPE_TX              0x1
 #define I40E_QUEUE_TYPE_PE_CEQ          0x2
 #define I40E_QUEUE_TYPE_UNKNOWN         0x3
 
 #define I40E_ITR_INDEX_RX               0x0
 #define I40E_ITR_INDEX_TX               0x1
 #define I40E_ITR_INDEX_OTHER            0x2
 #define I40E_ITR_INDEX_NONE             0x3
 #define IXL_ITR_RX                      0x7a /* 4K intrs/sec */
 #define IXL_ITR_TX                      0x7a /* 4K intrs/sec */
 
 #define I40E_INTR_NOTX_QUEUE            0
 #define I40E_INTR_NOTX_INTR             0
 #define I40E_INTR_NOTX_RX_QUEUE         0
 #define I40E_INTR_NOTX_TX_QUEUE         1
 #define I40E_INTR_NOTX_RX_MASK          I40E_PFINT_ICR0_QUEUE_0_MASK
 #define I40E_INTR_NOTX_TX_MASK          I40E_PFINT_ICR0_QUEUE_1_MASK
 
 #define BIT_ULL(a)                      (1ULL << (a))
 #define IXL_RSS_HENA_DEFAULT_BASE \
         (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
         BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
         BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
         BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 #define IXL_RSS_HENA_DEFAULT_XL710      IXL_RSS_HENA_DEFAULT_BASE
 #define IXL_RSS_HENA_DEFAULT_X722       (IXL_RSS_HENA_DEFAULT_XL710 | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
         BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
 #define I40E_HASH_LUT_SIZE_128          0
 #define IXL_RSS_KEY_SIZE_REG            13
 
 #define IXL_ICR0_CRIT_ERR_MASK \
         (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \
         I40E_PFINT_ICR0_ECC_ERR_MASK | \
         I40E_PFINT_ICR0_PE_CRITERR_MASK)
 
 #define IXL_QUEUE_MAX_XL710             64
 #define IXL_QUEUE_MAX_X722              128
 
 #define IXL_TX_PKT_DESCS                8
 #define IXL_TX_PKT_MAXSIZE              (MCLBYTES * IXL_TX_PKT_DESCS)
 #define IXL_TX_QUEUE_ALIGN              128
 #define IXL_RX_QUEUE_ALIGN              128
 
 #define IXL_MCLBYTES                    (MCLBYTES - ETHER_ALIGN)
 #define IXL_MTU_ETHERLEN                ETHER_HDR_LEN \
                                         + ETHER_CRC_LEN
 #if 0
 #define IXL_MAX_MTU                     (9728 - IXL_MTU_ETHERLEN)
 #else
 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
 #define IXL_MAX_MTU                     (9600 - IXL_MTU_ETHERLEN)
 #endif
 #define IXL_MIN_MTU                     (ETHER_MIN_LEN - ETHER_CRC_LEN)
 
 #define IXL_PCIREG                      PCI_MAPREG_START
 
 #define IXL_ITR0                        0x0
 #define IXL_ITR1                        0x1
 #define IXL_ITR2                        0x2
 #define IXL_NOITR                       0x3
 
 #define IXL_AQ_NUM                      256
 #define IXL_AQ_MASK                     (IXL_AQ_NUM - 1)
 #define IXL_AQ_ALIGN                    64 /* lol */
 #define IXL_AQ_BUFLEN                   4096
 
 #define IXL_HMC_ROUNDUP                 512
 #define IXL_HMC_PGSIZE                  4096
 #define IXL_HMC_DVASZ                   sizeof(uint64_t)
 #define IXL_HMC_PGS                     (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
 #define IXL_HMC_L2SZ                    (IXL_HMC_PGSIZE * IXL_HMC_PGS)
 #define IXL_HMC_PDVALID                 1ULL
 
 #define IXL_ATQ_EXEC_TIMEOUT            (10 * hz)
 
 #define IXL_SRRD_SRCTL_ATTEMPTS         100000
 
 struct ixl_aq_regs {
         bus_size_t atq_tail;
         bus_size_t atq_head;
         bus_size_t atq_len;
         bus_size_t atq_bal;
         bus_size_t atq_bah;
 
         bus_size_t arq_tail;
         bus_size_t arq_head;
         bus_size_t arq_len;
         bus_size_t arq_bal;
         bus_size_t arq_bah;
 
         uint32_t atq_len_enable;
         uint32_t atq_tail_mask;
         uint32_t atq_head_mask;
 
         uint32_t arq_len_enable;
         uint32_t arq_tail_mask;
         uint32_t arq_head_mask;
 };
 
 struct ixl_phy_type {
         uint64_t phy_type;
         uint64_t ifm_type;
 };
 
 struct ixl_speed_type {
         uint8_t dev_speed;
         uint64_t net_speed;
 };
 
 struct ixl_aq_buf {
         SIMPLEQ_ENTRY(ixl_aq_buf)
                 aqb_entry;
         void *aqb_data;
         bus_dmamap_t aqb_map;
         bus_dma_segment_t aqb_seg;
         size_t aqb_size;
         int aqb_nsegs;
 };
 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
 
 struct ixl_dmamem {
         bus_dmamap_t ixm_map;
         bus_dma_segment_t ixm_seg;
         int ixm_nsegs;
         size_t ixm_size;
         void *ixm_kva;
 };
 
 #define IXL_DMA_MAP(_ixm)       ((_ixm)->ixm_map)
 #define IXL_DMA_DVA(_ixm)       ((_ixm)->ixm_map->dm_segs[0].ds_addr)
 #define IXL_DMA_KVA(_ixm)       ((void *)(_ixm)->ixm_kva)
 #define IXL_DMA_LEN(_ixm)       ((_ixm)->ixm_size)
 
 struct ixl_hmc_entry {
         uint64_t hmc_base;
         uint32_t hmc_count;
         uint64_t hmc_size;
 };
 
 enum ixl_hmc_types {
         IXL_HMC_LAN_TX = 0,
         IXL_HMC_LAN_RX,
         IXL_HMC_FCOE_CTX,
         IXL_HMC_FCOE_FILTER,
         IXL_HMC_COUNT
 };
 
 struct ixl_hmc_pack {
         uint16_t offset;
         uint16_t width;
         uint16_t lsb;
 };
 
 /*
  * these hmc objects have weird sizes and alignments, so these are abstract
  * representations of them that are nice for c to populate.
  *
  * the packing code relies on little-endian values being stored in the fields,
  * no high bits in the fields being set, and the fields must be packed in the
  * same order as they are in the ctx structure.
  */
 
 struct ixl_hmc_rxq {
         uint16_t head;
         uint8_t cpuid;
         uint64_t base;
 #define IXL_HMC_RXQ_BASE_UNIT           128
         uint16_t qlen;
         uint16_t dbuff;
 #define IXL_HMC_RXQ_DBUFF_UNIT          128
         uint8_t hbuff;
 #define IXL_HMC_RXQ_HBUFF_UNIT          64
         uint8_t dtype;
 #define IXL_HMC_RXQ_DTYPE_NOSPLIT       0x0
 #define IXL_HMC_RXQ_DTYPE_HSPLIT        0x1
 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS  0x2
         uint8_t dsize;
 #define IXL_HMC_RXQ_DSIZE_16            0
 #define IXL_HMC_RXQ_DSIZE_32            1
         uint8_t crcstrip;
         uint8_t fc_ena;
         uint8_t l2sel;
         uint8_t hsplit_0;
         uint8_t hsplit_1;
         uint8_t showiv;
         uint16_t rxmax;
         uint8_t tphrdesc_ena;
         uint8_t tphwdesc_ena;
         uint8_t tphdata_ena;
         uint8_t tphhead_ena;
         uint8_t lrxqthresh;
         uint8_t prefena;
 };
 
 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
         { offsetof(struct ixl_hmc_rxq, head),           13,     0 },
         { offsetof(struct ixl_hmc_rxq, cpuid),          8,      13 },
         { offsetof(struct ixl_hmc_rxq, base),           57,     32 },
         { offsetof(struct ixl_hmc_rxq, qlen),           13,     89 },
         { offsetof(struct ixl_hmc_rxq, dbuff),          7,      102 },
         { offsetof(struct ixl_hmc_rxq, hbuff),          5,      109 },
         { offsetof(struct ixl_hmc_rxq, dtype),          2,      114 },
         { offsetof(struct ixl_hmc_rxq, dsize),          1,      116 },
         { offsetof(struct ixl_hmc_rxq, crcstrip),       1,      117 },
         { offsetof(struct ixl_hmc_rxq, fc_ena),         1,      118 },
         { offsetof(struct ixl_hmc_rxq, l2sel),          1,      119 },
         { offsetof(struct ixl_hmc_rxq, hsplit_0),       4,      120 },
         { offsetof(struct ixl_hmc_rxq, hsplit_1),       2,      124 },
         { offsetof(struct ixl_hmc_rxq, showiv),         1,      127 },
         { offsetof(struct ixl_hmc_rxq, rxmax),          14,     174 },
         { offsetof(struct ixl_hmc_rxq, tphrdesc_ena),   1,      193 },
         { offsetof(struct ixl_hmc_rxq, tphwdesc_ena),   1,      194 },
         { offsetof(struct ixl_hmc_rxq, tphdata_ena),    1,      195 },
         { offsetof(struct ixl_hmc_rxq, tphhead_ena),    1,      196 },
         { offsetof(struct ixl_hmc_rxq, lrxqthresh),     3,      198 },
         { offsetof(struct ixl_hmc_rxq, prefena),        1,      201 },
 };
 
 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
 
 struct ixl_hmc_txq {
         uint16_t head;
         uint8_t new_context;
         uint64_t base;
 #define IXL_HMC_TXQ_BASE_UNIT           128
         uint8_t fc_ena;
         uint8_t timesync_ena;
         uint8_t fd_ena;
         uint8_t alt_vlan_ena;
         uint8_t cpuid;
         uint16_t thead_wb;
         uint8_t head_wb_ena;
 #define IXL_HMC_TXQ_DESC_WB             0
 #define IXL_HMC_TXQ_HEAD_WB             1
         uint16_t qlen;
         uint8_t tphrdesc_ena;
         uint8_t tphrpacket_ena;
         uint8_t tphwdesc_ena;
         uint64_t head_wb_addr;
         uint32_t crc;
         uint16_t rdylist;
         uint8_t rdylist_act;
 };
 
 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
         { offsetof(struct ixl_hmc_txq, head),           13,     0 },
         { offsetof(struct ixl_hmc_txq, new_context),    1,      30 },
         { offsetof(struct ixl_hmc_txq, base),           57,     32 },
         { offsetof(struct ixl_hmc_txq, fc_ena),         1,      89 },
         { offsetof(struct ixl_hmc_txq, timesync_ena),   1,      90 },
         { offsetof(struct ixl_hmc_txq, fd_ena),         1,      91 },
         { offsetof(struct ixl_hmc_txq, alt_vlan_ena),   1,      92 },
         { offsetof(struct ixl_hmc_txq, cpuid),          8,      96 },
 /* line 1 */
         { offsetof(struct ixl_hmc_txq, thead_wb),       13,     0 + 128 },
         { offsetof(struct ixl_hmc_txq, head_wb_ena),    1,      32 + 128 },
         { offsetof(struct ixl_hmc_txq, qlen),           13,     33 + 128 },
         { offsetof(struct ixl_hmc_txq, tphrdesc_ena),   1,      46 + 128 },
         { offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1,      47 + 128 },
         { offsetof(struct ixl_hmc_txq, tphwdesc_ena),   1,      48 + 128 },
         { offsetof(struct ixl_hmc_txq, head_wb_addr),   64,     64 + 128 },
 /* line 7 */
         { offsetof(struct ixl_hmc_txq, crc),            32,     0 + (7*128) },
         { offsetof(struct ixl_hmc_txq, rdylist),        10,     84 + (7*128) },
         { offsetof(struct ixl_hmc_txq, rdylist_act),    1,      94 + (7*128) },
 };
 
 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
 
 struct ixl_work {
         struct work ixw_cookie;
         void (*ixw_func)(void *);
         void *ixw_arg;
         unsigned int ixw_added;
 };
 #define IXL_WORKQUEUE_PRI       PRI_SOFTNET
 
 struct ixl_tx_map {
         struct mbuf *txm_m;
         bus_dmamap_t txm_map;
         unsigned int txm_eop;
 };
 
 struct ixl_tx_ring {
         kmutex_t txr_lock;
         struct ixl_softc *txr_sc;
 
         unsigned int txr_prod;
         unsigned int txr_cons;
 
         struct ixl_tx_map *txr_maps;
         struct ixl_dmamem txr_mem;
 
         bus_size_t txr_tail;
         unsigned int txr_qid;
         pcq_t *txr_intrq;
         void *txr_si;
 
         struct evcnt txr_defragged;
         struct evcnt txr_defrag_failed;
         struct evcnt txr_pcqdrop;
         struct evcnt txr_transmitdef;
         struct evcnt txr_intr;
         struct evcnt txr_defer;
 };
 
 struct ixl_rx_map {
         struct mbuf *rxm_m;
         bus_dmamap_t rxm_map;
 };
 
 struct ixl_rx_ring {
         kmutex_t rxr_lock;
 
         unsigned int rxr_prod;
         unsigned int rxr_cons;
 
         struct ixl_rx_map *rxr_maps;
         struct ixl_dmamem rxr_mem;
 
         struct mbuf *rxr_m_head;
         struct mbuf **rxr_m_tail;
 
         bus_size_t rxr_tail;
         unsigned int rxr_qid;
 
         struct evcnt rxr_mgethdr_failed;
         struct evcnt rxr_mgetcl_failed;
         struct evcnt rxr_mbuf_load_failed;
         struct evcnt rxr_intr;
         struct evcnt rxr_defer;
 };
 
 struct ixl_queue_pair {
         struct ixl_softc *qp_sc;
         struct ixl_tx_ring *qp_txr;
         struct ixl_rx_ring *qp_rxr;
 
         char qp_name[16];
 
         void *qp_si;
         struct work qp_work;
         bool qp_workqueue;
 };
 
 struct ixl_atq {
         struct ixl_aq_desc iatq_desc;
         void (*iatq_fn)(struct ixl_softc *,
             const struct ixl_aq_desc *);
 };
 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
 
 struct ixl_product {
         unsigned int vendor_id;
         unsigned int product_id;
 };
 
 struct ixl_stats_counters {
         bool isc_has_offset;
         struct evcnt isc_crc_errors;
         uint64_t isc_crc_errors_offset;
         struct evcnt isc_illegal_bytes;
         uint64_t isc_illegal_bytes_offset;
         struct evcnt isc_rx_bytes;
         uint64_t isc_rx_bytes_offset;
         struct evcnt isc_rx_discards;
         uint64_t isc_rx_discards_offset;
         struct evcnt isc_rx_unicast;
         uint64_t isc_rx_unicast_offset;
         struct evcnt isc_rx_multicast;
         uint64_t isc_rx_multicast_offset;
         struct evcnt isc_rx_broadcast;
         uint64_t isc_rx_broadcast_offset;
         struct evcnt isc_rx_size_64;
         uint64_t isc_rx_size_64_offset;
         struct evcnt isc_rx_size_127;
         uint64_t isc_rx_size_127_offset;
         struct evcnt isc_rx_size_255;
         uint64_t isc_rx_size_255_offset;
         struct evcnt isc_rx_size_511;
         uint64_t isc_rx_size_511_offset;
         struct evcnt isc_rx_size_1023;
         uint64_t isc_rx_size_1023_offset;
         struct evcnt isc_rx_size_1522;
         uint64_t isc_rx_size_1522_offset;
         struct evcnt isc_rx_size_big;
         uint64_t isc_rx_size_big_offset;
         struct evcnt isc_rx_undersize;
         uint64_t isc_rx_undersize_offset;
         struct evcnt isc_rx_oversize;
         uint64_t isc_rx_oversize_offset;
         struct evcnt isc_rx_fragments;
         uint64_t isc_rx_fragments_offset;
         struct evcnt isc_rx_jabber;
         uint64_t isc_rx_jabber_offset;
         struct evcnt isc_tx_bytes;
         uint64_t isc_tx_bytes_offset;
         struct evcnt isc_tx_dropped_link_down;
         uint64_t isc_tx_dropped_link_down_offset;
         struct evcnt isc_tx_unicast;
         uint64_t isc_tx_unicast_offset;
         struct evcnt isc_tx_multicast;
         uint64_t isc_tx_multicast_offset;
         struct evcnt isc_tx_broadcast;
         uint64_t isc_tx_broadcast_offset;
         struct evcnt isc_tx_size_64;
         uint64_t isc_tx_size_64_offset;
         struct evcnt isc_tx_size_127;
         uint64_t isc_tx_size_127_offset;
         struct evcnt isc_tx_size_255;
         uint64_t isc_tx_size_255_offset;
         struct evcnt isc_tx_size_511;
         uint64_t isc_tx_size_511_offset;
         struct evcnt isc_tx_size_1023;
         uint64_t isc_tx_size_1023_offset;
         struct evcnt isc_tx_size_1522;
         uint64_t isc_tx_size_1522_offset;
         struct evcnt isc_tx_size_big;
         uint64_t isc_tx_size_big_offset;
         struct evcnt isc_mac_local_faults;
         uint64_t isc_mac_local_faults_offset;
         struct evcnt isc_mac_remote_faults;
         uint64_t isc_mac_remote_faults_offset;
         struct evcnt isc_link_xon_rx;
         uint64_t isc_link_xon_rx_offset;
         struct evcnt isc_link_xon_tx;
         uint64_t isc_link_xon_tx_offset;
         struct evcnt isc_link_xoff_rx;
         uint64_t isc_link_xoff_rx_offset;
         struct evcnt isc_link_xoff_tx;
         uint64_t isc_link_xoff_tx_offset;
         struct evcnt isc_vsi_rx_discards;
         uint64_t isc_vsi_rx_discards_offset;
         struct evcnt isc_vsi_rx_bytes;
         uint64_t isc_vsi_rx_bytes_offset;
         struct evcnt isc_vsi_rx_unicast;
         uint64_t isc_vsi_rx_unicast_offset;
         struct evcnt isc_vsi_rx_multicast;
         uint64_t isc_vsi_rx_multicast_offset;
         struct evcnt isc_vsi_rx_broadcast;
         uint64_t isc_vsi_rx_broadcast_offset;
         struct evcnt isc_vsi_tx_errors;
         uint64_t isc_vsi_tx_errors_offset;
         struct evcnt isc_vsi_tx_bytes;
         uint64_t isc_vsi_tx_bytes_offset;
         struct evcnt isc_vsi_tx_unicast;
         uint64_t isc_vsi_tx_unicast_offset;
         struct evcnt isc_vsi_tx_multicast;
         uint64_t isc_vsi_tx_multicast_offset;
         struct evcnt isc_vsi_tx_broadcast;
         uint64_t isc_vsi_tx_broadcast_offset;
 };
 
 /*
  * Locking notes:
  * + a field in ixl_tx_ring is protected by txr_lock (a spin mutex), and
  *   a field in ixl_rx_ring is protected by rxr_lock (a spin mutex).
  *    - more than one lock of them cannot be held at once.
  * + a field named sc_atq_* in ixl_softc is protected by sc_atq_lock
  *   (a spin mutex).
  *    - the lock cannot held with txr_lock or rxr_lock.
  * + a field named sc_arq_* is not protected by any lock.
  *    - operations for sc_arq_* is done in one context related to
  *      sc_arq_task.
  * + other fields in ixl_softc is protected by sc_cfg_lock
  *   (an adaptive mutex)
  *    - It must be held before another lock is held, and It can be
  *      released after the other lock is released.
  * */
 
 struct ixl_softc {
         device_t sc_dev;
         struct ethercom sc_ec;
         bool sc_attached;
         bool sc_dead;
         uint32_t sc_port;
         struct sysctllog *sc_sysctllog;
         struct workqueue *sc_workq;
         struct workqueue *sc_workq_txrx;
         int sc_stats_intval;
         callout_t sc_stats_callout;
         struct ixl_work sc_stats_task;
         struct ixl_stats_counters
                 sc_stats_counters;
         uint8_t sc_enaddr[ETHER_ADDR_LEN];
         struct ifmedia sc_media;
         uint64_t sc_media_status;
         uint64_t sc_media_active;
         uint64_t sc_phy_types;
         uint8_t sc_phy_abilities;
         uint8_t sc_phy_linkspeed;
         uint8_t sc_phy_fec_cfg;
         uint16_t sc_eee_cap;
         uint32_t sc_eeer_val;
         uint8_t sc_d3_lpan;
         kmutex_t sc_cfg_lock;
         enum i40e_mac_type sc_mac_type;
         uint32_t sc_rss_table_size;
         uint32_t sc_rss_table_entry_width;
         bool sc_txrx_workqueue;
         u_int sc_tx_process_limit;
         u_int sc_rx_process_limit;
         u_int sc_tx_intr_process_limit;
         u_int sc_rx_intr_process_limit;
 
         int sc_cur_ec_capenable;
 
         struct pci_attach_args sc_pa;
         pci_intr_handle_t *sc_ihp;
         void **sc_ihs;
         unsigned int sc_nintrs;
 
         bus_dma_tag_t sc_dmat;
         bus_space_tag_t sc_memt;
         bus_space_handle_t sc_memh;
         bus_size_t sc_mems;
 
         uint8_t sc_pf_id;
         uint16_t sc_uplink_seid;        /* le */
         uint16_t sc_downlink_seid;      /* le */
         uint16_t sc_vsi_number;
         uint16_t sc_vsi_stat_counter_idx;
         uint16_t sc_seid;
         unsigned int sc_base_queue;
 
         pci_intr_type_t sc_intrtype;
         unsigned int sc_msix_vector_queue;
 
         struct ixl_dmamem sc_scratch;
         struct ixl_dmamem sc_aqbuf;
 
         const struct ixl_aq_regs *
                 sc_aq_regs;
         uint32_t sc_aq_flags;
 #define IXL_SC_AQ_FLAG_RXCTL    __BIT(0)
 #define IXL_SC_AQ_FLAG_NVMLOCK  __BIT(1)
 #define IXL_SC_AQ_FLAG_NVMREAD  __BIT(2)
 #define IXL_SC_AQ_FLAG_RSS      __BIT(3)
 
         kmutex_t sc_atq_lock;
         kcondvar_t sc_atq_cv;
         struct ixl_dmamem sc_atq;
         unsigned int sc_atq_prod;
         unsigned int sc_atq_cons;
 
         struct ixl_dmamem sc_arq;
         struct ixl_work sc_arq_task;
         struct ixl_aq_bufs sc_arq_idle;
         struct ixl_aq_buf *sc_arq_live[IXL_AQ_NUM];
         unsigned int sc_arq_prod;
         unsigned int sc_arq_cons;
 
         struct ixl_work sc_link_state_task;
         struct ixl_atq sc_link_state_atq;
 
         struct ixl_dmamem sc_hmc_sd;
         struct ixl_dmamem sc_hmc_pd;
         struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];
 
         unsigned int sc_tx_ring_ndescs;
         unsigned int sc_rx_ring_ndescs;
         unsigned int sc_nqueue_pairs;
         unsigned int sc_nqueue_pairs_max;
         unsigned int sc_nqueue_pairs_device;
         struct ixl_queue_pair *sc_qps;
         uint32_t sc_itr_rx;
         uint32_t sc_itr_tx;
 
         struct evcnt sc_event_atq;
         struct evcnt sc_event_link;
         struct evcnt sc_event_ecc_err;
         struct evcnt sc_event_pci_exception;
         struct evcnt sc_event_crit_err;
 };
 
 #define IXL_TXRX_PROCESS_UNLIMIT        UINT_MAX
 #define IXL_TX_PROCESS_LIMIT            256
 #define IXL_RX_PROCESS_LIMIT            256
 #define IXL_TX_INTR_PROCESS_LIMIT       256
 #define IXL_RX_INTR_PROCESS_LIMIT       0U
 
 #define IXL_IFCAP_RXCSUM        (IFCAP_CSUM_IPv4_Rx | \
                                 IFCAP_CSUM_TCPv4_Rx | \
                                 IFCAP_CSUM_UDPv4_Rx | \
                                 IFCAP_CSUM_TCPv6_Rx | \
                                 IFCAP_CSUM_UDPv6_Rx)
 #define IXL_IFCAP_TXCSUM        (IFCAP_CSUM_IPv4_Tx | \
                                 IFCAP_CSUM_TCPv4_Tx | \
                                 IFCAP_CSUM_UDPv4_Tx | \
                                 IFCAP_CSUM_TCPv6_Tx | \
                                 IFCAP_CSUM_UDPv6_Tx)
 #define IXL_CSUM_ALL_OFFLOAD    (M_CSUM_IPv4 | \
                                 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
                                 M_CSUM_UDPv4 | M_CSUM_UDPv6)
 
 #define delaymsec(_x)   DELAY(1000 * (_x))
 #ifdef IXL_DEBUG
 #define DDPRINTF(sc, fmt, args...) \
 do { \
         if ((sc) != NULL) { \
                 device_printf( \
                     ((struct ixl_softc *)(sc))->sc_dev, \
                     ""); \
         } \
         printf("%s:\t" fmt, __func__, ##args); \
 } while (0)
 #else
 #define DDPRINTF(sc, fmt, args...)      __nothing
 #endif
 #ifndef IXL_STATS_INTERVAL_MSEC
 #define IXL_STATS_INTERVAL_MSEC 10000
 #endif
 #ifndef IXL_QUEUE_NUM
 #define IXL_QUEUE_NUM           0
 #endif
 
 static bool ixl_param_nomsix = false;
 static int ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
 static int ixl_param_nqps_limit = IXL_QUEUE_NUM;
 static unsigned int ixl_param_tx_ndescs = 1024;
 static unsigned int ixl_param_rx_ndescs = 1024;
 
 static enum i40e_mac_type
             ixl_mactype(pci_product_id_t);
+static void ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
 static void ixl_clear_hw(struct ixl_softc *);
 static int  ixl_pf_reset(struct ixl_softc *);
 
 static int  ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
                 bus_size_t, bus_size_t);
 static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
 
 static int  ixl_arq_fill(struct ixl_softc *);
 static void ixl_arq_unfill(struct ixl_softc *);
 
 static int  ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
                 unsigned int);
 static void ixl_atq_set(struct ixl_atq *,
                 void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
 static int  ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
 static void ixl_atq_done(struct ixl_softc *);
 static int  ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
 static int  ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *);
 static int  ixl_get_version(struct ixl_softc *);
 static int  ixl_get_nvm_version(struct ixl_softc *);
 static int  ixl_get_hw_capabilities(struct ixl_softc *);
 static int  ixl_pxe_clear(struct ixl_softc *);
 static int  ixl_lldp_shut(struct ixl_softc *);
 static int  ixl_get_mac(struct ixl_softc *);
 static int  ixl_get_switch_config(struct ixl_softc *);
 static int  ixl_phy_mask_ints(struct ixl_softc *);
 static int  ixl_get_phy_info(struct ixl_softc *);
 static int  ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
 static int  ixl_set_phy_autoselect(struct ixl_softc *);
 static int  ixl_restart_an(struct ixl_softc *);
 static int  ixl_hmc(struct ixl_softc *);
 static void ixl_hmc_free(struct ixl_softc *);
 static int  ixl_get_vsi(struct ixl_softc *);
 static int  ixl_set_vsi(struct ixl_softc *);
 static void ixl_set_filter_control(struct ixl_softc *);
 static void ixl_get_link_status(void *);
 static int  ixl_get_link_status_poll(struct ixl_softc *, int *);
 static void ixl_get_link_status_done(struct ixl_softc *,
                 const struct ixl_aq_desc *);
 static int  ixl_set_link_status_locked(struct ixl_softc *,
                 const struct ixl_aq_desc *);
 static uint64_t ixl_search_link_speed(uint8_t);
 static uint8_t ixl_search_baudrate(uint64_t);
 static void ixl_config_rss(struct ixl_softc *);
 static int  ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
                 uint16_t, uint16_t);
 static int  ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
                 uint16_t, uint16_t);
 static void ixl_arq(void *);
 static void ixl_hmc_pack(void *, const void *,
                 const struct ixl_hmc_pack *, unsigned int);
 static uint32_t ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
 static void ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
 static int  ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
 
 static int  ixl_match(device_t, cfdata_t, void *);
 static void ixl_attach(device_t, device_t, void *);
 static int  ixl_detach(device_t, int);
 
 static void ixl_media_add(struct ixl_softc *);
 static int  ixl_media_change(struct ifnet *);
 static void ixl_media_status(struct ifnet *, struct ifmediareq *);
 static void ixl_watchdog(struct ifnet *);
 static int  ixl_ioctl(struct ifnet *, u_long, void *);
 static void ixl_start(struct ifnet *);
 static int  ixl_transmit(struct ifnet *, struct mbuf *);
 static void ixl_deferred_transmit(void *);
 static int  ixl_intr(void *);
 static int  ixl_queue_intr(void *);
 static int  ixl_other_intr(void *);
 static void ixl_handle_queue(void *);
 static void ixl_handle_queue_wk(struct work *, void *);
 static void ixl_sched_handle_queue(struct ixl_softc *,
                 struct ixl_queue_pair *);
 static int  ixl_init(struct ifnet *);
 static int  ixl_init_locked(struct ixl_softc *);
 static void ixl_stop(struct ifnet *, int);
 static void ixl_stop_locked(struct ixl_softc *);
 static int  ixl_iff(struct ixl_softc *);
 static int  ixl_ifflags_cb(struct ethercom *);
 static int  ixl_setup_interrupts(struct ixl_softc *);
 static int  ixl_establish_intx(struct ixl_softc *);
 static int  ixl_establish_msix(struct ixl_softc *);
 static void ixl_enable_queue_intr(struct ixl_softc *,
                 struct ixl_queue_pair *);
 static void ixl_disable_queue_intr(struct ixl_softc *,
                 struct ixl_queue_pair *);
 static void ixl_enable_other_intr(struct ixl_softc *);
 static void ixl_disable_other_intr(struct ixl_softc *);
 static void ixl_config_queue_intr(struct ixl_softc *);
 static void ixl_config_other_intr(struct ixl_softc *);
 
 static struct ixl_tx_ring *
             ixl_txr_alloc(struct ixl_softc *, unsigned int);
 static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
 static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
 static int  ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
 static int  ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
 static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
 static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
 static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
 static int  ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
 
 static struct ixl_rx_ring *
             ixl_rxr_alloc(struct ixl_softc *, unsigned int);
 static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
874static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *); 875static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
875static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *); 876static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
876static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *); 877static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
877static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *); 878static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
878static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *); 879static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
879static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int); 880static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
880static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *); 881static int ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
881 882
882static struct workqueue * 883static struct workqueue *
883 ixl_workq_create(const char *, pri_t, int, int); 884 ixl_workq_create(const char *, pri_t, int, int);
884static void ixl_workq_destroy(struct workqueue *); 885static void ixl_workq_destroy(struct workqueue *);
885static int ixl_workqs_teardown(device_t); 886static int ixl_workqs_teardown(device_t);
886static void ixl_work_set(struct ixl_work *, void (*)(void *), void *); 887static void ixl_work_set(struct ixl_work *, void (*)(void *), void *);
887static void ixl_work_add(struct workqueue *, struct ixl_work *); 888static void ixl_work_add(struct workqueue *, struct ixl_work *);
888static void ixl_work_wait(struct workqueue *, struct ixl_work *); 889static void ixl_work_wait(struct workqueue *, struct ixl_work *);
889static void ixl_workq_work(struct work *, void *); 890static void ixl_workq_work(struct work *, void *);
890static const struct ixl_product * 891static const struct ixl_product *
891 ixl_lookup(const struct pci_attach_args *pa); 892 ixl_lookup(const struct pci_attach_args *pa);
892static void ixl_link_state_update(struct ixl_softc *, 893static void ixl_link_state_update(struct ixl_softc *,
893 const struct ixl_aq_desc *); 894 const struct ixl_aq_desc *);
894static int ixl_vlan_cb(struct ethercom *, uint16_t, bool); 895static int ixl_vlan_cb(struct ethercom *, uint16_t, bool);
895static int ixl_setup_vlan_hwfilter(struct ixl_softc *); 896static int ixl_setup_vlan_hwfilter(struct ixl_softc *);
896static void ixl_teardown_vlan_hwfilter(struct ixl_softc *); 897static void ixl_teardown_vlan_hwfilter(struct ixl_softc *);
897static int ixl_update_macvlan(struct ixl_softc *); 898static int ixl_update_macvlan(struct ixl_softc *);
898static int ixl_setup_interrupts(struct ixl_softc *); 899static int ixl_setup_interrupts(struct ixl_softc *);
899static void ixl_teardown_interrupts(struct ixl_softc *); 900static void ixl_teardown_interrupts(struct ixl_softc *);
900static int ixl_setup_stats(struct ixl_softc *); 901static int ixl_setup_stats(struct ixl_softc *);
901static void ixl_teardown_stats(struct ixl_softc *); 902static void ixl_teardown_stats(struct ixl_softc *);
902static void ixl_stats_callout(void *); 903static void ixl_stats_callout(void *);
903static void ixl_stats_update(void *); 904static void ixl_stats_update(void *);
904static int ixl_setup_sysctls(struct ixl_softc *); 905static int ixl_setup_sysctls(struct ixl_softc *);
905static void ixl_teardown_sysctls(struct ixl_softc *); 906static void ixl_teardown_sysctls(struct ixl_softc *);
906static int ixl_queue_pairs_alloc(struct ixl_softc *); 907static int ixl_queue_pairs_alloc(struct ixl_softc *);
907static void ixl_queue_pairs_free(struct ixl_softc *); 908static void ixl_queue_pairs_free(struct ixl_softc *);
908 909
909static const struct ixl_phy_type ixl_phy_type_map[] = { 910static const struct ixl_phy_type ixl_phy_type_map[] = {
910 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII }, 911 { 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
911 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX }, 912 { 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
912 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 }, 913 { 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
913 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR }, 914 { 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
914 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 }, 915 { 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
915 { 1ULL << IXL_PHY_TYPE_XAUI | 916 { 1ULL << IXL_PHY_TYPE_XAUI |
916 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 }, 917 1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
917 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI }, 918 { 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
918 { 1ULL << IXL_PHY_TYPE_XLAUI | 919 { 1ULL << IXL_PHY_TYPE_XLAUI |
919 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI }, 920 1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
920 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU | 921 { 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
921 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 }, 922 1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
922 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU | 923 { 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
923 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 }, 924 1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
924 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC }, 925 { 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
925 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC }, 926 { 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
926 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX }, 927 { 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
927 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL | 928 { 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
928 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T }, 929 1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
929 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T }, 930 { 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
930 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR }, 931 { 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
931 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR }, 932 { 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
932 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX }, 933 { 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_TWINAX },
933 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 }, 934 { 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
934 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 }, 935 { 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
935 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX }, 936 { 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
936 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX }, 937 { 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
937 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 }, 938 { 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
938 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR }, 939 { 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
939 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR }, 940 { 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
940 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR }, 941 { 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
941 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR }, 942 { 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
942 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC }, 943 { 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
943 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC }, 944 { 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_ACC },
944}; 945};
945 946
946static const struct ixl_speed_type ixl_speed_type_map[] = { 947static const struct ixl_speed_type ixl_speed_type_map[] = {
947 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) }, 948 { IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
948 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) }, 949 { IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
949 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) }, 950 { IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
950 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) }, 951 { IXL_AQ_LINK_SPEED_1000MB, IF_Mbps(1000) },
951 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)}, 952 { IXL_AQ_LINK_SPEED_100MB, IF_Mbps(100)},
952}; 953};
953 954
954static const struct ixl_aq_regs ixl_pf_aq_regs = { 955static const struct ixl_aq_regs ixl_pf_aq_regs = {
955 .atq_tail = I40E_PF_ATQT, 956 .atq_tail = I40E_PF_ATQT,
956 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK, 957 .atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
957 .atq_head = I40E_PF_ATQH, 958 .atq_head = I40E_PF_ATQH,
958 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK, 959 .atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
959 .atq_len = I40E_PF_ATQLEN, 960 .atq_len = I40E_PF_ATQLEN,
960 .atq_bal = I40E_PF_ATQBAL, 961 .atq_bal = I40E_PF_ATQBAL,
961 .atq_bah = I40E_PF_ATQBAH, 962 .atq_bah = I40E_PF_ATQBAH,
962 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK, 963 .atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,
963 964
964 .arq_tail = I40E_PF_ARQT, 965 .arq_tail = I40E_PF_ARQT,
965 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK, 966 .arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
966 .arq_head = I40E_PF_ARQH, 967 .arq_head = I40E_PF_ARQH,
967 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK, 968 .arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
968 .arq_len = I40E_PF_ARQLEN, 969 .arq_len = I40E_PF_ARQLEN,
969 .arq_bal = I40E_PF_ARQBAL, 970 .arq_bal = I40E_PF_ARQBAL,
970 .arq_bah = I40E_PF_ARQBAH, 971 .arq_bah = I40E_PF_ARQBAH,
971 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK, 972 .arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
972}; 973};
973 974
974#define ixl_rd(_s, _r) \ 975#define ixl_rd(_s, _r) \
975 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r)) 976 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
976#define ixl_wr(_s, _r, _v) \ 977#define ixl_wr(_s, _r, _v) \
977 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v)) 978 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
978#define ixl_barrier(_s, _r, _l, _o) \ 979#define ixl_barrier(_s, _r, _l, _o) \
979 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o)) 980 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
980#define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT) 981#define ixl_flush(_s) (void)ixl_rd((_s), I40E_GLGEN_STAT)
981#define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1)) 982#define ixl_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
982 983
983static inline uint32_t 984static inline uint32_t
984ixl_dmamem_hi(struct ixl_dmamem *ixm) 985ixl_dmamem_hi(struct ixl_dmamem *ixm)
985{ 986{
986 uint32_t retval; 987 uint32_t retval;
987 uint64_t val; 988 uint64_t val;
988 989
989 if (sizeof(IXL_DMA_DVA(ixm)) > 4) { 990 if (sizeof(IXL_DMA_DVA(ixm)) > 4) {
990 val = (intptr_t)IXL_DMA_DVA(ixm); 991 val = (intptr_t)IXL_DMA_DVA(ixm);
991 retval = (uint32_t)(val >> 32); 992 retval = (uint32_t)(val >> 32);
992 } else { 993 } else {
993 retval = 0; 994 retval = 0;
994 } 995 }
995 996
996 return retval; 997 return retval;
997} 998}
998 999
999static inline uint32_t 1000static inline uint32_t
1000ixl_dmamem_lo(struct ixl_dmamem *ixm) 1001ixl_dmamem_lo(struct ixl_dmamem *ixm)
1001{ 1002{
1002 1003
1003 return (uint32_t)IXL_DMA_DVA(ixm); 1004 return (uint32_t)IXL_DMA_DVA(ixm);
1004} 1005}
1005 1006
1006static inline void 1007static inline void
1007ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr) 1008ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1008{ 1009{
1009 uint64_t val; 1010 uint64_t val;
1010 1011
1011 if (sizeof(addr) > 4) { 1012 if (sizeof(addr) > 4) {
1012 val = (intptr_t)addr; 1013 val = (intptr_t)addr;
1013 iaq->iaq_param[2] = htole32(val >> 32); 1014 iaq->iaq_param[2] = htole32(val >> 32);
1014 } else { 1015 } else {
1015 iaq->iaq_param[2] = htole32(0); 1016 iaq->iaq_param[2] = htole32(0);
1016 } 1017 }
1017 1018
1018 iaq->iaq_param[3] = htole32(addr); 1019 iaq->iaq_param[3] = htole32(addr);
1019} 1020}
1020 1021
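A hypothetical call site for ixl_aq_dva() above, showing how a 64-bit DMA address is split across two little-endian descriptor parameters (illustrative only; the real call sites are in the admin-queue command paths later in the file):

	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_aqbuf));
	/* iaq.iaq_param[2] now holds the high 32 bits (0 when bus_addr_t
	 * is 32-bit) and iaq.iaq_param[3] the low 32 bits, both stored
	 * little-endian as the hardware expects. */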
1021static inline unsigned int 1022static inline unsigned int
1022ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs) 1023ixl_rxr_unrefreshed(unsigned int prod, unsigned int cons, unsigned int ndescs)
1023{ 1024{
1024 unsigned int num; 1025 unsigned int num;
1025 1026
1026 if (prod < cons) 1027 if (prod < cons)
1027 num = cons - prod; 1028 num = cons - prod;
1028 else 1029 else
1029 num = (ndescs - prod) + cons; 1030 num = (ndescs - prod) + cons;
1030 1031
1031 if (__predict_true(num > 0)) { 1032 if (__predict_true(num > 0)) {
1032 /* device cannot receive packets if all descriptors are filled */ 1033 /* device cannot receive packets if all descriptors are filled */
1033 num -= 1; 1034 num -= 1;
1034 } 1035 }
1035 1036
1036 return num; 1037 return num;
1037} 1038}
1038 1039
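For reference, a worked instance of ixl_rxr_unrefreshed()'s wrap-around arithmetic, as a minimal sketch with hypothetical values (real rings use sc_rx_ring_ndescs entries; example_rxr_unrefreshed is not a driver symbol):

	/* Minimal sketch, not driver code: the same arithmetic with fixed values. */
	static unsigned int
	example_rxr_unrefreshed(void)
	{
		unsigned int prod = 6, cons = 2, ndescs = 8;	/* hypothetical */
		unsigned int num;

		/* here the producer has wrapped past the end of the ring */
		num = (prod < cons) ? (cons - prod) : (ndescs - prod) + cons;
		/* (8 - 6) + 2 = 4; one slot is then subtracted so the ring
		 * is never posted completely full and prod == cons always
		 * means "empty", leaving 3 refreshable descriptors. */
		return num - 1;
	}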
1039CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc), 1040CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
1040 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL, 1041 ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
1041 DVF_DETACH_SHUTDOWN); 1042 DVF_DETACH_SHUTDOWN);
1042 1043
1043static const struct ixl_product ixl_products[] = { 1044static const struct ixl_product ixl_products[] = {
1044 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP }, 1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_SFP },
1045 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B }, 1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_B },
1046 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C }, 1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_KX_C },
1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A }, 1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_A },
1048 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B }, 1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_B },
1049 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C }, 1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_C },
1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T }, 1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T },
1051 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 }, 1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1052 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 }, 1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G }, 1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1054 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP }, 1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1055 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 }, 1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX }, 1057 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_KX },
1057 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP }, 1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_QSFP },
1058 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP }, 1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_SFP },
1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET }, 1060 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G_BASET },
1060 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET }, 1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_BASET },
1061 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP }, 1062 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_I_SFP },
1062 /* required last entry */ 1063 /* required last entry */
1063 {0, 0} 1064 {0, 0}
1064}; 1065};
1065 1066
1066static const struct ixl_product * 1067static const struct ixl_product *
1067ixl_lookup(const struct pci_attach_args *pa) 1068ixl_lookup(const struct pci_attach_args *pa)
1068{ 1069{
1069 const struct ixl_product *ixlp; 1070 const struct ixl_product *ixlp;
1070 1071
1071 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) { 1072 for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
1072 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id && 1073 if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
1073 PCI_PRODUCT(pa->pa_id) == ixlp->product_id) 1074 PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
1074 return ixlp; 1075 return ixlp;
1075 } 1076 }
1076 1077
1077 return NULL; 1078 return NULL;
1078} 1079}
1079 1080
1080static int 1081static int
1081ixl_match(device_t parent, cfdata_t match, void *aux) 1082ixl_match(device_t parent, cfdata_t match, void *aux)
1082{ 1083{
1083 const struct pci_attach_args *pa = aux; 1084 const struct pci_attach_args *pa = aux;
1084 1085
1085 return (ixl_lookup(pa) != NULL) ? 1 : 0; 1086 return (ixl_lookup(pa) != NULL) ? 1 : 0;
1086} 1087}
1087 1088
1088static void 1089static void
1089ixl_attach(device_t parent, device_t self, void *aux) 1090ixl_attach(device_t parent, device_t self, void *aux)
1090{ 1091{
1091 struct ixl_softc *sc; 1092 struct ixl_softc *sc;
1092 struct pci_attach_args *pa = aux; 1093 struct pci_attach_args *pa = aux;
1093 struct ifnet *ifp; 1094 struct ifnet *ifp;
1094 pcireg_t memtype; 1095 pcireg_t memtype;
1095 uint32_t firstq, port, ari, func; 1096 uint32_t firstq, port, ari, func;
1096 char xnamebuf[32]; 1097 char xnamebuf[32];
1097 int tries, rv, link; 1098 int tries, rv, link;
1098 1099
1099 sc = device_private(self); 1100 sc = device_private(self);
1100 sc->sc_dev = self; 1101 sc->sc_dev = self;
1101 ifp = &sc->sc_ec.ec_if; 1102 ifp = &sc->sc_ec.ec_if;
1102 1103
1103 sc->sc_pa = *pa; 1104 sc->sc_pa = *pa;
1104 sc->sc_dmat = (pci_dma64_available(pa)) ? 1105 sc->sc_dmat = (pci_dma64_available(pa)) ?
1105 pa->pa_dmat64 : pa->pa_dmat; 1106 pa->pa_dmat64 : pa->pa_dmat;
1106 sc->sc_aq_regs = &ixl_pf_aq_regs; 1107 sc->sc_aq_regs = &ixl_pf_aq_regs;
1107 1108
1108 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id)); 1109 sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1109 1110
 1111 ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);
 1112
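	/*
	 * ixl_pci_csr_setup() is the helper this revision introduces
	 * (defined further down in the diff).  A minimal sketch of the
	 * usual NetBSD idiom it applies, per the commit message
	 * (assumed, not necessarily the verbatim body):
	 *
	 *	pcireg_t csr;
	 *
	 *	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	 *	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
	 *	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	 *
	 * If the BIOS or UEFI left these bits clear, the register
	 * mapping and DMA below would not work until they are set.
	 */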
1110 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG); 1113 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1111 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0, 1114 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1112 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) { 1115 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1113 aprint_error(": unable to map registers\n"); 1116 aprint_error(": unable to map registers\n");
1114 return; 1117 return;
1115 } 1118 }
1116 1119
1117 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET); 1120 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1118 1121
1119 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC); 1122 firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1120 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK; 1123 firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1121 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; 1124 firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1122 sc->sc_base_queue = firstq; 1125 sc->sc_base_queue = firstq;
1123 1126
1124 ixl_clear_hw(sc); 1127 ixl_clear_hw(sc);
1125 if (ixl_pf_reset(sc) == -1) { 1128 if (ixl_pf_reset(sc) == -1) {
1126 /* error printed by ixl_pf_reset */ 1129 /* error printed by ixl_pf_reset */
1127 goto unmap; 1130 goto unmap;
1128 } 1131 }
1129 1132
1130 port = ixl_rd(sc, I40E_PFGEN_PORTNUM); 1133 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1131 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK; 1134 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1132 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; 1135 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1133 sc->sc_port = port; 1136 sc->sc_port = port;
1134 aprint_normal(": port %u", sc->sc_port); 1137 aprint_normal(": port %u", sc->sc_port);
1135 1138
1136 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP); 1139 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1137 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK; 1140 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1138 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; 1141 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1139 1142
1140 func = ixl_rd(sc, I40E_PF_FUNC_RID); 1143 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1141 sc->sc_pf_id = func & (ari ? 0xff : 0x7); 1144 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1142 1145
1143 /* initialise the adminq */ 1146 /* initialise the adminq */
1144 1147
1145 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET); 1148 mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1146 1149
1147 if (ixl_dmamem_alloc(sc, &sc->sc_atq, 1150 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1148 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1151 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1149 aprint_error("\n" "%s: unable to allocate atq\n", 1152 aprint_error("\n" "%s: unable to allocate atq\n",
1150 device_xname(self)); 1153 device_xname(self));
1151 goto unmap; 1154 goto unmap;
1152 } 1155 }
1153 1156
1154 SIMPLEQ_INIT(&sc->sc_arq_idle); 1157 SIMPLEQ_INIT(&sc->sc_arq_idle);
1155 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc); 1158 ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1156 sc->sc_arq_cons = 0; 1159 sc->sc_arq_cons = 0;
1157 sc->sc_arq_prod = 0; 1160 sc->sc_arq_prod = 0;
1158 1161
1159 if (ixl_dmamem_alloc(sc, &sc->sc_arq, 1162 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1160 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) { 1163 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1161 aprint_error("\n" "%s: unable to allocate arq\n", 1164 aprint_error("\n" "%s: unable to allocate arq\n",
1162 device_xname(self)); 1165 device_xname(self));
1163 goto free_atq; 1166 goto free_atq;
1164 } 1167 }
1165 1168
1166 if (!ixl_arq_fill(sc)) { 1169 if (!ixl_arq_fill(sc)) {
1167 aprint_error("\n" "%s: unable to fill arq descriptors\n", 1170 aprint_error("\n" "%s: unable to fill arq descriptors\n",
1168 device_xname(self)); 1171 device_xname(self));
1169 goto free_arq; 1172 goto free_arq;
1170 } 1173 }
1171 1174
1172 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1175 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1173 0, IXL_DMA_LEN(&sc->sc_atq), 1176 0, IXL_DMA_LEN(&sc->sc_atq),
1174 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1177 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1175 1178
1176 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1179 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1177 0, IXL_DMA_LEN(&sc->sc_arq), 1180 0, IXL_DMA_LEN(&sc->sc_arq),
1178 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1181 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1179 1182
1180 for (tries = 0; tries < 10; tries++) { 1183 for (tries = 0; tries < 10; tries++) {
1181 sc->sc_atq_cons = 0; 1184 sc->sc_atq_cons = 0;
1182 sc->sc_atq_prod = 0; 1185 sc->sc_atq_prod = 0;
1183 1186
1184 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1187 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1185 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1188 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1186 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1189 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1187 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1190 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1188 1191
1189 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE); 1192 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1190 1193
1191 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 1194 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1192 ixl_dmamem_lo(&sc->sc_atq)); 1195 ixl_dmamem_lo(&sc->sc_atq));
1193 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 1196 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1194 ixl_dmamem_hi(&sc->sc_atq)); 1197 ixl_dmamem_hi(&sc->sc_atq));
1195 ixl_wr(sc, sc->sc_aq_regs->atq_len, 1198 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1196 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM); 1199 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1197 1200
1198 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 1201 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1199 ixl_dmamem_lo(&sc->sc_arq)); 1202 ixl_dmamem_lo(&sc->sc_arq));
1200 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 1203 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1201 ixl_dmamem_hi(&sc->sc_arq)); 1204 ixl_dmamem_hi(&sc->sc_arq));
1202 ixl_wr(sc, sc->sc_aq_regs->arq_len, 1205 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1203 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM); 1206 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1204 1207
1205 rv = ixl_get_version(sc); 1208 rv = ixl_get_version(sc);
1206 if (rv == 0) 1209 if (rv == 0)
1207 break; 1210 break;
1208 if (rv != ETIMEDOUT) { 1211 if (rv != ETIMEDOUT) {
1209 aprint_error(", unable to get firmware version\n"); 1212 aprint_error(", unable to get firmware version\n");
1210 goto shutdown; 1213 goto shutdown;
1211 } 1214 }
1212 1215
1213 delaymsec(100); 1216 delaymsec(100);
1214 } 1217 }
1215 1218
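	/*
	 * The loop above reprograms the admin queue base and length
	 * registers, then polls firmware with "get version" up to 10
	 * times at 100 ms intervals, since the admin queue may not
	 * respond immediately after the PF reset performed earlier.
	 */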
1216 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod); 1219 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1217 1220
1218 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) { 1221 if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1219 aprint_error_dev(self, ", unable to allocate nvm buffer\n"); 1222 aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1220 goto shutdown; 1223 goto shutdown;
1221 } 1224 }
1222 1225
1223 ixl_get_nvm_version(sc); 1226 ixl_get_nvm_version(sc);
1224 1227
1225 if (sc->sc_mac_type == I40E_MAC_X722) 1228 if (sc->sc_mac_type == I40E_MAC_X722)
1226 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722; 1229 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1227 else 1230 else
1228 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710; 1231 sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1229 1232
1230 rv = ixl_get_hw_capabilities(sc); 1233 rv = ixl_get_hw_capabilities(sc);
1231 if (rv != 0) { 1234 if (rv != 0) {
1232 aprint_error(", GET HW CAPABILITIES %s\n", 1235 aprint_error(", GET HW CAPABILITIES %s\n",
1233 rv == ETIMEDOUT ? "timeout" : "error"); 1236 rv == ETIMEDOUT ? "timeout" : "error");
1234 goto free_aqbuf; 1237 goto free_aqbuf;
1235 } 1238 }
1236 1239
1237 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu); 1240 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1238 if (ixl_param_nqps_limit > 0) { 1241 if (ixl_param_nqps_limit > 0) {
1239 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max, 1242 sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1240 ixl_param_nqps_limit); 1243 ixl_param_nqps_limit);
1241 } 1244 }
1242 1245
1243 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 1246 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1244 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs; 1247 sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1245 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs; 1248 sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1246 1249
1247 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs); 1250 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1248 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs); 1251 KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1249 1252
1250 if (ixl_get_mac(sc) != 0) { 1253 if (ixl_get_mac(sc) != 0) {
1251 /* error printed by ixl_get_mac */ 1254 /* error printed by ixl_get_mac */
1252 goto free_aqbuf; 1255 goto free_aqbuf;
1253 } 1256 }
1254 1257
1255 aprint_normal("\n"); 1258 aprint_normal("\n");
1256 aprint_naive("\n"); 1259 aprint_naive("\n");
1257 1260
1258 aprint_normal_dev(self, "Ethernet address %s\n", 1261 aprint_normal_dev(self, "Ethernet address %s\n",
1259 ether_sprintf(sc->sc_enaddr)); 1262 ether_sprintf(sc->sc_enaddr));
1260 1263
1261 rv = ixl_pxe_clear(sc); 1264 rv = ixl_pxe_clear(sc);
1262 if (rv != 0) { 1265 if (rv != 0) {
1263 aprint_debug_dev(self, "CLEAR PXE MODE %s\n", 1266 aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1264 rv == ETIMEDOUT ? "timeout" : "error"); 1267 rv == ETIMEDOUT ? "timeout" : "error");
1265 } 1268 }
1266 1269
1267 ixl_set_filter_control(sc); 1270 ixl_set_filter_control(sc);
1268 1271
1269 if (ixl_hmc(sc) != 0) { 1272 if (ixl_hmc(sc) != 0) {
1270 /* error printed by ixl_hmc */ 1273 /* error printed by ixl_hmc */
1271 goto free_aqbuf; 1274 goto free_aqbuf;
1272 } 1275 }
1273 1276
1274 if (ixl_lldp_shut(sc) != 0) { 1277 if (ixl_lldp_shut(sc) != 0) {
1275 /* error printed by ixl_lldp_shut */ 1278 /* error printed by ixl_lldp_shut */
1276 goto free_hmc; 1279 goto free_hmc;
1277 } 1280 }
1278 1281
1279 if (ixl_phy_mask_ints(sc) != 0) { 1282 if (ixl_phy_mask_ints(sc) != 0) {
1280 /* error printed by ixl_phy_mask_ints */ 1283 /* error printed by ixl_phy_mask_ints */
1281 goto free_hmc; 1284 goto free_hmc;
1282 } 1285 }
1283 1286
1284 if (ixl_restart_an(sc) != 0) { 1287 if (ixl_restart_an(sc) != 0) {
1285 /* error printed by ixl_restart_an */ 1288 /* error printed by ixl_restart_an */
1286 goto free_hmc; 1289 goto free_hmc;
1287 } 1290 }
1288 1291
1289 if (ixl_get_switch_config(sc) != 0) { 1292 if (ixl_get_switch_config(sc) != 0) {
1290 /* error printed by ixl_get_switch_config */ 1293 /* error printed by ixl_get_switch_config */
1291 goto free_hmc; 1294 goto free_hmc;
1292 } 1295 }
1293 1296
1294 rv = ixl_get_link_status_poll(sc, NULL); 1297 rv = ixl_get_link_status_poll(sc, NULL);
1295 if (rv != 0) { 1298 if (rv != 0) {
1296 aprint_error_dev(self, "GET LINK STATUS %s\n", 1299 aprint_error_dev(self, "GET LINK STATUS %s\n",
1297 rv == ETIMEDOUT ? "timeout" : "error"); 1300 rv == ETIMEDOUT ? "timeout" : "error");
1298 goto free_hmc; 1301 goto free_hmc;
1299 } 1302 }
1300 1303
1301 /* 1304 /*
1302 * The FW often returns EIO for the "Get PHY Abilities" command 1305 * The FW often returns EIO for the "Get PHY Abilities" command
1303 * if it is issued without a delay 1306 * if it is issued without a delay
1304 */ 1307 */
1305 DELAY(500); 1308 DELAY(500);
1306 if (ixl_get_phy_info(sc) != 0) { 1309 if (ixl_get_phy_info(sc) != 0) {
1307 /* error printed by ixl_get_phy_info */ 1310 /* error printed by ixl_get_phy_info */
1308 goto free_hmc; 1311 goto free_hmc;
1309 } 1312 }
1310 1313
1311 if (ixl_dmamem_alloc(sc, &sc->sc_scratch, 1314 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1312 sizeof(struct ixl_aq_vsi_data), 8) != 0) { 1315 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1313 aprint_error_dev(self, "unable to allocate scratch buffer\n"); 1316 aprint_error_dev(self, "unable to allocate scratch buffer\n");
1314 goto free_hmc; 1317 goto free_hmc;
1315 } 1318 }
1316 1319
1317 rv = ixl_get_vsi(sc); 1320 rv = ixl_get_vsi(sc);
1318 if (rv != 0) { 1321 if (rv != 0) {
1319 aprint_error_dev(self, "GET VSI %s %d\n", 1322 aprint_error_dev(self, "GET VSI %s %d\n",
1320 rv == ETIMEDOUT ? "timeout" : "error", rv); 1323 rv == ETIMEDOUT ? "timeout" : "error", rv);
1321 goto free_scratch; 1324 goto free_scratch;
1322 } 1325 }
1323 1326
1324 rv = ixl_set_vsi(sc); 1327 rv = ixl_set_vsi(sc);
1325 if (rv != 0) { 1328 if (rv != 0) {
1326 aprint_error_dev(self, "UPDATE VSI error %s %d\n", 1329 aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1327 rv == ETIMEDOUT ? "timeout" : "error", rv); 1330 rv == ETIMEDOUT ? "timeout" : "error", rv);
1328 goto free_scratch; 1331 goto free_scratch;
1329 } 1332 }
1330 1333
1331 if (ixl_queue_pairs_alloc(sc) != 0) { 1334 if (ixl_queue_pairs_alloc(sc) != 0) {
1332 /* error printed by ixl_queue_pairs_alloc */ 1335 /* error printed by ixl_queue_pairs_alloc */
1333 goto free_scratch; 1336 goto free_scratch;
1334 } 1337 }
1335 1338
1336 if (ixl_setup_interrupts(sc) != 0) { 1339 if (ixl_setup_interrupts(sc) != 0) {
1337 /* error printed by ixl_setup_interrupts */ 1340 /* error printed by ixl_setup_interrupts */
1338 goto free_queue_pairs; 1341 goto free_queue_pairs;
1339 } 1342 }
1340 1343
1341 if (ixl_setup_stats(sc) != 0) { 1344 if (ixl_setup_stats(sc) != 0) {
1342 aprint_error_dev(self, "failed to setup event counters\n"); 1345 aprint_error_dev(self, "failed to setup event counters\n");
1343 goto teardown_intrs; 1346 goto teardown_intrs;
1344 } 1347 }
1345 1348
1346 if (ixl_setup_sysctls(sc) != 0) { 1349 if (ixl_setup_sysctls(sc) != 0) {
1347 /* error printed by ixl_setup_sysctls */ 1350 /* error printed by ixl_setup_sysctls */
1348 goto teardown_stats; 1351 goto teardown_stats;
1349 } 1352 }
1350 1353
1351 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self)); 1354 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1352 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI, 1355 sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1353 IPL_NET, WQ_MPSAFE); 1356 IPL_NET, WQ_MPSAFE);
1354 if (sc->sc_workq == NULL) 1357 if (sc->sc_workq == NULL)
1355 goto teardown_sysctls; 1358 goto teardown_sysctls;
1356 1359
1357 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self)); 1360 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1358 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk, 1361 rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1359 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE); 1362 sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1360 if (rv != 0) { 1363 if (rv != 0) {
1361 sc->sc_workq_txrx = NULL; 1364 sc->sc_workq_txrx = NULL;
1362 goto teardown_wqs; 1365 goto teardown_wqs;
1363 } 1366 }
1364 1367
1365 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self)); 1368 snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1366 cv_init(&sc->sc_atq_cv, xnamebuf); 1369 cv_init(&sc->sc_atq_cv, xnamebuf);
1367 1370
1368 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 1371 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1369 1372
1370 ifp->if_softc = sc; 1373 ifp->if_softc = sc;
1371 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1374 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1372 ifp->if_extflags = IFEF_MPSAFE; 1375 ifp->if_extflags = IFEF_MPSAFE;
1373 ifp->if_ioctl = ixl_ioctl; 1376 ifp->if_ioctl = ixl_ioctl;
1374 ifp->if_start = ixl_start; 1377 ifp->if_start = ixl_start;
1375 ifp->if_transmit = ixl_transmit; 1378 ifp->if_transmit = ixl_transmit;
1376 ifp->if_watchdog = ixl_watchdog; 1379 ifp->if_watchdog = ixl_watchdog;
1377 ifp->if_init = ixl_init; 1380 ifp->if_init = ixl_init;
1378 ifp->if_stop = ixl_stop; 1381 ifp->if_stop = ixl_stop;
1379 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs); 1382 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1380 IFQ_SET_READY(&ifp->if_snd); 1383 IFQ_SET_READY(&ifp->if_snd);
1381 ifp->if_capabilities |= IXL_IFCAP_RXCSUM; 1384 ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1382 ifp->if_capabilities |= IXL_IFCAP_TXCSUM; 1385 ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1383#if 0 1386#if 0
1384 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; 1387 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1385#endif 1388#endif
1386 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb); 1389 ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1387 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1390 sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1388 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING; 1391 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1389 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1392 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1390 1393
1391 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities; 1394 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1392 /* Disable VLAN_HWFILTER by default */ 1395 /* Disable VLAN_HWFILTER by default */
1393 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1396 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1394 1397
1395 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable; 1398 sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1396 1399
1397 sc->sc_ec.ec_ifmedia = &sc->sc_media; 1400 sc->sc_ec.ec_ifmedia = &sc->sc_media;
1398 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change, 1401 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change,
1399 ixl_media_status, &sc->sc_cfg_lock); 1402 ixl_media_status, &sc->sc_cfg_lock);
1400 1403
1401 ixl_media_add(sc); 1404 ixl_media_add(sc);
1402 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 1405 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1403 if (ISSET(sc->sc_phy_abilities, 1406 if (ISSET(sc->sc_phy_abilities,
1404 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1407 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1405 ifmedia_add(&sc->sc_media, 1408 ifmedia_add(&sc->sc_media,
1406 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL); 1409 IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1407 } 1410 }
1408 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL); 1411 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1409 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 1412 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1410 1413
1411 if_attach(ifp); 1414 if_attach(ifp);
1412 if_deferred_start_init(ifp, NULL); 1415 if_deferred_start_init(ifp, NULL);
1413 ether_ifattach(ifp, sc->sc_enaddr); 1416 ether_ifattach(ifp, sc->sc_enaddr);
1414 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb); 1417 ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1415 1418
1416 rv = ixl_get_link_status_poll(sc, &link); 1419 rv = ixl_get_link_status_poll(sc, &link);
1417 if (rv != 0) 1420 if (rv != 0)
1418 link = LINK_STATE_UNKNOWN; 1421 link = LINK_STATE_UNKNOWN;
1419 if_link_state_change(ifp, link); 1422 if_link_state_change(ifp, link);
1420 1423
1421 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done); 1424 ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
1422 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc); 1425 ixl_work_set(&sc->sc_link_state_task, ixl_get_link_status, sc);
1423 1426
1424 ixl_config_other_intr(sc); 1427 ixl_config_other_intr(sc);
1425 ixl_enable_other_intr(sc); 1428 ixl_enable_other_intr(sc);
1426 1429
1427 ixl_set_phy_autoselect(sc); 1430 ixl_set_phy_autoselect(sc);
1428 1431
1429 /* remove default mac filter and replace it so we can see vlans */ 1432 /* remove default mac filter and replace it so we can see vlans */
1430 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0); 1433 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1431 if (rv != ENOENT) { 1434 if (rv != ENOENT) {
1432 aprint_debug_dev(self, 1435 aprint_debug_dev(self,
1433 "unable to remove macvlan %u\n", rv); 1436 "unable to remove macvlan %u\n", rv);
1434 } 1437 }
1435 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 1438 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1436 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1439 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1437 if (rv != ENOENT) { 1440 if (rv != ENOENT) {
1438 aprint_debug_dev(self, 1441 aprint_debug_dev(self,
1439 "unable to remove macvlan, ignore vlan %u\n", rv); 1442 "unable to remove macvlan, ignore vlan %u\n", rv);
1440 } 1443 }
1441 1444
1442 if (ixl_update_macvlan(sc) != 0) { 1445 if (ixl_update_macvlan(sc) != 0) {
1443 aprint_debug_dev(self, 1446 aprint_debug_dev(self,
1444 "couldn't enable vlan hardware filter\n"); 1447 "couldn't enable vlan hardware filter\n");
1445 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 1448 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1446 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 1449 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1447 } 1450 }
1448 1451
1449 sc->sc_txrx_workqueue = true; 1452 sc->sc_txrx_workqueue = true;
1450 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT; 1453 sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1451 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT; 1454 sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1452 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT; 1455 sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1453 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT; 1456 sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1454 1457
1455 ixl_stats_update(sc); 1458 ixl_stats_update(sc);
1456 sc->sc_stats_counters.isc_has_offset = true; 1459 sc->sc_stats_counters.isc_has_offset = true;
1457 1460
1458 if (pmf_device_register(self, NULL, NULL) != true) 1461 if (pmf_device_register(self, NULL, NULL) != true)
1459 aprint_debug_dev(self, "couldn't establish power handler\n"); 1462 aprint_debug_dev(self, "couldn't establish power handler\n");
1460 sc->sc_itr_rx = IXL_ITR_RX; 1463 sc->sc_itr_rx = IXL_ITR_RX;
1461 sc->sc_itr_tx = IXL_ITR_TX; 1464 sc->sc_itr_tx = IXL_ITR_TX;
1462 sc->sc_attached = true; 1465 sc->sc_attached = true;
1463 return; 1466 return;
1464 1467
1465teardown_wqs: 1468teardown_wqs:
1466 config_finalize_register(self, ixl_workqs_teardown); 1469 config_finalize_register(self, ixl_workqs_teardown);
1467teardown_sysctls: 1470teardown_sysctls:
1468 ixl_teardown_sysctls(sc); 1471 ixl_teardown_sysctls(sc);
1469teardown_stats: 1472teardown_stats:
1470 ixl_teardown_stats(sc); 1473 ixl_teardown_stats(sc);
1471teardown_intrs: 1474teardown_intrs:
1472 ixl_teardown_interrupts(sc); 1475 ixl_teardown_interrupts(sc);
1473free_queue_pairs: 1476free_queue_pairs:
1474 ixl_queue_pairs_free(sc); 1477 ixl_queue_pairs_free(sc);
1475free_scratch: 1478free_scratch:
1476 ixl_dmamem_free(sc, &sc->sc_scratch); 1479 ixl_dmamem_free(sc, &sc->sc_scratch);
1477free_hmc: 1480free_hmc:
1478 ixl_hmc_free(sc); 1481 ixl_hmc_free(sc);
1479free_aqbuf: 1482free_aqbuf:
1480 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1483 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1481shutdown: 1484shutdown:
1482 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1485 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1483 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1486 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1484 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1487 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1485 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1488 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1486 1489
1487 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1490 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1488 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1491 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1489 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1492 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1490 1493
1491 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1494 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1492 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1495 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1493 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1496 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1494 1497
1495 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1498 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1496 0, IXL_DMA_LEN(&sc->sc_arq), 1499 0, IXL_DMA_LEN(&sc->sc_arq),
1497 BUS_DMASYNC_POSTREAD); 1500 BUS_DMASYNC_POSTREAD);
1498 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1501 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1499 0, IXL_DMA_LEN(&sc->sc_atq), 1502 0, IXL_DMA_LEN(&sc->sc_atq),
1500 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1503 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1501 1504
1502 ixl_arq_unfill(sc); 1505 ixl_arq_unfill(sc);
1503free_arq: 1506free_arq:
1504 ixl_dmamem_free(sc, &sc->sc_arq); 1507 ixl_dmamem_free(sc, &sc->sc_arq);
1505free_atq: 1508free_atq:
1506 ixl_dmamem_free(sc, &sc->sc_atq); 1509 ixl_dmamem_free(sc, &sc->sc_atq);
1507unmap: 1510unmap:
1508 mutex_destroy(&sc->sc_atq_lock); 1511 mutex_destroy(&sc->sc_atq_lock);
1509 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1512 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1510 mutex_destroy(&sc->sc_cfg_lock); 1513 mutex_destroy(&sc->sc_cfg_lock);
1511 sc->sc_mems = 0; 1514 sc->sc_mems = 0;
1512 1515
1513 sc->sc_attached = false; 1516 sc->sc_attached = false;
1514} 1517}
1515 1518
1516static int 1519static int
1517ixl_detach(device_t self, int flags) 1520ixl_detach(device_t self, int flags)
1518{ 1521{
1519 struct ixl_softc *sc = device_private(self); 1522 struct ixl_softc *sc = device_private(self);
1520 struct ifnet *ifp = &sc->sc_ec.ec_if; 1523 struct ifnet *ifp = &sc->sc_ec.ec_if;
1521 1524
1522 if (!sc->sc_attached) 1525 if (!sc->sc_attached)
1523 return 0; 1526 return 0;
1524 1527
1525 ixl_stop(ifp, 1); 1528 ixl_stop(ifp, 1);
1526 1529
1527 ixl_disable_other_intr(sc); 1530 ixl_disable_other_intr(sc);
1528 1531
1529 callout_halt(&sc->sc_stats_callout, NULL); 1532 callout_halt(&sc->sc_stats_callout, NULL);
1530 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task); 1533 ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1531 1534
1532 /* wait for ATQ handler */ 1535 /* wait for ATQ handler */
1533 mutex_enter(&sc->sc_atq_lock); 1536 mutex_enter(&sc->sc_atq_lock);
1534 mutex_exit(&sc->sc_atq_lock); 1537 mutex_exit(&sc->sc_atq_lock);
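	/*
	 * Taking and immediately dropping sc_atq_lock acts as a
	 * barrier: any ATQ handler running at this point holds the
	 * lock, so once mutex_enter() returns, in-flight handlers
	 * have completed.
	 */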
1535 1538
1536 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task); 1539 ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1537 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task); 1540 ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1538 1541
1539 if (sc->sc_workq != NULL) { 1542 if (sc->sc_workq != NULL) {
1540 ixl_workq_destroy(sc->sc_workq); 1543 ixl_workq_destroy(sc->sc_workq);
1541 sc->sc_workq = NULL; 1544 sc->sc_workq = NULL;
1542 } 1545 }
1543 1546
1544 if (sc->sc_workq_txrx != NULL) { 1547 if (sc->sc_workq_txrx != NULL) {
1545 workqueue_destroy(sc->sc_workq_txrx); 1548 workqueue_destroy(sc->sc_workq_txrx);
1546 sc->sc_workq_txrx = NULL; 1549 sc->sc_workq_txrx = NULL;
1547 } 1550 }
1548 1551
1549 ether_ifdetach(ifp); 1552 ether_ifdetach(ifp);
1550 if_detach(ifp); 1553 if_detach(ifp);
1551 ifmedia_fini(&sc->sc_media); 1554 ifmedia_fini(&sc->sc_media);
1552 1555
1553 ixl_teardown_interrupts(sc); 1556 ixl_teardown_interrupts(sc);
1554 ixl_teardown_stats(sc); 1557 ixl_teardown_stats(sc);
1555 ixl_teardown_sysctls(sc); 1558 ixl_teardown_sysctls(sc);
1556 1559
1557 ixl_queue_pairs_free(sc); 1560 ixl_queue_pairs_free(sc);
1558 1561
1559 ixl_dmamem_free(sc, &sc->sc_scratch); 1562 ixl_dmamem_free(sc, &sc->sc_scratch);
1560 ixl_hmc_free(sc); 1563 ixl_hmc_free(sc);
1561 1564
1562 /* shutdown */ 1565 /* shutdown */
1563 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0); 1566 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1564 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0); 1567 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1565 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0); 1568 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1566 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0); 1569 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1567 1570
1568 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0); 1571 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1569 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0); 1572 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1570 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0); 1573 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1571 1574
1572 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0); 1575 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1573 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0); 1576 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1574 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0); 1577 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1575 1578
1576 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq), 1579 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1577 0, IXL_DMA_LEN(&sc->sc_arq), 1580 0, IXL_DMA_LEN(&sc->sc_arq),
1578 BUS_DMASYNC_POSTREAD); 1581 BUS_DMASYNC_POSTREAD);
1579 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq), 1582 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1580 0, IXL_DMA_LEN(&sc->sc_atq), 1583 0, IXL_DMA_LEN(&sc->sc_atq),
1581 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1584 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1582 1585
1583 ixl_arq_unfill(sc); 1586 ixl_arq_unfill(sc);
1584 1587
1585 ixl_dmamem_free(sc, &sc->sc_arq); 1588 ixl_dmamem_free(sc, &sc->sc_arq);
1586 ixl_dmamem_free(sc, &sc->sc_atq); 1589 ixl_dmamem_free(sc, &sc->sc_atq);
1587 ixl_dmamem_free(sc, &sc->sc_aqbuf); 1590 ixl_dmamem_free(sc, &sc->sc_aqbuf);
1588 1591
1589 cv_destroy(&sc->sc_atq_cv); 1592 cv_destroy(&sc->sc_atq_cv);
1590 mutex_destroy(&sc->sc_atq_lock); 1593 mutex_destroy(&sc->sc_atq_lock);
1591 1594
1592 if (sc->sc_mems != 0) { 1595 if (sc->sc_mems != 0) {
1593 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems); 1596 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1594 sc->sc_mems = 0; 1597 sc->sc_mems = 0;
1595 } 1598 }
1596 1599
1597 mutex_destroy(&sc->sc_cfg_lock); 1600 mutex_destroy(&sc->sc_cfg_lock);
1598 1601
1599 return 0; 1602 return 0;
1600} 1603}
1601 1604
1602static int 1605static int
1603ixl_workqs_teardown(device_t self) 1606ixl_workqs_teardown(device_t self)
1604{ 1607{
1605 struct ixl_softc *sc = device_private(self); 1608 struct ixl_softc *sc = device_private(self);
1606 1609
1607 if (sc->sc_workq != NULL) { 1610 if (sc->sc_workq != NULL) {
1608 ixl_workq_destroy(sc->sc_workq); 1611 ixl_workq_destroy(sc->sc_workq);
1609 sc->sc_workq = NULL; 1612 sc->sc_workq = NULL;
1610 } 1613 }
1611 1614
1612 if (sc->sc_workq_txrx != NULL) { 1615 if (sc->sc_workq_txrx != NULL) {
1613 workqueue_destroy(sc->sc_workq_txrx); 1616 workqueue_destroy(sc->sc_workq_txrx);
1614 sc->sc_workq_txrx = NULL; 1617 sc->sc_workq_txrx = NULL;
1615 } 1618 }
1616 1619
1617 return 0; 1620 return 0;
1618} 1621}
1619 1622
1620static int 1623static int
1621ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 1624ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1622{ 1625{
1623 struct ifnet *ifp = &ec->ec_if; 1626 struct ifnet *ifp = &ec->ec_if;
1624 struct ixl_softc *sc = ifp->if_softc; 1627 struct ixl_softc *sc = ifp->if_softc;
1625 int rv; 1628 int rv;
1626 1629
1627 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 1630 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1628 return 0; 1631 return 0;
1629 } 1632 }
1630 1633
1631 if (set) { 1634 if (set) {
1632 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid, 1635 rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1633 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1636 IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1634 if (rv == 0) { 1637 if (rv == 0) {
1635 rv = ixl_add_macvlan(sc, etherbroadcastaddr, 1638 rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1636 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH); 1639 vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1637 } 1640 }
1638 } else { 1641 } else {
1639 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid, 1642 rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1640 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1643 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1641 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid, 1644 (void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1642 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH); 1645 IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1643 } 1646 }
1644 1647
1645 return rv; 1648 return rv;
1646} 1649}
1647 1650
1648static void 1651static void
1649ixl_media_add(struct ixl_softc *sc) 1652ixl_media_add(struct ixl_softc *sc)
1650{ 1653{
1651 struct ifmedia *ifm = &sc->sc_media; 1654 struct ifmedia *ifm = &sc->sc_media;
1652 const struct ixl_phy_type *itype; 1655 const struct ixl_phy_type *itype;
1653 unsigned int i; 1656 unsigned int i;
1654 bool flow; 1657 bool flow;
1655 1658
1656 if (ISSET(sc->sc_phy_abilities, 1659 if (ISSET(sc->sc_phy_abilities,
1657 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) { 1660 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1658 flow = true; 1661 flow = true;
1659 } else { 1662 } else {
1660 flow = false; 1663 flow = false;
1661 } 1664 }
1662 1665
1663 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) { 1666 for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1664 itype = &ixl_phy_type_map[i]; 1667 itype = &ixl_phy_type_map[i];
1665 1668
1666 if (ISSET(sc->sc_phy_types, itype->phy_type)) { 1669 if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1667 ifmedia_add(ifm, 1670 ifmedia_add(ifm,
1668 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL); 1671 IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1669 1672
1670 if (flow) { 1673 if (flow) {
1671 ifmedia_add(ifm, 1674 ifmedia_add(ifm,
1672 IFM_ETHER | IFM_FDX | IFM_FLOW | 1675 IFM_ETHER | IFM_FDX | IFM_FLOW |
1673 itype->ifm_type, 0, NULL); 1676 itype->ifm_type, 0, NULL);
1674 } 1677 }
1675 1678
1676 if (itype->ifm_type != IFM_100_TX) 1679 if (itype->ifm_type != IFM_100_TX)
1677 continue; 1680 continue;
1678 1681
1679 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 1682 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1680 0, NULL); 1683 0, NULL);
1681 if (flow) { 1684 if (flow) {
1682 ifmedia_add(ifm, 1685 ifmedia_add(ifm,
1683 IFM_ETHER | IFM_FLOW | itype->ifm_type, 1686 IFM_ETHER | IFM_FLOW | itype->ifm_type,
1684 0, NULL); 1687 0, NULL);
1685 } 1688 }
1686 } 1689 }
1687 } 1690 }
1688} 1691}
1689 1692
1690static void 1693static void
1691ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1694ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1692{ 1695{
1693 struct ixl_softc *sc = ifp->if_softc; 1696 struct ixl_softc *sc = ifp->if_softc;
1694 1697
1695 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 1698 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1696 1699
1697 ifmr->ifm_status = sc->sc_media_status; 1700 ifmr->ifm_status = sc->sc_media_status;
1698 ifmr->ifm_active = sc->sc_media_active; 1701 ifmr->ifm_active = sc->sc_media_active;
1699} 1702}
1700 1703
1701static int 1704static int
1702ixl_media_change(struct ifnet *ifp) 1705ixl_media_change(struct ifnet *ifp)
1703{ 1706{
1704 struct ixl_softc *sc = ifp->if_softc; 1707 struct ixl_softc *sc = ifp->if_softc;
1705 struct ifmedia *ifm = &sc->sc_media; 1708 struct ifmedia *ifm = &sc->sc_media;
1706 uint64_t ifm_active = sc->sc_media_active; 1709 uint64_t ifm_active = sc->sc_media_active;
1707 uint8_t link_speed, abilities; 1710 uint8_t link_speed, abilities;
1708 1711
1709 switch (IFM_SUBTYPE(ifm_active)) { 1712 switch (IFM_SUBTYPE(ifm_active)) {
1710 case IFM_1000_SGMII: 1713 case IFM_1000_SGMII:
1711 case IFM_1000_KX: 1714 case IFM_1000_KX:
1712 case IFM_10G_KX4: 1715 case IFM_10G_KX4:
1713 case IFM_10G_KR: 1716 case IFM_10G_KR:
1714 case IFM_40G_KR4: 1717 case IFM_40G_KR4:
1715 case IFM_20G_KR2: 1718 case IFM_20G_KR2:
1716 case IFM_25G_KR: 1719 case IFM_25G_KR:
1717 /* backplanes */ 1720 /* backplanes */
1718 return EINVAL; 1721 return EINVAL;
1719 } 1722 }
1720 1723
1721 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP; 1724 abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1722 1725
1723 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1726 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1724 case IFM_AUTO: 1727 case IFM_AUTO:
1725 link_speed = sc->sc_phy_linkspeed; 1728 link_speed = sc->sc_phy_linkspeed;
1726 break; 1729 break;
1727 case IFM_NONE: 1730 case IFM_NONE:
1728 link_speed = 0; 1731 link_speed = 0;
1729 CLR(abilities, IXL_PHY_ABILITY_LINKUP); 1732 CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1730 break; 1733 break;
1731 default: 1734 default:
1732 link_speed = ixl_search_baudrate( 1735 link_speed = ixl_search_baudrate(
1733 ifmedia_baudrate(ifm->ifm_media)); 1736 ifmedia_baudrate(ifm->ifm_media));
1734 } 1737 }
1735 1738
1736 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) { 1739 if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1737 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0) 1740 if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1738 return EINVAL; 1741 return EINVAL;
1739 } 1742 }
1740 1743
1741 if (ifm->ifm_media & IFM_FLOW) { 1744 if (ifm->ifm_media & IFM_FLOW) {
1742 abilities |= sc->sc_phy_abilities & 1745 abilities |= sc->sc_phy_abilities &
1743 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX); 1746 (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1744 } 1747 }
1745 1748
1746 return ixl_set_phy_config(sc, link_speed, abilities, false); 1749 return ixl_set_phy_config(sc, link_speed, abilities, false);
1747} 1750}
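[Editor's note] A worked example of the supported-speed guard in ixl_media_change(), with bit values invented for illustration (the real link-speed encodings are defined elsewhere in this file):

	/*
	 * sc_phy_linkspeed = 0x02;     assumed: PHY supports 10G only
	 * link_speed       = 0x01;     assumed: user selected a 1G media
	 * ISSET(link_speed, sc_phy_linkspeed) == 0  ->  return EINVAL
	 */

IFM_AUTO passes sc_phy_linkspeed itself, so the check always succeeds; IFM_NONE clears IXL_PHY_ABILITY_LINKUP and skips the check entirely.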
1748 1751
1749static void 1752static void
1750ixl_watchdog(struct ifnet *ifp) 1753ixl_watchdog(struct ifnet *ifp)
1751{ 1754{
1752 1755
1753} 1756}
1754 1757
1755static void 1758static void
1756ixl_del_all_multiaddr(struct ixl_softc *sc) 1759ixl_del_all_multiaddr(struct ixl_softc *sc)
1757{ 1760{
1758 struct ethercom *ec = &sc->sc_ec; 1761 struct ethercom *ec = &sc->sc_ec;
1759 struct ether_multi *enm; 1762 struct ether_multi *enm;
1760 struct ether_multistep step; 1763 struct ether_multistep step;
1761 1764
1762 ETHER_LOCK(ec); 1765 ETHER_LOCK(ec);
1763 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1766 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1764 ETHER_NEXT_MULTI(step, enm)) { 1767 ETHER_NEXT_MULTI(step, enm)) {
1765 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1768 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1766 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1769 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1767 } 1770 }
1768 ETHER_UNLOCK(ec); 1771 ETHER_UNLOCK(ec);
1769} 1772}
1770 1773
1771static int 1774static int
1772ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1775ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1773{ 1776{
1774 struct ifnet *ifp = &sc->sc_ec.ec_if; 1777 struct ifnet *ifp = &sc->sc_ec.ec_if;
1775 int rv; 1778 int rv;
1776 1779
1777 if (ISSET(ifp->if_flags, IFF_ALLMULTI)) 1780 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1778 return 0; 1781 return 0;
1779 1782
1780 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) { 1783 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1781 ixl_del_all_multiaddr(sc); 1784 ixl_del_all_multiaddr(sc);
1782 SET(ifp->if_flags, IFF_ALLMULTI); 1785 SET(ifp->if_flags, IFF_ALLMULTI);
1783 return ENETRESET; 1786 return ENETRESET;
1784 } 1787 }
1785 1788
1786 /* multicast addresses cannot use VLAN HWFILTER */ 1789 /* multicast addresses cannot use VLAN HWFILTER */
1787 rv = ixl_add_macvlan(sc, addrlo, 0, 1790 rv = ixl_add_macvlan(sc, addrlo, 0,
1788 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1791 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1789 1792
1790 if (rv == ENOSPC) { 1793 if (rv == ENOSPC) {
1791 ixl_del_all_multiaddr(sc); 1794 ixl_del_all_multiaddr(sc);
1792 SET(ifp->if_flags, IFF_ALLMULTI); 1795 SET(ifp->if_flags, IFF_ALLMULTI);
1793 return ENETRESET; 1796 return ENETRESET;
1794 } 1797 }
1795 1798
1796 return rv; 1799 return rv;
1797} 1800}
1798 1801
1799static int 1802static int
1800ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi) 1803ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1801{ 1804{
1802 struct ifnet *ifp = &sc->sc_ec.ec_if; 1805 struct ifnet *ifp = &sc->sc_ec.ec_if;
1803 struct ethercom *ec = &sc->sc_ec; 1806 struct ethercom *ec = &sc->sc_ec;
1804 struct ether_multi *enm, *enm_last; 1807 struct ether_multi *enm, *enm_last;
1805 struct ether_multistep step; 1808 struct ether_multistep step;
1806 int error, rv = 0; 1809 int error, rv = 0;
1807 1810
1808 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) { 1811 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1809 ixl_remove_macvlan(sc, addrlo, 0, 1812 ixl_remove_macvlan(sc, addrlo, 0,
1810 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1813 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1811 return 0; 1814 return 0;
1812 } 1815 }
1813 1816
1814 ETHER_LOCK(ec); 1817 ETHER_LOCK(ec);
1815 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1818 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1816 ETHER_NEXT_MULTI(step, enm)) { 1819 ETHER_NEXT_MULTI(step, enm)) {
1817 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1820 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1818 ETHER_ADDR_LEN) != 0) { 1821 ETHER_ADDR_LEN) != 0) {
1819 goto out; 1822 goto out;
1820 } 1823 }
1821 } 1824 }
1822 1825
1823 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1826 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1824 ETHER_NEXT_MULTI(step, enm)) { 1827 ETHER_NEXT_MULTI(step, enm)) {
1825 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0, 1828 error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1826 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN); 1829 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1827 if (error != 0) 1830 if (error != 0)
1828 break; 1831 break;
1829 } 1832 }
1830 1833
1831 if (enm != NULL) { 1834 if (enm != NULL) {
1832 enm_last = enm; 1835 enm_last = enm;
1833 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL; 1836 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1834 ETHER_NEXT_MULTI(step, enm)) { 1837 ETHER_NEXT_MULTI(step, enm)) {
1835 if (enm == enm_last) 1838 if (enm == enm_last)
1836 break; 1839 break;
1837 1840
1838 ixl_remove_macvlan(sc, enm->enm_addrlo, 0, 1841 ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1839 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN); 1842 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1840 } 1843 }
1841 } else { 1844 } else {
1842 CLR(ifp->if_flags, IFF_ALLMULTI); 1845 CLR(ifp->if_flags, IFF_ALLMULTI);
1843 rv = ENETRESET; 1846 rv = ENETRESET;
1844 } 1847 }
1845 1848
1846out: 1849out:
1847 ETHER_UNLOCK(ec); 1850 ETHER_UNLOCK(ec);
1848 return rv; 1851 return rv;
1849} 1852}
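[Editor's note] The enm_last loop in ixl_del_multi() above is an instance of the generic all-or-nothing installation pattern: when the interface can leave ALLMULTI, every remaining individual filter is re-added, and on the first failure exactly the entries added so far are removed again. A minimal stand-alone sketch of the idiom, with invented names:

	static int
	install_all_or_nothing(int (*add)(int), void (*del)(int), int n)
	{
		int i, error;

		for (i = 0; i < n; i++) {
			error = (*add)(i);
			if (error != 0) {
				/* roll back the entries installed so far */
				while (i-- > 0)
					(*del)(i);
				return error;
			}
		}
		return 0;
	}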
1850 1853
1851static int 1854static int
1852ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1855ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1853{ 1856{
1854 struct ifreq *ifr = (struct ifreq *)data; 1857 struct ifreq *ifr = (struct ifreq *)data;
1855 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc; 1858 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1856 const struct sockaddr *sa; 1859 const struct sockaddr *sa;
1857 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN]; 1860 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1858 int s, error = 0; 1861 int s, error = 0;
1859 unsigned int nmtu; 1862 unsigned int nmtu;
1860 1863
1861 switch (cmd) { 1864 switch (cmd) {
1862 case SIOCSIFMTU: 1865 case SIOCSIFMTU:
1863 nmtu = ifr->ifr_mtu; 1866 nmtu = ifr->ifr_mtu;
1864 1867
1865 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) { 1868 if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1866 error = EINVAL; 1869 error = EINVAL;
1867 break; 1870 break;
1868 } 1871 }
1869 if (ifp->if_mtu != nmtu) { 1872 if (ifp->if_mtu != nmtu) {
1870 s = splnet(); 1873 s = splnet();
1871 error = ether_ioctl(ifp, cmd, data); 1874 error = ether_ioctl(ifp, cmd, data);
1872 splx(s); 1875 splx(s);
1873 if (error == ENETRESET) 1876 if (error == ENETRESET)
1874 error = ixl_init(ifp); 1877 error = ixl_init(ifp);
1875 } 1878 }
1876 break; 1879 break;
1877 case SIOCADDMULTI: 1880 case SIOCADDMULTI:
1878 sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1881 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1879 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) { 1882 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1880 error = ether_multiaddr(sa, addrlo, addrhi); 1883 error = ether_multiaddr(sa, addrlo, addrhi);
1881 if (error != 0) 1884 if (error != 0)
1882 return error; 1885 return error;
1883 1886
1884 error = ixl_add_multi(sc, addrlo, addrhi); 1887 error = ixl_add_multi(sc, addrlo, addrhi);
1885 if (error != 0 && error != ENETRESET) { 1888 if (error != 0 && error != ENETRESET) {
1886 ether_delmulti(sa, &sc->sc_ec); 1889 ether_delmulti(sa, &sc->sc_ec);
1887 error = EIO; 1890 error = EIO;
1888 } 1891 }
1889 } 1892 }
1890 break; 1893 break;
1891 1894
1892 case SIOCDELMULTI: 1895 case SIOCDELMULTI:
1893 sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1896 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1894 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) { 1897 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1895 error = ether_multiaddr(sa, addrlo, addrhi); 1898 error = ether_multiaddr(sa, addrlo, addrhi);
1896 if (error != 0) 1899 if (error != 0)
1897 return error; 1900 return error;
1898 1901
1899 error = ixl_del_multi(sc, addrlo, addrhi); 1902 error = ixl_del_multi(sc, addrlo, addrhi);
1900 } 1903 }
1901 break; 1904 break;
1902 1905
1903 default: 1906 default:
1904 s = splnet(); 1907 s = splnet();
1905 error = ether_ioctl(ifp, cmd, data); 1908 error = ether_ioctl(ifp, cmd, data);
1906 splx(s); 1909 splx(s);
1907 } 1910 }
1908 1911
1909 if (error == ENETRESET) 1912 if (error == ENETRESET)
1910 error = ixl_iff(sc); 1913 error = ixl_iff(sc);
1911 1914
1912 return error; 1915 return error;
1913} 1916}
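[Editor's note] On the convention used throughout ixl_ioctl(): in NetBSD, ENETRESET from ether_ioctl() or the multicast helpers means "the hardware receive filter must be reprogrammed", not a failure. A minimal sketch of the pattern for a hypothetical driver (drv_set_rxfilter is an invented name; here that role is played by ixl_iff(), which itself returns 0 when the interface is not running):

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/* not an error: reload the hardware RX filter */
		error = drv_set_rxfilter(sc);
	}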
1914 1917
1915static enum i40e_mac_type 1918static enum i40e_mac_type
1916ixl_mactype(pci_product_id_t id) 1919ixl_mactype(pci_product_id_t id)
1917{ 1920{
1918 1921
1919 switch (id) { 1922 switch (id) {
1920 case PCI_PRODUCT_INTEL_XL710_SFP: 1923 case PCI_PRODUCT_INTEL_XL710_SFP:
1921 case PCI_PRODUCT_INTEL_XL710_KX_B: 1924 case PCI_PRODUCT_INTEL_XL710_KX_B:
1922 case PCI_PRODUCT_INTEL_XL710_KX_C: 1925 case PCI_PRODUCT_INTEL_XL710_KX_C:
1923 case PCI_PRODUCT_INTEL_XL710_QSFP_A: 1926 case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1924 case PCI_PRODUCT_INTEL_XL710_QSFP_B: 1927 case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1925 case PCI_PRODUCT_INTEL_XL710_QSFP_C: 1928 case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1926 case PCI_PRODUCT_INTEL_X710_10G_T: 1929 case PCI_PRODUCT_INTEL_X710_10G_T:
1927 case PCI_PRODUCT_INTEL_XL710_20G_BP_1: 1930 case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1928 case PCI_PRODUCT_INTEL_XL710_20G_BP_2: 1931 case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1929 case PCI_PRODUCT_INTEL_X710_T4_10G: 1932 case PCI_PRODUCT_INTEL_X710_T4_10G:
1930 case PCI_PRODUCT_INTEL_XXV710_25G_BP: 1933 case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1931 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28: 1934 case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1932 return I40E_MAC_XL710; 1935 return I40E_MAC_XL710;
1933 1936
1934 case PCI_PRODUCT_INTEL_X722_KX: 1937 case PCI_PRODUCT_INTEL_X722_KX:
1935 case PCI_PRODUCT_INTEL_X722_QSFP: 1938 case PCI_PRODUCT_INTEL_X722_QSFP:
1936 case PCI_PRODUCT_INTEL_X722_SFP: 1939 case PCI_PRODUCT_INTEL_X722_SFP:
1937 case PCI_PRODUCT_INTEL_X722_1G_BASET: 1940 case PCI_PRODUCT_INTEL_X722_1G_BASET:
1938 case PCI_PRODUCT_INTEL_X722_10G_BASET: 1941 case PCI_PRODUCT_INTEL_X722_10G_BASET:
1939 case PCI_PRODUCT_INTEL_X722_I_SFP: 1942 case PCI_PRODUCT_INTEL_X722_I_SFP:
1940 return I40E_MAC_X722; 1943 return I40E_MAC_X722;
1941 } 1944 }
1942 1945
1943 return I40E_MAC_GENERIC; 1946 return I40E_MAC_GENERIC;
1944} 1947}
1945 1948
 1949static void
 1950ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
 1951{
 1952 pcireg_t csr;
 1953
 1954 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
 1955 csr |= (PCI_COMMAND_MASTER_ENABLE |
 1956 PCI_COMMAND_MEM_ENABLE);
 1957 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
 1958}
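[Editor's note] The function above is the substance of this revision: a read-modify-write of the PCI command/status register that forces memory-space decoding and bus mastering on, so the device responds to register accesses and can DMA. A sketch of the expected call site; pa and its members follow the standard struct pci_attach_args usage, but the exact placement inside ixl_attach() is outside this hunk:

	struct pci_attach_args *pa = aux;

	/* enable memory decoding and bus mastering before any BAR access */
	ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);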
 1959
1946static inline void * 1960static inline void *
1947ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i) 1961ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1948{ 1962{
1949 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd); 1963 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1950 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1964 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1951 1965
1952 if (i >= e->hmc_count) 1966 if (i >= e->hmc_count)
1953 return NULL; 1967 return NULL;
1954 1968
1955 kva += e->hmc_base; 1969 kva += e->hmc_base;
1956 kva += i * e->hmc_size; 1970 kva += i * e->hmc_size;
1957 1971
1958 return kva; 1972 return kva;
1959} 1973}
1960 1974
1961static inline size_t 1975static inline size_t
1962ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type) 1976ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1963{ 1977{
1964 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type]; 1978 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1965 1979
1966 return e->hmc_size; 1980 return e->hmc_size;
1967} 1981}
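[Editor's note] A worked example for the two HMC helpers above, with invented numbers: if the LAN TX entry has hmc_base 0x1000 and hmc_size 128, then ixl_hmc_kva(sc, IXL_HMC_LAN_TX, 3) returns the mapped KVA plus 0x1000 + 3 * 128 = 0x1180, i.e. the fourth queue context, and returns NULL once the index reaches hmc_count.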
1968 1982
1969static void 1983static void
1970ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1984ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1971{ 1985{
1972 struct ixl_rx_ring *rxr = qp->qp_rxr; 1986 struct ixl_rx_ring *rxr = qp->qp_rxr;
1973 1987
1974 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 1988 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1975 I40E_PFINT_DYN_CTLN_INTENA_MASK | 1989 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1976 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | 1990 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1977 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 1991 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1978 ixl_flush(sc); 1992 ixl_flush(sc);
1979} 1993}
1980 1994
1981static void 1995static void
1982ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp) 1996ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1983{ 1997{
1984 struct ixl_rx_ring *rxr = qp->qp_rxr; 1998 struct ixl_rx_ring *rxr = qp->qp_rxr;
1985 1999
1986 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid), 2000 ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1987 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); 2001 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1988 ixl_flush(sc); 2002 ixl_flush(sc);
1989} 2003}
1990 2004
1991static void 2005static void
1992ixl_enable_other_intr(struct ixl_softc *sc) 2006ixl_enable_other_intr(struct ixl_softc *sc)
1993{ 2007{
1994 2008
1995 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2009 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1996 I40E_PFINT_DYN_CTL0_INTENA_MASK | 2010 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1997 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | 2011 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1998 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2012 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1999 ixl_flush(sc); 2013 ixl_flush(sc);
2000} 2014}
2001 2015
2002static void 2016static void
2003ixl_disable_other_intr(struct ixl_softc *sc) 2017ixl_disable_other_intr(struct ixl_softc *sc)
2004{ 2018{
2005 2019
2006 ixl_wr(sc, I40E_PFINT_DYN_CTL0, 2020 ixl_wr(sc, I40E_PFINT_DYN_CTL0,
2007 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); 2021 (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
2008 ixl_flush(sc); 2022 ixl_flush(sc);
2009} 2023}
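[Editor's note] On the four interrupt helpers above: enabling writes INTENA together with CLEARPBA, so the pending-bit array entry is cleared at the moment the vector is unmasked; disabling writes only the ITR index and leaves INTENA clear, masking the vector without disturbing other state. The trailing ixl_flush() in each is, presumably, a posted-write flush (a register read-back) so the mask change has reached the device before the caller continues.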
2010 2024
2011static int 2025static int
2012ixl_reinit(struct ixl_softc *sc) 2026ixl_reinit(struct ixl_softc *sc)
2013{ 2027{
2014 struct ixl_rx_ring *rxr; 2028 struct ixl_rx_ring *rxr;
2015 struct ixl_tx_ring *txr; 2029 struct ixl_tx_ring *txr;
2016 unsigned int i; 2030 unsigned int i;
2017 uint32_t reg; 2031 uint32_t reg;
2018 2032
2019 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2033 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2020 2034
2021 if (ixl_get_vsi(sc) != 0) 2035 if (ixl_get_vsi(sc) != 0)
2022 return EIO; 2036 return EIO;
2023 2037
2024 if (ixl_set_vsi(sc) != 0) 2038 if (ixl_set_vsi(sc) != 0)
2025 return EIO; 2039 return EIO;
2026 2040
2027 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2041 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2028 txr = sc->sc_qps[i].qp_txr; 2042 txr = sc->sc_qps[i].qp_txr;
2029 rxr = sc->sc_qps[i].qp_rxr; 2043 rxr = sc->sc_qps[i].qp_rxr;
2030 2044
2031 ixl_txr_config(sc, txr); 2045 ixl_txr_config(sc, txr);
2032 ixl_rxr_config(sc, rxr); 2046 ixl_rxr_config(sc, rxr);
2033 } 2047 }
2034 2048
2035 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2049 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2036 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE); 2050 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
2037 2051
2038 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2052 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2039 txr = sc->sc_qps[i].qp_txr; 2053 txr = sc->sc_qps[i].qp_txr;
2040 rxr = sc->sc_qps[i].qp_rxr; 2054 rxr = sc->sc_qps[i].qp_rxr;
2041 2055
2042 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE | 2056 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2043 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)); 2057 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2044 ixl_flush(sc); 2058 ixl_flush(sc);
2045 2059
2046 ixl_wr(sc, txr->txr_tail, txr->txr_prod); 2060 ixl_wr(sc, txr->txr_tail, txr->txr_prod);
2047 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod); 2061 ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2048 2062
2049 /* ixl_rxfill() needs lock held */ 2063 /* ixl_rxfill() needs lock held */
2050 mutex_enter(&rxr->rxr_lock); 2064 mutex_enter(&rxr->rxr_lock);
2051 ixl_rxfill(sc, rxr); 2065 ixl_rxfill(sc, rxr);
2052 mutex_exit(&rxr->rxr_lock); 2066 mutex_exit(&rxr->rxr_lock);
2053 2067
2054 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2068 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2055 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2069 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2056 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2070 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2057 if (ixl_rxr_enabled(sc, rxr) != 0) 2071 if (ixl_rxr_enabled(sc, rxr) != 0)
2058 goto stop; 2072 goto stop;
2059 2073
2060 ixl_txr_qdis(sc, txr, 1); 2074 ixl_txr_qdis(sc, txr, 1);
2061 2075
2062 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2076 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2063 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2077 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2064 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2078 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2065 2079
2066 if (ixl_txr_enabled(sc, txr) != 0) 2080 if (ixl_txr_enabled(sc, txr) != 0)
2067 goto stop; 2081 goto stop;
2068 } 2082 }
2069 2083
2070 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2084 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2071 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2085 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2072 2086
2073 return 0; 2087 return 0;
2074 2088
2075stop: 2089stop:
2076 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd), 2090 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2077 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE); 2091 0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2078 2092
2079 return ETIMEDOUT; 2093 return ETIMEDOUT;
2080} 2094}
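[Editor's note] The ordering in ixl_reinit() above matches the usual bring-up sequence for this hardware: program the HMC queue contexts, sync the backing DMA memory, write the tail registers, pre-fill the RX ring under its lock, then request queue enable (QENA_REQ) and poll for the hardware acknowledgment (QENA_STAT, via ixl_rxr_enabled() and ixl_txr_enabled()), enabling RX before TX for each pair.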
2081 2095
2082static int 2096static int
2083ixl_init_locked(struct ixl_softc *sc) 2097ixl_init_locked(struct ixl_softc *sc)
2084{ 2098{
2085 struct ifnet *ifp = &sc->sc_ec.ec_if; 2099 struct ifnet *ifp = &sc->sc_ec.ec_if;
2086 unsigned int i; 2100 unsigned int i;
2087 int error, eccap_change; 2101 int error, eccap_change;
2088 2102
2089 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2103 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2090 2104
2091 if (ISSET(ifp->if_flags, IFF_RUNNING)) 2105 if (ISSET(ifp->if_flags, IFF_RUNNING))
2092 ixl_stop_locked(sc); 2106 ixl_stop_locked(sc);
2093 2107
2094 if (sc->sc_dead) { 2108 if (sc->sc_dead) {
2095 return ENXIO; 2109 return ENXIO;
2096 } 2110 }
2097 2111
2098 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable; 2112 eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2099 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING)) 2113 if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2100 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING; 2114 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2101 2115
2102 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) { 2116 if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2103 if (ixl_update_macvlan(sc) == 0) { 2117 if (ixl_update_macvlan(sc) == 0) {
2104 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER; 2118 sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2105 } else { 2119 } else {
2106 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER); 2120 CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2107 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER); 2121 CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2108 } 2122 }
2109 } 2123 }
2110 2124
2111 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX) 2125 if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2112 sc->sc_nqueue_pairs = 1; 2126 sc->sc_nqueue_pairs = 1;
2113 else 2127 else
2114 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max; 2128 sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2115 2129
2116 error = ixl_reinit(sc); 2130 error = ixl_reinit(sc);
2117 if (error) { 2131 if (error) {
2118 ixl_stop_locked(sc); 2132 ixl_stop_locked(sc);
2119 return error; 2133 return error;
2120 } 2134 }
2121 2135
2122 SET(ifp->if_flags, IFF_RUNNING); 2136 SET(ifp->if_flags, IFF_RUNNING);
2123 CLR(ifp->if_flags, IFF_OACTIVE); 2137 CLR(ifp->if_flags, IFF_OACTIVE);
2124 2138
2125 ixl_config_rss(sc); 2139 ixl_config_rss(sc);
2126 ixl_config_queue_intr(sc); 2140 ixl_config_queue_intr(sc);
2127 2141
2128 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2142 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2129 ixl_enable_queue_intr(sc, &sc->sc_qps[i]); 2143 ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2130 } 2144 }
2131 2145
2132 error = ixl_iff(sc); 2146 error = ixl_iff(sc);
2133 if (error) { 2147 if (error) {
2134 ixl_stop_locked(sc); 2148 ixl_stop_locked(sc);
2135 return error; 2149 return error;
2136 } 2150 }
2137 2151
2138 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval)); 2152 callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
2139 2153
2140 return 0; 2154 return 0;
2141} 2155}
2142 2156
2143static int 2157static int
2144ixl_init(struct ifnet *ifp) 2158ixl_init(struct ifnet *ifp)
2145{ 2159{
2146 struct ixl_softc *sc = ifp->if_softc; 2160 struct ixl_softc *sc = ifp->if_softc;
2147 int error; 2161 int error;
2148 2162
2149 mutex_enter(&sc->sc_cfg_lock); 2163 mutex_enter(&sc->sc_cfg_lock);
2150 error = ixl_init_locked(sc); 2164 error = ixl_init_locked(sc);
2151 mutex_exit(&sc->sc_cfg_lock); 2165 mutex_exit(&sc->sc_cfg_lock);
2152 2166
2153 if (error == 0) 2167 if (error == 0)
2154 (void)ixl_get_link_status(sc); 2168 (void)ixl_get_link_status(sc);
2155 2169
2156 return error; 2170 return error;
2157} 2171}
2158 2172
2159static int 2173static int
2160ixl_iff(struct ixl_softc *sc) 2174ixl_iff(struct ixl_softc *sc)
2161{ 2175{
2162 struct ifnet *ifp = &sc->sc_ec.ec_if; 2176 struct ifnet *ifp = &sc->sc_ec.ec_if;
2163 struct ixl_atq iatq; 2177 struct ixl_atq iatq;
2164 struct ixl_aq_desc *iaq; 2178 struct ixl_aq_desc *iaq;
2165 struct ixl_aq_vsi_promisc_param *param; 2179 struct ixl_aq_vsi_promisc_param *param;
2166 uint16_t flag_add, flag_del; 2180 uint16_t flag_add, flag_del;
2167 int error; 2181 int error;
2168 2182
2169 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 2183 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2170 return 0; 2184 return 0;
2171 2185
2172 memset(&iatq, 0, sizeof(iatq)); 2186 memset(&iatq, 0, sizeof(iatq));
2173 2187
2174 iaq = &iatq.iatq_desc; 2188 iaq = &iatq.iatq_desc;
2175 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC); 2189 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2176 2190
2177 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param; 2191 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2178 param->flags = htole16(0); 2192 param->flags = htole16(0);
2179 2193
2180 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER) 2194 if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2181 || ISSET(ifp->if_flags, IFF_PROMISC)) { 2195 || ISSET(ifp->if_flags, IFF_PROMISC)) {
2182 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2196 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2183 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2197 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2184 } 2198 }
2185 2199
2186 if (ISSET(ifp->if_flags, IFF_PROMISC)) { 2200 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2187 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2201 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2188 IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2202 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2189 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) { 2203 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2190 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST); 2204 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2191 } 2205 }
2192 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST | 2206 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2193 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST | 2207 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2194 IXL_AQ_VSI_PROMISC_FLAG_VLAN); 2208 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2195 param->seid = sc->sc_seid; 2209 param->seid = sc->sc_seid;
2196 2210
2197 error = ixl_atq_exec(sc, &iatq); 2211 error = ixl_atq_exec(sc, &iatq);
2198 if (error) 2212 if (error)
2199 return error; 2213 return error;
2200 2214
2201 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) 2215 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2202 return EIO; 2216 return EIO;
2203 2217
2204 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) { 2218 if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2205 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) { 2219 if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2206 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH; 2220 flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2207 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH; 2221 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2208 } else { 2222 } else {
2209 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN; 2223 flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2210 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN; 2224 flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2211 } 2225 }
2212 2226
2213 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del); 2227 ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2214 2228
2215 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN); 2229 memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2216 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add); 2230 ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2217 } 2231 }
2218 return 0; 2232 return 0;
2219} 2233}
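[Editor's note] A summary of the promiscuity flags computed in ixl_iff() above:

	IFF_PROMISC:   UCAST | MCAST | BCAST | VLAN
	IFF_ALLMULTI:  MCAST, plus BCAST | VLAN when the VLAN hardware
	               filter is not in use
	otherwise:     BCAST | VLAN when the VLAN hardware filter is not
	               in use; no unicast or multicast promiscuity

valid_flags always names all four bits, so bits absent from flags are actively cleared in the VSI rather than left as "don't care".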
2220 2234
2221static void 2235static void
2222ixl_stop_rendezvous(struct ixl_softc *sc) 2236ixl_stop_rendezvous(struct ixl_softc *sc)
2223{ 2237{
2224 struct ixl_tx_ring *txr; 2238 struct ixl_tx_ring *txr;
2225 struct ixl_rx_ring *rxr; 2239 struct ixl_rx_ring *rxr;
2226 unsigned int i; 2240 unsigned int i;
2227 2241
2228 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2242 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2229 txr = sc->sc_qps[i].qp_txr; 2243 txr = sc->sc_qps[i].qp_txr;
2230 rxr = sc->sc_qps[i].qp_rxr; 2244 rxr = sc->sc_qps[i].qp_rxr;
2231 2245
2232 mutex_enter(&txr->txr_lock); 2246 mutex_enter(&txr->txr_lock);
2233 mutex_exit(&txr->txr_lock); 2247 mutex_exit(&txr->txr_lock);
2234 2248
2235 mutex_enter(&rxr->rxr_lock); 2249 mutex_enter(&rxr->rxr_lock);
2236 mutex_exit(&rxr->rxr_lock); 2250 mutex_exit(&rxr->rxr_lock);
2237 2251
2238 sc->sc_qps[i].qp_workqueue = false; 2252 sc->sc_qps[i].qp_workqueue = false;
2239 workqueue_wait(sc->sc_workq_txrx, 2253 workqueue_wait(sc->sc_workq_txrx,
2240 &sc->sc_qps[i].qp_work); 2254 &sc->sc_qps[i].qp_work);
2241 } 2255 }
2242} 2256}
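[Editor's note] The empty mutex_enter()/mutex_exit() pairs above are a rendezvous, not a mistake: acquiring and immediately releasing each ring lock guarantees that any interrupt or softint handler holding the lock when the queues were disabled has left its critical section, and workqueue_wait() then drains any deferred work item still queued for the pair.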
2243 2257
2244static void 2258static void
2245ixl_stop_locked(struct ixl_softc *sc) 2259ixl_stop_locked(struct ixl_softc *sc)
2246{ 2260{
2247 struct ifnet *ifp = &sc->sc_ec.ec_if; 2261 struct ifnet *ifp = &sc->sc_ec.ec_if;
2248 struct ixl_rx_ring *rxr; 2262 struct ixl_rx_ring *rxr;
2249 struct ixl_tx_ring *txr; 2263 struct ixl_tx_ring *txr;
2250 unsigned int i; 2264 unsigned int i;
2251 uint32_t reg; 2265 uint32_t reg;
2252 2266
2253 KASSERT(mutex_owned(&sc->sc_cfg_lock)); 2267 KASSERT(mutex_owned(&sc->sc_cfg_lock));
2254 2268
2255 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE); 2269 CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2256 callout_stop(&sc->sc_stats_callout); 2270 callout_stop(&sc->sc_stats_callout);
2257 2271
2258 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2272 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2259 txr = sc->sc_qps[i].qp_txr; 2273 txr = sc->sc_qps[i].qp_txr;
2260 rxr = sc->sc_qps[i].qp_rxr; 2274 rxr = sc->sc_qps[i].qp_rxr;
2261 2275
2262 ixl_disable_queue_intr(sc, &sc->sc_qps[i]); 2276 ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2263 2277
2264 mutex_enter(&txr->txr_lock); 2278 mutex_enter(&txr->txr_lock);
2265 ixl_txr_qdis(sc, txr, 0); 2279 ixl_txr_qdis(sc, txr, 0);
2266 mutex_exit(&txr->txr_lock); 2280 mutex_exit(&txr->txr_lock);
2267 } 2281 }
2268 2282
2269 /* XXX wait at least 400 usec for all tx queues in one go */ 2283 /* XXX wait at least 400 usec for all tx queues in one go */
2270 ixl_flush(sc); 2284 ixl_flush(sc);
2271 DELAY(500); 2285 DELAY(500);
2272 2286
2273 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2287 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2274 txr = sc->sc_qps[i].qp_txr; 2288 txr = sc->sc_qps[i].qp_txr;
2275 rxr = sc->sc_qps[i].qp_rxr; 2289 rxr = sc->sc_qps[i].qp_rxr;
2276 2290
2277 mutex_enter(&txr->txr_lock); 2291 mutex_enter(&txr->txr_lock);
2278 reg = ixl_rd(sc, I40E_QTX_ENA(i)); 2292 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2279 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK); 2293 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2280 ixl_wr(sc, I40E_QTX_ENA(i), reg); 2294 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2281 mutex_exit(&txr->txr_lock); 2295 mutex_exit(&txr->txr_lock);
2282 2296
2283 mutex_enter(&rxr->rxr_lock); 2297 mutex_enter(&rxr->rxr_lock);
2284 reg = ixl_rd(sc, I40E_QRX_ENA(i)); 2298 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2285 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK); 2299 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2286 ixl_wr(sc, I40E_QRX_ENA(i), reg); 2300 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2287 mutex_exit(&rxr->rxr_lock); 2301 mutex_exit(&rxr->rxr_lock);
2288 } 2302 }
2289 2303
2290 /* XXX short wait for all queue disables to settle */ 2304 /* XXX short wait for all queue disables to settle */
2291 ixl_flush(sc); 2305 ixl_flush(sc);
2292 DELAY(50); 2306 DELAY(50);
2293 2307
2294 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2308 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2295 txr = sc->sc_qps[i].qp_txr; 2309 txr = sc->sc_qps[i].qp_txr;
2296 rxr = sc->sc_qps[i].qp_rxr; 2310 rxr = sc->sc_qps[i].qp_rxr;
2297 2311
2298 mutex_enter(&txr->txr_lock); 2312 mutex_enter(&txr->txr_lock);
2299 if (ixl_txr_disabled(sc, txr) != 0) { 2313 if (ixl_txr_disabled(sc, txr) != 0) {
2300 mutex_exit(&txr->txr_lock); 2314 mutex_exit(&txr->txr_lock);
2301 goto die; 2315 goto die;
2302 } 2316 }
2303 mutex_exit(&txr->txr_lock); 2317 mutex_exit(&txr->txr_lock);
2304 2318
2305 mutex_enter(&rxr->rxr_lock); 2319 mutex_enter(&rxr->rxr_lock);
2306 if (ixl_rxr_disabled(sc, rxr) != 0) { 2320 if (ixl_rxr_disabled(sc, rxr) != 0) {
2307 mutex_exit(&rxr->rxr_lock); 2321 mutex_exit(&rxr->rxr_lock);
2308 goto die; 2322 goto die;
2309 } 2323 }
2310 mutex_exit(&rxr->rxr_lock); 2324 mutex_exit(&rxr->rxr_lock);
2311 } 2325 }
2312 2326
2313 ixl_stop_rendezvous(sc); 2327 ixl_stop_rendezvous(sc);
2314 2328
2315 for (i = 0; i < sc->sc_nqueue_pairs; i++) { 2329 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2316 txr = sc->sc_qps[i].qp_txr; 2330 txr = sc->sc_qps[i].qp_txr;
2317 rxr = sc->sc_qps[i].qp_rxr; 2331 rxr = sc->sc_qps[i].qp_rxr;
2318 2332
2319 mutex_enter(&txr->txr_lock); 2333 mutex_enter(&txr->txr_lock);
2320 ixl_txr_unconfig(sc, txr); 2334 ixl_txr_unconfig(sc, txr);
2321 mutex_exit(&txr->txr_lock); 2335 mutex_exit(&txr->txr_lock);
2322 2336
2323 mutex_enter(&rxr->rxr_lock); 2337 mutex_enter(&rxr->rxr_lock);
2324 ixl_rxr_unconfig(sc, rxr); 2338 ixl_rxr_unconfig(sc, rxr);
2325 mutex_exit(&rxr->rxr_lock); 2339 mutex_exit(&rxr->rxr_lock);
2326 2340
2327 ixl_txr_clean(sc, txr); 2341 ixl_txr_clean(sc, txr);
2328 ixl_rxr_clean(sc, rxr); 2342 ixl_rxr_clean(sc, rxr);
2329 } 2343 }
2330 2344
2331 return; 2345 return;
2332die: 2346die:
2333 sc->sc_dead = true; 2347 sc->sc_dead = true;
2334 log(LOG_CRIT, "%s: failed to shut down rings", 2348 log(LOG_CRIT, "%s: failed to shut down rings",
2335 device_xname(sc->sc_dev)); 2349 device_xname(sc->sc_dev));
2336 return; 2350 return;
2337} 2351}
2338 2352
2339static void 2353static void
2340ixl_stop(struct ifnet *ifp, int disable) 2354ixl_stop(struct ifnet *ifp, int disable)
2341{ 2355{
2342 struct ixl_softc *sc = ifp->if_softc; 2356 struct ixl_softc *sc = ifp->if_softc;
2343 2357
2344 mutex_enter(&sc->sc_cfg_lock); 2358 mutex_enter(&sc->sc_cfg_lock);
2345 ixl_stop_locked(sc); 2359 ixl_stop_locked(sc);
2346 mutex_exit(&sc->sc_cfg_lock); 2360 mutex_exit(&sc->sc_cfg_lock);
2347} 2361}
2348 2362
2349static int 2363static int
2350ixl_queue_pairs_alloc(struct ixl_softc *sc) 2364ixl_queue_pairs_alloc(struct ixl_softc *sc)
2351{ 2365{
2352 struct ixl_queue_pair *qp; 2366 struct ixl_queue_pair *qp;
2353 unsigned int i; 2367 unsigned int i;
2354 size_t sz; 2368 size_t sz;
2355 2369
2356 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2370 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2357 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP); 2371 sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2358 2372
2359 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2373 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2360 qp = &sc->sc_qps[i]; 2374 qp = &sc->sc_qps[i];
2361 2375
2362 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2376 qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2363 ixl_handle_queue, qp); 2377 ixl_handle_queue, qp);
2364 if (qp->qp_si == NULL) 2378 if (qp->qp_si == NULL)
2365 goto free; 2379 goto free;
2366 2380
2367 qp->qp_txr = ixl_txr_alloc(sc, i); 2381 qp->qp_txr = ixl_txr_alloc(sc, i);
2368 if (qp->qp_txr == NULL) 2382 if (qp->qp_txr == NULL)
2369 goto free; 2383 goto free;
2370 2384
2371 qp->qp_rxr = ixl_rxr_alloc(sc, i); 2385 qp->qp_rxr = ixl_rxr_alloc(sc, i);
2372 if (qp->qp_rxr == NULL) 2386 if (qp->qp_rxr == NULL)
2373 goto free; 2387 goto free;
2374 2388
2375 qp->qp_sc = sc; 2389 qp->qp_sc = sc;
2376 snprintf(qp->qp_name, sizeof(qp->qp_name), 2390 snprintf(qp->qp_name, sizeof(qp->qp_name),
2377 "%s-TXRX%d", device_xname(sc->sc_dev), i); 2391 "%s-TXRX%d", device_xname(sc->sc_dev), i);
2378 } 2392 }
2379 2393
2380 return 0; 2394 return 0;
2381free: 2395free:
2382 if (sc->sc_qps != NULL) { 2396 if (sc->sc_qps != NULL) {
2383 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2397 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2384 qp = &sc->sc_qps[i]; 2398 qp = &sc->sc_qps[i];
2385 2399
2386 if (qp->qp_txr != NULL) 2400 if (qp->qp_txr != NULL)
2387 ixl_txr_free(sc, qp->qp_txr); 2401 ixl_txr_free(sc, qp->qp_txr);
2388 if (qp->qp_rxr != NULL) 2402 if (qp->qp_rxr != NULL)
2389 ixl_rxr_free(sc, qp->qp_rxr); 2403 ixl_rxr_free(sc, qp->qp_rxr);
2390 if (qp->qp_si != NULL) 2404 if (qp->qp_si != NULL)
2391 softint_disestablish(qp->qp_si); 2405 softint_disestablish(qp->qp_si);
2392 } 2406 }
2393 2407
2394 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2408 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2395 kmem_free(sc->sc_qps, sz); 2409 kmem_free(sc->sc_qps, sz);
2396 sc->sc_qps = NULL; 2410 sc->sc_qps = NULL;
2397 } 2411 }
2398 2412
2399 return -1; 2413 return -1;
2400} 2414}
2401 2415
2402static void 2416static void
2403ixl_queue_pairs_free(struct ixl_softc *sc) 2417ixl_queue_pairs_free(struct ixl_softc *sc)
2404{ 2418{
2405 struct ixl_queue_pair *qp; 2419 struct ixl_queue_pair *qp;
2406 unsigned int i; 2420 unsigned int i;
2407 size_t sz; 2421 size_t sz;
2408 2422
2409 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) { 2423 for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2410 qp = &sc->sc_qps[i]; 2424 qp = &sc->sc_qps[i];
2411 ixl_txr_free(sc, qp->qp_txr); 2425 ixl_txr_free(sc, qp->qp_txr);
2412 ixl_rxr_free(sc, qp->qp_rxr); 2426 ixl_rxr_free(sc, qp->qp_rxr);
2413 softint_disestablish(qp->qp_si); 2427 softint_disestablish(qp->qp_si);
2414 } 2428 }
2415 2429
2416 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max; 2430 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2417 kmem_free(sc->sc_qps, sz); 2431 kmem_free(sc->sc_qps, sz);
2418 sc->sc_qps = NULL; 2432 sc->sc_qps = NULL;
2419} 2433}
2420 2434
2421static struct ixl_tx_ring * 2435static struct ixl_tx_ring *
2422ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid) 2436ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2423{ 2437{
2424 struct ixl_tx_ring *txr = NULL; 2438 struct ixl_tx_ring *txr = NULL;
2425 struct ixl_tx_map *maps = NULL, *txm; 2439 struct ixl_tx_map *maps = NULL, *txm;
2426 unsigned int i; 2440 unsigned int i;
2427 2441
2428 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP); 2442 txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2429 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs, 2443 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2430 KM_SLEEP); 2444 KM_SLEEP);
2431 2445
2432 if (ixl_dmamem_alloc(sc, &txr->txr_mem, 2446 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2433 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs, 2447 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2434 IXL_TX_QUEUE_ALIGN) != 0) 2448 IXL_TX_QUEUE_ALIGN) != 0)
2435 goto free; 2449 goto free;
2436 2450
2437 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2451 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2438 txm = &maps[i]; 2452 txm = &maps[i];
2439 2453
2440 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE, 2454 if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2441 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0, 2455 IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2442 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0) 2456 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2443 goto uncreate; 2457 goto uncreate;
2444 2458
2445 txm->txm_eop = -1; 2459 txm->txm_eop = -1;
2446 txm->txm_m = NULL; 2460 txm->txm_m = NULL;
2447 } 2461 }
2448 2462
2449 txr->txr_cons = txr->txr_prod = 0; 2463 txr->txr_cons = txr->txr_prod = 0;
2450 txr->txr_maps = maps; 2464 txr->txr_maps = maps;
2451 2465
2452 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP); 2466 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2453 if (txr->txr_intrq == NULL) 2467 if (txr->txr_intrq == NULL)
2454 goto uncreate; 2468 goto uncreate;
2455 2469
2456 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE, 2470 txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2457 ixl_deferred_transmit, txr); 2471 ixl_deferred_transmit, txr);
2458 if (txr->txr_si == NULL) 2472 if (txr->txr_si == NULL)
2459 goto destroy_pcq; 2473 goto destroy_pcq;
2460 2474
2461 txr->txr_tail = I40E_QTX_TAIL(qid); 2475 txr->txr_tail = I40E_QTX_TAIL(qid);
2462 txr->txr_qid = qid; 2476 txr->txr_qid = qid;
2463 txr->txr_sc = sc; 2477 txr->txr_sc = sc;
2464 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET); 2478 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2465 2479
2466 return txr; 2480 return txr;
2467 2481
2468destroy_pcq: 2482destroy_pcq:
2469 pcq_destroy(txr->txr_intrq); 2483 pcq_destroy(txr->txr_intrq);
2470uncreate: 2484uncreate:
2471 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2485 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2472 txm = &maps[i]; 2486 txm = &maps[i];
2473 2487
2474 if (txm->txm_map == NULL) 2488 if (txm->txm_map == NULL)
2475 continue; 2489 continue;
2476 2490
2477 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2491 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2478 } 2492 }
2479 2493
2480 ixl_dmamem_free(sc, &txr->txr_mem); 2494 ixl_dmamem_free(sc, &txr->txr_mem);
2481free: 2495free:
2482 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2496 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2483 kmem_free(txr, sizeof(*txr)); 2497 kmem_free(txr, sizeof(*txr));
2484 2498
2485 return NULL; 2499 return NULL;
2486} 2500}
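[Editor's note] The error unwinding in ixl_txr_alloc() above follows the usual reverse-order goto chain: destroy_pcq undoes pcq_create(), uncreate destroys whatever DMA maps were actually created (the txm_map == NULL test skips slots the loop never reached) and frees the descriptor memory, and free releases the two kmem allocations. Creating the maps with BUS_DMA_ALLOCNOW reserves mapping resources up front, so later loads should not fail for lack of them.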
2487 2501
2488static void 2502static void
2489ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable) 2503ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2490{ 2504{
2491 unsigned int qid; 2505 unsigned int qid;
2492 bus_size_t reg; 2506 bus_size_t reg;
2493 uint32_t r; 2507 uint32_t r;
2494 2508
2495 qid = txr->txr_qid + sc->sc_base_queue; 2509 qid = txr->txr_qid + sc->sc_base_queue;
2496 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128); 2510 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2497 qid %= 128; 2511 qid %= 128;
2498 2512
2499 r = ixl_rd(sc, reg); 2513 r = ixl_rd(sc, reg);
2500 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK); 2514 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2501 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); 2515 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2502 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK : 2516 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2503 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK); 2517 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2504 ixl_wr(sc, reg, r); 2518 ixl_wr(sc, reg, r);
2505} 2519}
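[Editor's note] A worked example for the register arithmetic in ixl_txr_qdis() above: each I40E_GLLAN_TXPRE_QDIS register covers 128 global queues, so for global queue 130 (txr_qid + sc_base_queue), reg selects instance 130 / 128 = 1 and the value written into the QINDX field is 130 % 128 = 2; the enable argument then chooses between clearing and setting the queue-disable request for that queue.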
2506 2520
2507static void 2521static void
2508ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2522ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2509{ 2523{
2510 struct ixl_hmc_txq txq; 2524 struct ixl_hmc_txq txq;
2511 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch); 2525 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2512 void *hmc; 2526 void *hmc;
2513 2527
2514 memset(&txq, 0, sizeof(txq)); 2528 memset(&txq, 0, sizeof(txq));
2515 txq.head = htole16(txr->txr_cons); 2529 txq.head = htole16(txr->txr_cons);
2516 txq.new_context = 1; 2530 txq.new_context = 1;
2517 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT); 2531 txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2518 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB; 2532 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2519 txq.qlen = htole16(sc->sc_tx_ring_ndescs); 2533 txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2520 txq.tphrdesc_ena = 0; 2534 txq.tphrdesc_ena = 0;
2521 txq.tphrpacket_ena = 0; 2535 txq.tphrpacket_ena = 0;
2522 txq.tphwdesc_ena = 0; 2536 txq.tphwdesc_ena = 0;
2523 txq.rdylist = data->qs_handle[0]; 2537 txq.rdylist = data->qs_handle[0];
2524 2538
2525 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2539 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2526 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2540 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2527 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, 2541 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2528 __arraycount(ixl_hmc_pack_txq)); 2542 __arraycount(ixl_hmc_pack_txq));
2529} 2543}
2530 2544
2531static void 2545static void
2532ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2546ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2533{ 2547{
2534 void *hmc; 2548 void *hmc;
2535 2549
2536 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid); 2550 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2537 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX)); 2551 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2538 txr->txr_cons = txr->txr_prod = 0; 2552 txr->txr_cons = txr->txr_prod = 0;
2539} 2553}
2540 2554
2541static void 2555static void
2542ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2556ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2543{ 2557{
2544 struct ixl_tx_map *maps, *txm; 2558 struct ixl_tx_map *maps, *txm;
2545 bus_dmamap_t map; 2559 bus_dmamap_t map;
2546 unsigned int i; 2560 unsigned int i;
2547 2561
2548 maps = txr->txr_maps; 2562 maps = txr->txr_maps;
2549 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2563 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2550 txm = &maps[i]; 2564 txm = &maps[i];
2551 2565
2552 if (txm->txm_m == NULL) 2566 if (txm->txm_m == NULL)
2553 continue; 2567 continue;
2554 2568
2555 map = txm->txm_map; 2569 map = txm->txm_map;
2556 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 2570 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2557 BUS_DMASYNC_POSTWRITE); 2571 BUS_DMASYNC_POSTWRITE);
2558 bus_dmamap_unload(sc->sc_dmat, map); 2572 bus_dmamap_unload(sc->sc_dmat, map);
2559 2573
2560 m_freem(txm->txm_m); 2574 m_freem(txm->txm_m);
2561 txm->txm_m = NULL; 2575 txm->txm_m = NULL;
2562 } 2576 }
2563} 2577}
2564 2578
2565static int 2579static int
2566ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2580ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2567{ 2581{
2568 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2582 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2569 uint32_t reg; 2583 uint32_t reg;
2570 int i; 2584 int i;
2571 2585
2572 for (i = 0; i < 10; i++) { 2586 for (i = 0; i < 10; i++) {
2573 reg = ixl_rd(sc, ena); 2587 reg = ixl_rd(sc, ena);
2574 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK)) 2588 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2575 return 0; 2589 return 0;
2576 2590
2577 delaymsec(10); 2591 delaymsec(10);
2578 } 2592 }
2579 2593
2580 return ETIMEDOUT; 2594 return ETIMEDOUT;
2581} 2595}
2582 2596
2583static int 2597static int
2584ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2598ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2585{ 2599{
2586 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid); 2600 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2587 uint32_t reg; 2601 uint32_t reg;
2588 int i; 2602 int i;
2589 2603
2590 KASSERT(mutex_owned(&txr->txr_lock)); 2604 KASSERT(mutex_owned(&txr->txr_lock));
2591 2605
2592 for (i = 0; i < 10; i++) { 2606 for (i = 0; i < 10; i++) {
2593 reg = ixl_rd(sc, ena); 2607 reg = ixl_rd(sc, ena);
2594 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2608 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2595 return 0; 2609 return 0;
2596 2610
2597 delaymsec(10); 2611 delaymsec(10);
2598 } 2612 }
2599 2613
2600 return ETIMEDOUT; 2614 return ETIMEDOUT;
2601} 2615}
2602 2616
2603static void 2617static void
2604ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr) 2618ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2605{ 2619{
2606 struct ixl_tx_map *maps, *txm; 2620 struct ixl_tx_map *maps, *txm;
2607 struct mbuf *m; 2621 struct mbuf *m;
2608 unsigned int i; 2622 unsigned int i;
2609 2623
2610 softint_disestablish(txr->txr_si); 2624 softint_disestablish(txr->txr_si);
2611 while ((m = pcq_get(txr->txr_intrq)) != NULL) 2625 while ((m = pcq_get(txr->txr_intrq)) != NULL)
2612 m_freem(m); 2626 m_freem(m);
2613 pcq_destroy(txr->txr_intrq); 2627 pcq_destroy(txr->txr_intrq);
2614 2628
2615 maps = txr->txr_maps; 2629 maps = txr->txr_maps;
2616 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) { 2630 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2617 txm = &maps[i]; 2631 txm = &maps[i];
2618 2632
2619 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map); 2633 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2620 } 2634 }
2621 2635
2622 ixl_dmamem_free(sc, &txr->txr_mem); 2636 ixl_dmamem_free(sc, &txr->txr_mem);
2623 mutex_destroy(&txr->txr_lock); 2637 mutex_destroy(&txr->txr_lock);
2624 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs); 2638 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2625 kmem_free(txr, sizeof(*txr)); 2639 kmem_free(txr, sizeof(*txr));
2626} 2640}
2627 2641
2628static inline int 2642static inline int
2629ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0, 2643ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2630 struct ixl_tx_ring *txr) 2644 struct ixl_tx_ring *txr)
2631{ 2645{
2632 struct mbuf *m; 2646 struct mbuf *m;
2633 int error; 2647 int error;
2634 2648
2635 KASSERT(mutex_owned(&txr->txr_lock)); 2649 KASSERT(mutex_owned(&txr->txr_lock));
2636 2650
2637 m = *m0; 2651 m = *m0;
2638 2652
2639 error = bus_dmamap_load_mbuf(dmat, map, m, 2653 error = bus_dmamap_load_mbuf(dmat, map, m,
2640 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2654 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2641 if (error != EFBIG) 2655 if (error != EFBIG)
2642 return error; 2656 return error;
2643 2657
2644 m = m_defrag(m, M_DONTWAIT); 2658 m = m_defrag(m, M_DONTWAIT);
2645 if (m != NULL) { 2659 if (m != NULL) {
2646 *m0 = m; 2660 *m0 = m;
2647 txr->txr_defragged.ev_count++; 2661 txr->txr_defragged.ev_count++;
2648 2662
2649 error = bus_dmamap_load_mbuf(dmat, map, m, 2663 error = bus_dmamap_load_mbuf(dmat, map, m,
2650 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT); 2664 BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2651 } else { 2665 } else {
2652 txr->txr_defrag_failed.ev_count++; 2666 txr->txr_defrag_failed.ev_count++;
2653 error = ENOBUFS; 2667 error = ENOBUFS;
2654 } 2668 }
2655 2669
2656 return error; 2670 return error;
2657} 2671}
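[Editor's note] The EFBIG path in ixl_load_mbuf() above is the standard recovery for a chain with more fragments than the map allows: m_defrag() copies the chain into as few clusters as possible, the load is retried exactly once, and the two event counters record which way it went. Any remaining error propagates to ixl_tx_common_locked(), which drops the packet and bumps if_oerrors.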
2658 2672
2659static inline int 2673static inline int
2660ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd) 2674ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2661{ 2675{
2662 struct ether_header *eh; 2676 struct ether_header *eh;
2663 size_t len; 2677 size_t len;
2664 uint64_t cmd; 2678 uint64_t cmd;
2665 2679
2666 cmd = 0; 2680 cmd = 0;
2667 2681
2668 eh = mtod(m, struct ether_header *); 2682 eh = mtod(m, struct ether_header *);
2669 switch (htons(eh->ether_type)) { 2683 switch (htons(eh->ether_type)) {
2670 case ETHERTYPE_IP: 2684 case ETHERTYPE_IP:
2671 case ETHERTYPE_IPV6: 2685 case ETHERTYPE_IPV6:
2672 len = ETHER_HDR_LEN; 2686 len = ETHER_HDR_LEN;
2673 break; 2687 break;
2674 case ETHERTYPE_VLAN: 2688 case ETHERTYPE_VLAN:
2675 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2689 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2676 break; 2690 break;
2677 default: 2691 default:
2678 len = 0; 2692 len = 0;
2679 } 2693 }
2680 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT); 2694 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2681 2695
2682 if (m->m_pkthdr.csum_flags & 2696 if (m->m_pkthdr.csum_flags &
2683 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2697 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2684 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4; 2698 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2685 } 2699 }
2686 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2700 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2687 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM; 2701 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2688 } 2702 }
2689 2703
2690 if (m->m_pkthdr.csum_flags & 2704 if (m->m_pkthdr.csum_flags &
2691 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2705 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2692 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6; 2706 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2693 } 2707 }
2694 2708
2695 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) { 2709 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2696 case IXL_TX_DESC_CMD_IIPT_IPV4: 2710 case IXL_TX_DESC_CMD_IIPT_IPV4:
2697 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM: 2711 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2698 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2712 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2699 break; 2713 break;
2700 case IXL_TX_DESC_CMD_IIPT_IPV6: 2714 case IXL_TX_DESC_CMD_IIPT_IPV6:
2701 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2715 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2702 break; 2716 break;
2703 default: 2717 default:
2704 len = 0; 2718 len = 0;
2705 } 2719 }
2706 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT); 2720 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2707 2721
2708 if (m->m_pkthdr.csum_flags & 2722 if (m->m_pkthdr.csum_flags &
2709 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) { 2723 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2710 len = sizeof(struct tcphdr); 2724 len = sizeof(struct tcphdr);
2711 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP; 2725 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2712 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) { 2726 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2713 len = sizeof(struct udphdr); 2727 len = sizeof(struct udphdr);
2714 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP; 2728 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2715 } else { 2729 } else {
2716 len = 0; 2730 len = 0;
2717 } 2731 }
2718 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT); 2732 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2719 2733
2720 *cmd_txd |= cmd; 2734 *cmd_txd |= cmd;
2721 return 0; 2735 return 0;
2722} 2736}
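[Editor's note] A worked example for the shift factors above, since the descriptor encodes header lengths in hardware units (MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte dwords): a plain Ethernet + IPv4 + TCP frame, assuming no IP or TCP options, yields

	MACLEN = 14 >> 1 = 7
	IPLEN  = 20 >> 2 = 5
	L4LEN  = 20 >> 2 = 5

while a VLAN-tagged frame starts from 18 bytes of MAC header instead.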
2723 2737
2724static void 2738static void
2725ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr, 2739ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2726 bool is_transmit) 2740 bool is_transmit)
2727{ 2741{
2728 struct ixl_softc *sc = ifp->if_softc; 2742 struct ixl_softc *sc = ifp->if_softc;
2729 struct ixl_tx_desc *ring, *txd; 2743 struct ixl_tx_desc *ring, *txd;
2730 struct ixl_tx_map *txm; 2744 struct ixl_tx_map *txm;
2731 bus_dmamap_t map; 2745 bus_dmamap_t map;
2732 struct mbuf *m; 2746 struct mbuf *m;
2733 uint64_t cmd, cmd_txd; 2747 uint64_t cmd, cmd_txd;
2734 unsigned int prod, free, last, i; 2748 unsigned int prod, free, last, i;
2735 unsigned int mask; 2749 unsigned int mask;
2736 int post = 0; 2750 int post = 0;
2737 2751
2738 KASSERT(mutex_owned(&txr->txr_lock)); 2752 KASSERT(mutex_owned(&txr->txr_lock));
2739 2753
2740 if (!ISSET(ifp->if_flags, IFF_RUNNING) 2754 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2741 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) { 2755 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2742 if (!is_transmit) 2756 if (!is_transmit)
2743 IFQ_PURGE(&ifp->if_snd); 2757 IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* compute free descriptors, accounting for ring wrap-around */
	prod = txr->txr_prod;
	free = txr->txr_cons;
	if (free <= prod)
		free += sc->sc_tx_ring_ndescs;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;
	last = prod;
	cmd = 0;
	txd = NULL;

	for (;;) {
		/* stop early once a full-sized packet can no longer fit */
		if (free <= IXL_TX_PKT_DESCS) {
			if (!is_transmit)
				SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		if (is_transmit)
			m = pcq_get(txr->txr_intrq);
		else
			IFQ_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL)
			break;

		txm = &txr->txr_maps[prod];
		map = txm->txm_map;

		if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
			if_statinc(ifp, if_oerrors);
			m_freem(m);
			continue;
		}

		cmd_txd = 0;
		if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
			ixl_tx_setup_offloads(m, &cmd_txd);
		}

		if (vlan_has_tag(m)) {
			cmd_txd |= (uint64_t)vlan_get_tag(m) <<
			    IXL_TX_DESC_L2TAG1_SHIFT;
			cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* one descriptor per DMA segment */
		for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
			txd = &ring[prod];

			cmd = (uint64_t)map->dm_segs[i].ds_len <<
			    IXL_TX_DESC_BSIZE_SHIFT;
			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
			cmd |= cmd_txd;

			txd->addr = htole64(map->dm_segs[i].ds_addr);
			txd->cmd = htole64(cmd);

			last = prod;

			prod++;
			prod &= mask;
		}
		/* mark the final descriptor of the packet */
		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
		txd->cmd = htole64(cmd);

		txm->txm_m = m;
		txm->txm_eop = last;

		bpf_mtap(ifp, m, BPF_D_OUT);

		free -= i;
		post = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);

	if (post) {
		txr->txr_prod = prod;
		ixl_wr(sc, txr->txr_tail, prod);
	}
}
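
The free-space and wrap-around arithmetic above works because sc_tx_ring_ndescs is a power of two: producer and consumer indices advance modulo the ring size via the mask, and an empty ring (prod == cons) reports every descriptor free. A minimal standalone sketch of the same accounting; the identifiers ring_free, prod, cons, and ndescs are illustrative only, not driver symbols:

	#include <assert.h>

	/*
	 * Sketch of the descriptor-ring accounting used above, assuming
	 * a power-of-two ring size.  Hypothetical helper, for
	 * illustration only.
	 */
	static unsigned int
	ring_free(unsigned int prod, unsigned int cons, unsigned int ndescs)
	{
		unsigned int free = cons;

		if (free <= prod)
			free += ndescs;	/* consumer is behind: unwrap */
		return free - prod;
	}

	int
	main(void)
	{
		/* prod == cons means the ring is empty: all free. */
		assert(ring_free(0, 0, 1024) == 1024);
		/* Producer ahead of consumer leaves the gap between them. */
		assert(ring_free(10, 4, 1024) == 1018);
		return 0;
	}

Keeping the ring size a power of two is what lets the hot path use "prod &= mask" instead of a modulo when advancing the producer.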
2836 2850
static int
ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ixl_tx_desc *ring, *txd;
	struct ixl_tx_map *txm;
	struct mbuf *m;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	uint64_t dtype;
	int done = 0, more = 0;

	KASSERT(mutex_owned(&txr->txr_lock));

	prod = txr->txr_prod;
	cons = txr->txr_cons;

	if (cons == prod)
		return 0;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	do {
		if (txlimit-- <= 0) {
			more = 1;
			break;
		}

		txm = &txr->txr_maps[cons];
		last = txm->txm_eop;
		txd = &ring[last];

		/*
		 * The packet is complete only once the hardware has
		 * rewritten the DTYPE field of its last descriptor.
		 */
		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
			break;

		map = txm->txm_map;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = txm->txm_m;
		if (m != NULL) {
			if_statinc_ref(nsr, if_opackets);
			if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
			if (ISSET(m->m_flags, M_MCAST))
				if_statinc_ref(nsr, if_omcasts);
			m_freem(m);
		}

		txm->txm_m = NULL;
		txm->txm_eop = -1;

		cons = last + 1;
		cons &= mask;
		done = 1;
	} while (cons != prod);

	IF_STAT_PUTREF(ifp);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);

	txr->txr_cons = cons;

	if (done) {
		softint_schedule(txr->txr_si);
		if (txr->txr_qid == 0) {
			CLR(ifp->if_flags, IFF_OACTIVE);
			if_schedule_deferred_start(ifp);
		}
	}

	return more;
}
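
ixl_txeof() reclaims packets by jumping from the consumer slot straight to the recorded end-of-packet descriptor (txm_eop) and testing whether the hardware marked it done; only then is the whole packet's DMA map unloaded and the consumer advanced past it. A reduced sketch of that wrap-safe advance, using hypothetical names rather than driver symbols:

	/*
	 * Hypothetical helper mirroring the consumer advance in
	 * ixl_txeof(): move the masked consumer index past a completed
	 * packet whose last descriptor sits at 'eop'.  Assumes ndescs
	 * is a power of two.
	 */
	static unsigned int
	consumer_advance(unsigned int eop, unsigned int ndescs)
	{
		return (eop + 1) & (ndescs - 1);
	}

With this, a packet whose last descriptor occupies the final slot (eop == ndescs - 1) wraps the consumer back to 0, matching the "cons = last + 1; cons &= mask;" steps above.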
2920 2934
static void
ixl_start(struct ifnet *ifp)
{
	struct ixl_softc *sc;
	struct ixl_tx_ring *txr;

	sc = ifp->if_softc;
	txr = sc->sc_qps[0].qp_txr;

	mutex_enter(&txr->txr_lock);
	ixl_tx_common_locked(ifp, txr, false);
	mutex_exit(&txr->txr_lock);
}
2934 2948
static inline unsigned int
ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
{
	u_int cpuid;

	cpuid = cpu_index(curcpu());

	return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
}
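
Queue selection simply folds the current CPU index onto the available queue pairs; the mbuf argument is not consulted here. With 8 CPUs and 4 queue pairs, for instance, CPUs 0 and 4 both land on queue 0. A one-line standalone sketch of the mapping (hypothetical helper, not a driver symbol):

	/* CPU n is served by queue n % nqueues. */
	static unsigned int
	select_txqueue(unsigned int cpuid, unsigned int nqueues)
	{
		return cpuid % nqueues;
	}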
2944 2958