Thu Oct 12 05:50:56 2023 UTC
ixg(4): Don't print wrong error message about ixgbe_num_queues.

 Don't overwrite the ixgbe_num_queues global variable. It holds only the
default number of queues and must not be modified during attach, because
later device attaches reference it. For example, the number of MSI-X
vectors is 64 on X540 and 18 on 82599. When both cards are inserted into
a machine with 24 CPUs and the X540 is probed first, ixgbe_num_queues is
overwritten to 24 and the following error message is printed when
attaching the 82599:

	ixg2: autoconfiguration error: ixgbe_num_queues (24) is too large,
	using reduced amount (17).

Note that the per-device number of queues is kept in sc->num_queues and
exposed via the hw.ixgN.num_queues sysctl.


(msaitoh)
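
To make the failure mode concrete, here is a minimal standalone C sketch
of the pattern. The names (attach_buggy(), attach_fixed(),
num_queues_tunable) are hypothetical, and the 63/17 usable-vector counts
are inferred from the log message above by reserving one MSI-X vector for
the admin interrupt; this condenses the logic and is not the code in the
diff below:

	#include <stdio.h>

	/* Global tunable: the default only; 0 = autoconfigure. */
	static int num_queues_tunable = 0;

	/* Buggy pattern: attach writes its result back into the global. */
	static int
	attach_buggy(const char *name, int usable_vectors, int ncpu)
	{
		if (num_queues_tunable == 0)
			num_queues_tunable = ncpu;	/* BUG: overwrites the default */
		if (num_queues_tunable > usable_vectors) {
			printf("%s: autoconfiguration error: ixgbe_num_queues (%d) is "
			    "too large, using reduced amount (%d).\n",
			    name, num_queues_tunable, usable_vectors);
			num_queues_tunable = usable_vectors; /* BUG: clamps the global */
		}
		return num_queues_tunable;	/* what would land in sc->num_queues */
	}

	/* Fixed pattern: clamp a per-device copy; the global stays a default. */
	static int
	attach_fixed(const char *name, int usable_vectors, int ncpu)
	{
		int queues = num_queues_tunable;

		if (queues == 0)
			queues = (ncpu < usable_vectors) ? ncpu : usable_vectors;
		else if (queues > usable_vectors) {
			printf("%s: autoconfiguration error: ixgbe_num_queues (%d) is "
			    "too large, using reduced amount (%d).\n",
			    name, queues, usable_vectors);
			queues = usable_vectors;
		}
		return queues;
	}

	int
	main(void)
	{
		/* X540: 64 MSI-X vectors -> 63 usable; 82599: 18 -> 17; 24 CPUs. */
		printf("buggy: ixg0 -> %d queues\n", attach_buggy("ixg0", 63, 24));
		printf("buggy: ixg2 -> %d queues\n", attach_buggy("ixg2", 17, 24));

		num_queues_tunable = 0;
		printf("fixed: ixg0 -> %d queues\n", attach_fixed("ixg0", 63, 24));
		printf("fixed: ixg2 -> %d queues\n", attach_fixed("ixg2", 17, 24));
		return 0;
	}

Run against the scenario from the log (24 CPUs, X540 first), the buggy
variant prints the spurious message for ixg2 only because ixg0's attach
overwrote the global; the fixed variant clamps a per-device copy, which
is what ends up in sc->num_queues.
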
cvs diff -r1.341 -r1.342 src/sys/dev/pci/ixgbe/ixgbe.c

--- src/sys/dev/pci/ixgbe/ixgbe.c 2023/10/12 03:43:55 1.341
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2023/10/12 05:50:55 1.342
@@ -1,1066 +1,1066 @@
1/* $NetBSD: ixgbe.c,v 1.341 2023/10/12 03:43:55 msaitoh Exp $ */ 1/* $NetBSD: ixgbe.c,v 1.342 2023/10/12 05:50:55 msaitoh Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37/* 37/*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc. 38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved. 39 * All rights reserved.
40 * 40 *
41 * This code is derived from software contributed to The NetBSD Foundation 41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc. 42 * by Coyote Point Systems, Inc.
43 * 43 *
44 * Redistribution and use in source and binary forms, with or without 44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions 45 * modification, are permitted provided that the following conditions
46 * are met: 46 * are met:
47 * 1. Redistributions of source code must retain the above copyright 47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer. 48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright 49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the 50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution. 51 * documentation and/or other materials provided with the distribution.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#include <sys/cdefs.h> 66#include <sys/cdefs.h>
67__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.341 2023/10/12 03:43:55 msaitoh Exp $"); 67__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.342 2023/10/12 05:50:55 msaitoh Exp $");
68 68
69#ifdef _KERNEL_OPT 69#ifdef _KERNEL_OPT
70#include "opt_inet.h" 70#include "opt_inet.h"
71#include "opt_inet6.h" 71#include "opt_inet6.h"
72#include "opt_net_mpsafe.h" 72#include "opt_net_mpsafe.h"
73#endif 73#endif
74 74
75#include "ixgbe.h" 75#include "ixgbe.h"
76#include "ixgbe_phy.h" 76#include "ixgbe_phy.h"
77#include "ixgbe_sriov.h" 77#include "ixgbe_sriov.h"
78 78
79#include <sys/cprng.h> 79#include <sys/cprng.h>
80#include <dev/mii/mii.h> 80#include <dev/mii/mii.h>
81#include <dev/mii/miivar.h> 81#include <dev/mii/miivar.h>
82 82
83/************************************************************************ 83/************************************************************************
84 * Driver version 84 * Driver version
85 ************************************************************************/ 85 ************************************************************************/
86static const char ixgbe_driver_version[] = "4.0.1-k"; 86static const char ixgbe_driver_version[] = "4.0.1-k";
87/* XXX NetBSD: + 3.3.24 */ 87/* XXX NetBSD: + 3.3.24 */
88 88
89/************************************************************************ 89/************************************************************************
90 * PCI Device ID Table 90 * PCI Device ID Table
91 * 91 *
92 * Used by probe to select devices to load on 92 * Used by probe to select devices to load on
93 * Last field stores an index into ixgbe_strings 93 * Last field stores an index into ixgbe_strings
94 * Last entry must be all 0s 94 * Last entry must be all 0s
95 * 95 *
96 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 96 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
97 ************************************************************************/ 97 ************************************************************************/
98static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] = 98static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
99{ 99{
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, 100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, 101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, 102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, 103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, 104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, 105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0}, 106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, 107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, 108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, 109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, 110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, 111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0}, 112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, 113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, 114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, 115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0}, 116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, 117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, 118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, 119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, 0, 0, 0}, 120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, 121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, 122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, 123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, 124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, 125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, 126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, 127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, 128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, 129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, 130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0}, 131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, 132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, 133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, 134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0}, 135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0}, 136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0}, 137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0}, 138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0}, 139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0}, 140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0}, 141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0}, 142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0}, 143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0}, 144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0}, 145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0}, 146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
147 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0}, 147 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
148 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0}, 148 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
149 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0}, 149 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
150 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0}, 150 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
151 /* required last entry */ 151 /* required last entry */
152 {0, 0, 0, 0, 0} 152 {0, 0, 0, 0, 0}
153}; 153};
154 154
155/************************************************************************ 155/************************************************************************
156 * Table of branding strings 156 * Table of branding strings
157 ************************************************************************/ 157 ************************************************************************/
158static const char *ixgbe_strings[] = { 158static const char *ixgbe_strings[] = {
159 "Intel(R) PRO/10GbE PCI-Express Network Driver" 159 "Intel(R) PRO/10GbE PCI-Express Network Driver"
160}; 160};
161 161
162/************************************************************************ 162/************************************************************************
163 * Function prototypes 163 * Function prototypes
164 ************************************************************************/ 164 ************************************************************************/
165static int ixgbe_probe(device_t, cfdata_t, void *); 165static int ixgbe_probe(device_t, cfdata_t, void *);
166static void ixgbe_quirks(struct ixgbe_softc *); 166static void ixgbe_quirks(struct ixgbe_softc *);
167static void ixgbe_attach(device_t, device_t, void *); 167static void ixgbe_attach(device_t, device_t, void *);
168static int ixgbe_detach(device_t, int); 168static int ixgbe_detach(device_t, int);
169#if 0 169#if 0
170static int ixgbe_shutdown(device_t); 170static int ixgbe_shutdown(device_t);
171#endif 171#endif
172static bool ixgbe_suspend(device_t, const pmf_qual_t *); 172static bool ixgbe_suspend(device_t, const pmf_qual_t *);
173static bool ixgbe_resume(device_t, const pmf_qual_t *); 173static bool ixgbe_resume(device_t, const pmf_qual_t *);
174static int ixgbe_ifflags_cb(struct ethercom *); 174static int ixgbe_ifflags_cb(struct ethercom *);
175static int ixgbe_ioctl(struct ifnet *, u_long, void *); 175static int ixgbe_ioctl(struct ifnet *, u_long, void *);
176static int ixgbe_init(struct ifnet *); 176static int ixgbe_init(struct ifnet *);
177static void ixgbe_init_locked(struct ixgbe_softc *); 177static void ixgbe_init_locked(struct ixgbe_softc *);
178static void ixgbe_ifstop(struct ifnet *, int); 178static void ixgbe_ifstop(struct ifnet *, int);
179static void ixgbe_stop_locked(void *); 179static void ixgbe_stop_locked(void *);
180static void ixgbe_init_device_features(struct ixgbe_softc *); 180static void ixgbe_init_device_features(struct ixgbe_softc *);
181static int ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool); 181static int ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
182static void ixgbe_add_media_types(struct ixgbe_softc *); 182static void ixgbe_add_media_types(struct ixgbe_softc *);
183static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); 183static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
184static int ixgbe_media_change(struct ifnet *); 184static int ixgbe_media_change(struct ifnet *);
185static int ixgbe_allocate_pci_resources(struct ixgbe_softc *, 185static int ixgbe_allocate_pci_resources(struct ixgbe_softc *,
186 const struct pci_attach_args *); 186 const struct pci_attach_args *);
187static void ixgbe_free_deferred_handlers(struct ixgbe_softc *); 187static void ixgbe_free_deferred_handlers(struct ixgbe_softc *);
188static void ixgbe_get_slot_info(struct ixgbe_softc *); 188static void ixgbe_get_slot_info(struct ixgbe_softc *);
189static int ixgbe_allocate_msix(struct ixgbe_softc *, 189static int ixgbe_allocate_msix(struct ixgbe_softc *,
190 const struct pci_attach_args *); 190 const struct pci_attach_args *);
191static int ixgbe_allocate_legacy(struct ixgbe_softc *, 191static int ixgbe_allocate_legacy(struct ixgbe_softc *,
192 const struct pci_attach_args *); 192 const struct pci_attach_args *);
193static int ixgbe_configure_interrupts(struct ixgbe_softc *); 193static int ixgbe_configure_interrupts(struct ixgbe_softc *);
194static void ixgbe_free_pciintr_resources(struct ixgbe_softc *); 194static void ixgbe_free_pciintr_resources(struct ixgbe_softc *);
195static void ixgbe_free_pci_resources(struct ixgbe_softc *); 195static void ixgbe_free_pci_resources(struct ixgbe_softc *);
196static void ixgbe_local_timer(void *); 196static void ixgbe_local_timer(void *);
197static void ixgbe_handle_timer(struct work *, void *); 197static void ixgbe_handle_timer(struct work *, void *);
198static void ixgbe_recovery_mode_timer(void *); 198static void ixgbe_recovery_mode_timer(void *);
199static void ixgbe_handle_recovery_mode_timer(struct work *, void *); 199static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
200static int ixgbe_setup_interface(device_t, struct ixgbe_softc *); 200static int ixgbe_setup_interface(device_t, struct ixgbe_softc *);
201static void ixgbe_config_gpie(struct ixgbe_softc *); 201static void ixgbe_config_gpie(struct ixgbe_softc *);
202static void ixgbe_config_dmac(struct ixgbe_softc *); 202static void ixgbe_config_dmac(struct ixgbe_softc *);
203static void ixgbe_config_delay_values(struct ixgbe_softc *); 203static void ixgbe_config_delay_values(struct ixgbe_softc *);
204static void ixgbe_schedule_admin_tasklet(struct ixgbe_softc *); 204static void ixgbe_schedule_admin_tasklet(struct ixgbe_softc *);
205static void ixgbe_config_link(struct ixgbe_softc *); 205static void ixgbe_config_link(struct ixgbe_softc *);
206static void ixgbe_check_wol_support(struct ixgbe_softc *); 206static void ixgbe_check_wol_support(struct ixgbe_softc *);
207static int ixgbe_setup_low_power_mode(struct ixgbe_softc *); 207static int ixgbe_setup_low_power_mode(struct ixgbe_softc *);
208#if 0 208#if 0
209static void ixgbe_rearm_queues(struct ixgbe_softc *, u64); 209static void ixgbe_rearm_queues(struct ixgbe_softc *, u64);
210#endif 210#endif
211 211
212static void ixgbe_initialize_transmit_units(struct ixgbe_softc *); 212static void ixgbe_initialize_transmit_units(struct ixgbe_softc *);
213static void ixgbe_initialize_receive_units(struct ixgbe_softc *); 213static void ixgbe_initialize_receive_units(struct ixgbe_softc *);
214static void ixgbe_enable_rx_drop(struct ixgbe_softc *); 214static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
215static void ixgbe_disable_rx_drop(struct ixgbe_softc *); 215static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
216static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *); 216static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
217 217
218static void ixgbe_enable_intr(struct ixgbe_softc *); 218static void ixgbe_enable_intr(struct ixgbe_softc *);
219static void ixgbe_disable_intr(struct ixgbe_softc *); 219static void ixgbe_disable_intr(struct ixgbe_softc *);
220static void ixgbe_update_stats_counters(struct ixgbe_softc *); 220static void ixgbe_update_stats_counters(struct ixgbe_softc *);
221static void ixgbe_set_rxfilter(struct ixgbe_softc *); 221static void ixgbe_set_rxfilter(struct ixgbe_softc *);
222static void ixgbe_update_link_status(struct ixgbe_softc *); 222static void ixgbe_update_link_status(struct ixgbe_softc *);
223static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8); 223static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
224static void ixgbe_configure_ivars(struct ixgbe_softc *); 224static void ixgbe_configure_ivars(struct ixgbe_softc *);
225static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 225static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
226static void ixgbe_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t); 226static void ixgbe_eitr_write(struct ixgbe_softc *, uint32_t, uint32_t);
227 227
228static void ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *); 228static void ixgbe_setup_vlan_hw_tagging(struct ixgbe_softc *);
229static void ixgbe_setup_vlan_hw_support(struct ixgbe_softc *); 229static void ixgbe_setup_vlan_hw_support(struct ixgbe_softc *);
230static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool); 230static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
231static int ixgbe_register_vlan(struct ixgbe_softc *, u16); 231static int ixgbe_register_vlan(struct ixgbe_softc *, u16);
232static int ixgbe_unregister_vlan(struct ixgbe_softc *, u16); 232static int ixgbe_unregister_vlan(struct ixgbe_softc *, u16);
233 233
234static void ixgbe_add_device_sysctls(struct ixgbe_softc *); 234static void ixgbe_add_device_sysctls(struct ixgbe_softc *);
235static void ixgbe_add_hw_stats(struct ixgbe_softc *); 235static void ixgbe_add_hw_stats(struct ixgbe_softc *);
236static void ixgbe_clear_evcnt(struct ixgbe_softc *); 236static void ixgbe_clear_evcnt(struct ixgbe_softc *);
237static int ixgbe_set_flowcntl(struct ixgbe_softc *, int); 237static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
238static int ixgbe_set_advertise(struct ixgbe_softc *, int); 238static int ixgbe_set_advertise(struct ixgbe_softc *, int);
239static int ixgbe_get_default_advertise(struct ixgbe_softc *); 239static int ixgbe_get_default_advertise(struct ixgbe_softc *);
240 240
241/* Sysctl handlers */ 241/* Sysctl handlers */
242static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO); 242static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
243static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO); 243static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
244static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); 244static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
245static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO); 245static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
246static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO); 246static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
247static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO); 247static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
248#ifdef IXGBE_DEBUG 248#ifdef IXGBE_DEBUG
249static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO); 249static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
250static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO); 250static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
251#endif 251#endif
252static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO); 252static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
253static int ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO); 253static int ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
254static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO); 254static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
255static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO); 255static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
256static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO); 256static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
257static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO); 257static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
258static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO); 258static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
259static int ixgbe_sysctl_debug(SYSCTLFN_PROTO); 259static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
260static int ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO); 260static int ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
261static int ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO); 261static int ixgbe_sysctl_tx_process_limit(SYSCTLFN_PROTO);
262static int ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO); 262static int ixgbe_sysctl_rx_process_limit(SYSCTLFN_PROTO);
263static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO); 263static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
264static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO); 264static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
265 265
266/* Interrupt functions */ 266/* Interrupt functions */
267static int ixgbe_msix_que(void *); 267static int ixgbe_msix_que(void *);
268static int ixgbe_msix_admin(void *); 268static int ixgbe_msix_admin(void *);
269static void ixgbe_intr_admin_common(struct ixgbe_softc *, u32, u32 *); 269static void ixgbe_intr_admin_common(struct ixgbe_softc *, u32, u32 *);
270static int ixgbe_legacy_irq(void *); 270static int ixgbe_legacy_irq(void *);
271 271
272/* Event handlers running on workqueue */ 272/* Event handlers running on workqueue */
273static void ixgbe_handle_que(void *); 273static void ixgbe_handle_que(void *);
274static void ixgbe_handle_link(void *); 274static void ixgbe_handle_link(void *);
275static void ixgbe_handle_msf(void *); 275static void ixgbe_handle_msf(void *);
276static void ixgbe_handle_mod(void *, bool); 276static void ixgbe_handle_mod(void *, bool);
277static void ixgbe_handle_phy(void *); 277static void ixgbe_handle_phy(void *);
278 278
279/* Deferred workqueue handlers */ 279/* Deferred workqueue handlers */
280static void ixgbe_handle_admin(struct work *, void *); 280static void ixgbe_handle_admin(struct work *, void *);
281static void ixgbe_handle_que_work(struct work *, void *); 281static void ixgbe_handle_que_work(struct work *, void *);
282 282
283static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *); 283static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
284 284
285/************************************************************************ 285/************************************************************************
286 * NetBSD Device Interface Entry Points 286 * NetBSD Device Interface Entry Points
287 ************************************************************************/ 287 ************************************************************************/
288CFATTACH_DECL3_NEW(ixg, sizeof(struct ixgbe_softc), 288CFATTACH_DECL3_NEW(ixg, sizeof(struct ixgbe_softc),
289 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL, 289 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
290 DVF_DETACH_SHUTDOWN); 290 DVF_DETACH_SHUTDOWN);
291 291
292#if 0 292#if 0
293devclass_t ix_devclass; 293devclass_t ix_devclass;
294DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); 294DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
295 295
296MODULE_DEPEND(ix, pci, 1, 1, 1); 296MODULE_DEPEND(ix, pci, 1, 1, 1);
297MODULE_DEPEND(ix, ether, 1, 1, 1); 297MODULE_DEPEND(ix, ether, 1, 1, 1);
298#ifdef DEV_NETMAP 298#ifdef DEV_NETMAP
299MODULE_DEPEND(ix, netmap, 1, 1, 1); 299MODULE_DEPEND(ix, netmap, 1, 1, 1);
300#endif 300#endif
301#endif 301#endif
302 302
303/* 303/*
304 * TUNEABLE PARAMETERS: 304 * TUNEABLE PARAMETERS:
305 */ 305 */
306 306
307/* 307/*
308 * AIM: Adaptive Interrupt Moderation 308 * AIM: Adaptive Interrupt Moderation
309 * which means that the interrupt rate 309 * which means that the interrupt rate
310 * is varied over time based on the 310 * is varied over time based on the
311 * traffic for that interrupt vector 311 * traffic for that interrupt vector
312 */ 312 */
313static bool ixgbe_enable_aim = true; 313static bool ixgbe_enable_aim = true;
314#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7) 314#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
315SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0, 315SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
316 "Enable adaptive interrupt moderation"); 316 "Enable adaptive interrupt moderation");
317 317
318static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 318static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
319SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 319SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
320 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); 320 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
321 321
322/* How many packets rxeof tries to clean at a time */ 322/* How many packets rxeof tries to clean at a time */
323static int ixgbe_rx_process_limit = 256; 323static int ixgbe_rx_process_limit = 256;
324SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, 324SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
325 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); 325 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
326 326
327/* How many packets txeof tries to clean at a time */ 327/* How many packets txeof tries to clean at a time */
328static int ixgbe_tx_process_limit = 256; 328static int ixgbe_tx_process_limit = 256;
329SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, 329SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
330 &ixgbe_tx_process_limit, 0, 330 &ixgbe_tx_process_limit, 0,
331 "Maximum number of sent packets to process at a time, -1 means unlimited"); 331 "Maximum number of sent packets to process at a time, -1 means unlimited");
332 332
333/* Flow control setting, default to full */ 333/* Flow control setting, default to full */
334static int ixgbe_flow_control = ixgbe_fc_full; 334static int ixgbe_flow_control = ixgbe_fc_full;
335SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, 335SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
336 &ixgbe_flow_control, 0, "Default flow control used for all adapters"); 336 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
337 337
338/* Which packet processing uses workqueue or softint */ 338/* Which packet processing uses workqueue or softint */
339static bool ixgbe_txrx_workqueue = false; 339static bool ixgbe_txrx_workqueue = false;
340 340
341/* 341/*
342 * Smart speed setting, default to on 342 * Smart speed setting, default to on
343 * this only works as a compile option 343 * this only works as a compile option
344 * right now as its during attach, set 344 * right now as its during attach, set
345 * this to 'ixgbe_smart_speed_off' to 345 * this to 'ixgbe_smart_speed_off' to
346 * disable. 346 * disable.
347 */ 347 */
348static int ixgbe_smart_speed = ixgbe_smart_speed_on; 348static int ixgbe_smart_speed = ixgbe_smart_speed_on;
349 349
350/* 350/*
351 * MSI-X should be the default for best performance, 351 * MSI-X should be the default for best performance,
352 * but this allows it to be forced off for testing. 352 * but this allows it to be forced off for testing.
353 */ 353 */
354static int ixgbe_enable_msix = 1; 354static int ixgbe_enable_msix = 1;
355SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, 355SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
356 "Enable MSI-X interrupts"); 356 "Enable MSI-X interrupts");
357 357
358/* 358/*
359 * Number of Queues, can be set to 0, 359 * Number of Queues, can be set to 0,
360 * it then autoconfigures based on the 360 * it then autoconfigures based on the
361 * number of cpus with a max of 8. This 361 * number of cpus with a max of 8. This
362 * can be overridden manually here. 362 * can be overridden manually here.
363 */ 363 */
364static int ixgbe_num_queues = 0; 364static int ixgbe_num_queues = 0;
365SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, 365SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
366 "Number of queues to configure, 0 indicates autoconfigure"); 366 "Number of queues to configure, 0 indicates autoconfigure");
367 367
368/* 368/*
369 * Number of TX descriptors per ring, 369 * Number of TX descriptors per ring,
370 * setting higher than RX as this seems 370 * setting higher than RX as this seems
371 * the better performing choice. 371 * the better performing choice.
372 */ 372 */
373static int ixgbe_txd = DEFAULT_TXD; 373static int ixgbe_txd = DEFAULT_TXD;
374SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0, 374SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
375 "Number of transmit descriptors per queue"); 375 "Number of transmit descriptors per queue");
376 376
377/* Number of RX descriptors per ring */ 377/* Number of RX descriptors per ring */
378static int ixgbe_rxd = DEFAULT_RXD; 378static int ixgbe_rxd = DEFAULT_RXD;
379SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, 379SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
380 "Number of receive descriptors per queue"); 380 "Number of receive descriptors per queue");
381 381
382/* 382/*
383 * Defining this on will allow the use 383 * Defining this on will allow the use
384 * of unsupported SFP+ modules, note that 384 * of unsupported SFP+ modules, note that
385 * doing so you are on your own :) 385 * doing so you are on your own :)
386 */ 386 */
387static int allow_unsupported_sfp = false; 387static int allow_unsupported_sfp = false;
388#define TUNABLE_INT(__x, __y) 388#define TUNABLE_INT(__x, __y)
389TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp); 389TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
390 390
391/* 391/*
392 * Not sure if Flow Director is fully baked, 392 * Not sure if Flow Director is fully baked,
393 * so we'll default to turning it off. 393 * so we'll default to turning it off.
394 */ 394 */
395static int ixgbe_enable_fdir = 0; 395static int ixgbe_enable_fdir = 0;
396SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, 396SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
397 "Enable Flow Director"); 397 "Enable Flow Director");
398 398
399/* Legacy Transmit (single queue) */ 399/* Legacy Transmit (single queue) */
400static int ixgbe_enable_legacy_tx = 0; 400static int ixgbe_enable_legacy_tx = 0;
401SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN, 401SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
402 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow"); 402 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
403 403
404/* Receive-Side Scaling */ 404/* Receive-Side Scaling */
405static int ixgbe_enable_rss = 1; 405static int ixgbe_enable_rss = 1;
406SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, 406SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
407 "Enable Receive-Side Scaling (RSS)"); 407 "Enable Receive-Side Scaling (RSS)");
408 408
409#if 0 409#if 0
410static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *); 410static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
411static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *); 411static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
412#endif 412#endif
413 413
414#ifdef NET_MPSAFE 414#ifdef NET_MPSAFE
415#define IXGBE_MPSAFE 1 415#define IXGBE_MPSAFE 1
416#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 416#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
417#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE 417#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
418#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 418#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
419#define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE 419#define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
420#else 420#else
421#define IXGBE_CALLOUT_FLAGS 0 421#define IXGBE_CALLOUT_FLAGS 0
422#define IXGBE_SOFTINT_FLAGS 0 422#define IXGBE_SOFTINT_FLAGS 0
423#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 423#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
424#define IXGBE_TASKLET_WQ_FLAGS 0 424#define IXGBE_TASKLET_WQ_FLAGS 0
425#endif 425#endif
426#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 426#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
427 427
428/* Interval between reports of errors */ 428/* Interval between reports of errors */
429static const struct timeval ixgbe_errlog_intrvl = { 60, 0 }; /* 60s */ 429static const struct timeval ixgbe_errlog_intrvl = { 60, 0 }; /* 60s */
430 430
431/************************************************************************ 431/************************************************************************
432 * ixgbe_initialize_rss_mapping 432 * ixgbe_initialize_rss_mapping
433 ************************************************************************/ 433 ************************************************************************/
434static void 434static void
435ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc) 435ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
436{ 436{
437 struct ixgbe_hw *hw = &sc->hw; 437 struct ixgbe_hw *hw = &sc->hw;
438 u32 reta = 0, mrqc, rss_key[10]; 438 u32 reta = 0, mrqc, rss_key[10];
439 int queue_id, table_size, index_mult; 439 int queue_id, table_size, index_mult;
440 int i, j; 440 int i, j;
441 u32 rss_hash_config; 441 u32 rss_hash_config;
442 442
443 /* force use default RSS key. */ 443 /* force use default RSS key. */
444#ifdef __NetBSD__ 444#ifdef __NetBSD__
445 rss_getkey((uint8_t *) &rss_key); 445 rss_getkey((uint8_t *) &rss_key);
446#else 446#else
447 if (sc->feat_en & IXGBE_FEATURE_RSS) { 447 if (sc->feat_en & IXGBE_FEATURE_RSS) {
448 /* Fetch the configured RSS key */ 448 /* Fetch the configured RSS key */
449 rss_getkey((uint8_t *) &rss_key); 449 rss_getkey((uint8_t *) &rss_key);
450 } else { 450 } else {
451 /* set up random bits */ 451 /* set up random bits */
452 cprng_fast(&rss_key, sizeof(rss_key)); 452 cprng_fast(&rss_key, sizeof(rss_key));
453 } 453 }
454#endif 454#endif
455 455
456 /* Set multiplier for RETA setup and table size based on MAC */ 456 /* Set multiplier for RETA setup and table size based on MAC */
457 index_mult = 0x1; 457 index_mult = 0x1;
458 table_size = 128; 458 table_size = 128;
459 switch (sc->hw.mac.type) { 459 switch (sc->hw.mac.type) {
460 case ixgbe_mac_82598EB: 460 case ixgbe_mac_82598EB:
461 index_mult = 0x11; 461 index_mult = 0x11;
462 break; 462 break;
463 case ixgbe_mac_X550: 463 case ixgbe_mac_X550:
464 case ixgbe_mac_X550EM_x: 464 case ixgbe_mac_X550EM_x:
465 case ixgbe_mac_X550EM_a: 465 case ixgbe_mac_X550EM_a:
466 table_size = 512; 466 table_size = 512;
467 break; 467 break;
468 default: 468 default:
469 break; 469 break;
470 } 470 }
471 471
472 /* Set up the redirection table */ 472 /* Set up the redirection table */
473 for (i = 0, j = 0; i < table_size; i++, j++) { 473 for (i = 0, j = 0; i < table_size; i++, j++) {
474 if (j == sc->num_queues) 474 if (j == sc->num_queues)
475 j = 0; 475 j = 0;
476 476
477 if (sc->feat_en & IXGBE_FEATURE_RSS) { 477 if (sc->feat_en & IXGBE_FEATURE_RSS) {
478 /* 478 /*
479 * Fetch the RSS bucket id for the given indirection 479 * Fetch the RSS bucket id for the given indirection
480 * entry. Cap it at the number of configured buckets 480 * entry. Cap it at the number of configured buckets
481 * (which is num_queues.) 481 * (which is num_queues.)
482 */ 482 */
483 queue_id = rss_get_indirection_to_bucket(i); 483 queue_id = rss_get_indirection_to_bucket(i);
484 queue_id = queue_id % sc->num_queues; 484 queue_id = queue_id % sc->num_queues;
485 } else 485 } else
486 queue_id = (j * index_mult); 486 queue_id = (j * index_mult);
487 487
488 /* 488 /*
489 * The low 8 bits are for hash value (n+0); 489 * The low 8 bits are for hash value (n+0);
490 * The next 8 bits are for hash value (n+1), etc. 490 * The next 8 bits are for hash value (n+1), etc.
491 */ 491 */
492 reta = reta >> 8; 492 reta = reta >> 8;
493 reta = reta | (((uint32_t) queue_id) << 24); 493 reta = reta | (((uint32_t) queue_id) << 24);
494 if ((i & 3) == 3) { 494 if ((i & 3) == 3) {
495 if (i < 128) 495 if (i < 128)
496 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 496 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
497 else 497 else
498 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 498 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
499 reta); 499 reta);
500 reta = 0; 500 reta = 0;
501 } 501 }
502 } 502 }
503 503
504 /* Now fill our hash function seeds */ 504 /* Now fill our hash function seeds */
505 for (i = 0; i < 10; i++) 505 for (i = 0; i < 10; i++)
506 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 506 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
507 507
508 /* Perform hash on these packet types */ 508 /* Perform hash on these packet types */
509 if (sc->feat_en & IXGBE_FEATURE_RSS) 509 if (sc->feat_en & IXGBE_FEATURE_RSS)
510 rss_hash_config = rss_gethashconfig(); 510 rss_hash_config = rss_gethashconfig();
511 else { 511 else {
512 /* 512 /*
513 * Disable UDP - IP fragments aren't currently being handled 513 * Disable UDP - IP fragments aren't currently being handled
514 * and so we end up with a mix of 2-tuple and 4-tuple 514 * and so we end up with a mix of 2-tuple and 4-tuple
515 * traffic. 515 * traffic.
516 */ 516 */
517 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 517 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
518 | RSS_HASHTYPE_RSS_TCP_IPV4 518 | RSS_HASHTYPE_RSS_TCP_IPV4
519 | RSS_HASHTYPE_RSS_IPV6 519 | RSS_HASHTYPE_RSS_IPV6
520 | RSS_HASHTYPE_RSS_TCP_IPV6 520 | RSS_HASHTYPE_RSS_TCP_IPV6
521 | RSS_HASHTYPE_RSS_IPV6_EX 521 | RSS_HASHTYPE_RSS_IPV6_EX
522 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 522 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
523 } 523 }
524 524
525 mrqc = IXGBE_MRQC_RSSEN; 525 mrqc = IXGBE_MRQC_RSSEN;
526 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 526 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
528 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 528 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
530 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 530 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
532 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 532 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
534 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 534 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
535 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 535 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
536 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 536 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
537 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 537 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
538 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 538 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
539 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 539 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
540 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 540 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
541 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 541 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
542 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 542 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
543 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 543 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
544 mrqc |= ixgbe_get_mrqc(sc->iov_mode); 544 mrqc |= ixgbe_get_mrqc(sc->iov_mode);
545 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 545 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
546} /* ixgbe_initialize_rss_mapping */ 546} /* ixgbe_initialize_rss_mapping */
547 547
548/************************************************************************ 548/************************************************************************
549 * ixgbe_initialize_receive_units - Setup receive registers and features. 549 * ixgbe_initialize_receive_units - Setup receive registers and features.
550 ************************************************************************/ 550 ************************************************************************/
551#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 551#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
552 552
553static void 553static void
554ixgbe_initialize_receive_units(struct ixgbe_softc *sc) 554ixgbe_initialize_receive_units(struct ixgbe_softc *sc)
555{ 555{
556 struct rx_ring *rxr = sc->rx_rings; 556 struct rx_ring *rxr = sc->rx_rings;
557 struct ixgbe_hw *hw = &sc->hw; 557 struct ixgbe_hw *hw = &sc->hw;
558 struct ifnet *ifp = sc->ifp; 558 struct ifnet *ifp = sc->ifp;
559 int i, j; 559 int i, j;
560 u32 bufsz, fctrl, srrctl, rxcsum; 560 u32 bufsz, fctrl, srrctl, rxcsum;
561 u32 hlreg; 561 u32 hlreg;
562 562
563 /* 563 /*
564 * Make sure receives are disabled while 564 * Make sure receives are disabled while
565 * setting up the descriptor ring 565 * setting up the descriptor ring
566 */ 566 */
567 ixgbe_disable_rx(hw); 567 ixgbe_disable_rx(hw);
568 568
569 /* Enable broadcasts */ 569 /* Enable broadcasts */
570 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 570 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
571 fctrl |= IXGBE_FCTRL_BAM; 571 fctrl |= IXGBE_FCTRL_BAM;
572 if (sc->hw.mac.type == ixgbe_mac_82598EB) { 572 if (sc->hw.mac.type == ixgbe_mac_82598EB) {
573 fctrl |= IXGBE_FCTRL_DPF; 573 fctrl |= IXGBE_FCTRL_DPF;
574 fctrl |= IXGBE_FCTRL_PMCF; 574 fctrl |= IXGBE_FCTRL_PMCF;
575 } 575 }
576 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 576 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
577 577
578 /* Set for Jumbo Frames? */ 578 /* Set for Jumbo Frames? */
579 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 579 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
580 if (ifp->if_mtu > ETHERMTU) 580 if (ifp->if_mtu > ETHERMTU)
581 hlreg |= IXGBE_HLREG0_JUMBOEN; 581 hlreg |= IXGBE_HLREG0_JUMBOEN;
582 else 582 else
583 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 583 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
584 584
585#ifdef DEV_NETMAP 585#ifdef DEV_NETMAP
586 /* CRC stripping is conditional in Netmap */ 586 /* CRC stripping is conditional in Netmap */
587 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) && 587 if ((sc->feat_en & IXGBE_FEATURE_NETMAP) &&
588 (ifp->if_capenable & IFCAP_NETMAP) && 588 (ifp->if_capenable & IFCAP_NETMAP) &&
589 !ix_crcstrip) 589 !ix_crcstrip)
590 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP; 590 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
591 else 591 else
592#endif /* DEV_NETMAP */ 592#endif /* DEV_NETMAP */
593 hlreg |= IXGBE_HLREG0_RXCRCSTRP; 593 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
594 594
595 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 595 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
596 596
597 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 597 bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
598 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 598 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
599 599
600 for (i = 0; i < sc->num_queues; i++, rxr++) { 600 for (i = 0; i < sc->num_queues; i++, rxr++) {
601 u64 rdba = rxr->rxdma.dma_paddr; 601 u64 rdba = rxr->rxdma.dma_paddr;
602 u32 reg; 602 u32 reg;
603 int regnum = i / 4; /* 1 register per 4 queues */ 603 int regnum = i / 4; /* 1 register per 4 queues */
604 int regshift = i % 4; /* 4 bits per 1 queue */ 604 int regshift = i % 4; /* 4 bits per 1 queue */
605 j = rxr->me; 605 j = rxr->me;
606 606
607 /* Setup the Base and Length of the Rx Descriptor Ring */ 607 /* Setup the Base and Length of the Rx Descriptor Ring */
608 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 608 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
609 (rdba & 0x00000000ffffffffULL)); 609 (rdba & 0x00000000ffffffffULL));
610 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 610 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
611 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 611 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
612 sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 612 sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
613 613
614 /* Set up the SRRCTL register */ 614 /* Set up the SRRCTL register */
615 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 615 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
616 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 616 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
617 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 617 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
618 srrctl |= bufsz; 618 srrctl |= bufsz;
619 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 619 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
620 620
621 /* Set RQSMR (Receive Queue Statistic Mapping) register */ 621 /* Set RQSMR (Receive Queue Statistic Mapping) register */
622 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum)); 622 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
623 reg &= ~(0x000000ffUL << (regshift * 8)); 623 reg &= ~(0x000000ffUL << (regshift * 8));
624 reg |= i << (regshift * 8); 624 reg |= i << (regshift * 8);
625 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg); 625 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
626 626
627 /* 627 /*
628 * Set DROP_EN iff we have no flow control and >1 queue. 628 * Set DROP_EN iff we have no flow control and >1 queue.
629 * Note that srrctl was cleared shortly before during reset, 629 * Note that srrctl was cleared shortly before during reset,
630 * so we do not need to clear the bit, but do it just in case 630 * so we do not need to clear the bit, but do it just in case
631 * this code is moved elsewhere. 631 * this code is moved elsewhere.
632 */ 632 */
633 if ((sc->num_queues > 1) && 633 if ((sc->num_queues > 1) &&
634 (sc->hw.fc.requested_mode == ixgbe_fc_none)) 634 (sc->hw.fc.requested_mode == ixgbe_fc_none))
635 srrctl |= IXGBE_SRRCTL_DROP_EN; 635 srrctl |= IXGBE_SRRCTL_DROP_EN;
636 else 636 else
637 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 637 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
638 638
639 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 639 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
640 640
641 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 641 /* Setup the HW Rx Head and Tail Descriptor Pointers */
642 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 642 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
643 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 643 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
644 644
645 /* Set the driver rx tail address */ 645 /* Set the driver rx tail address */
646 rxr->tail = IXGBE_RDT(rxr->me); 646 rxr->tail = IXGBE_RDT(rxr->me);
647 } 647 }
648 648
649 if (sc->hw.mac.type != ixgbe_mac_82598EB) { 649 if (sc->hw.mac.type != ixgbe_mac_82598EB) {
650 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 650 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
651 | IXGBE_PSRTYPE_UDPHDR 651 | IXGBE_PSRTYPE_UDPHDR
652 | IXGBE_PSRTYPE_IPV4HDR 652 | IXGBE_PSRTYPE_IPV4HDR
653 | IXGBE_PSRTYPE_IPV6HDR; 653 | IXGBE_PSRTYPE_IPV6HDR;
654 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 654 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
655 } 655 }
656 656
657 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 657 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
658 658
659 ixgbe_initialize_rss_mapping(sc); 659 ixgbe_initialize_rss_mapping(sc);
660 660
661 if (sc->num_queues > 1) { 661 if (sc->num_queues > 1) {
662 /* RSS and RX IPP Checksum are mutually exclusive */ 662 /* RSS and RX IPP Checksum are mutually exclusive */
663 rxcsum |= IXGBE_RXCSUM_PCSD; 663 rxcsum |= IXGBE_RXCSUM_PCSD;
664 } 664 }
665 665
666 if (ifp->if_capenable & IFCAP_RXCSUM) 666 if (ifp->if_capenable & IFCAP_RXCSUM)
667 rxcsum |= IXGBE_RXCSUM_PCSD; 667 rxcsum |= IXGBE_RXCSUM_PCSD;
668 668
669 /* This is useful for calculating UDP/IP fragment checksums */ 669 /* This is useful for calculating UDP/IP fragment checksums */
670 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 670 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
671 rxcsum |= IXGBE_RXCSUM_IPPCSE; 671 rxcsum |= IXGBE_RXCSUM_IPPCSE;
672 672
673 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 673 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
674 674
675} /* ixgbe_initialize_receive_units */ 675} /* ixgbe_initialize_receive_units */
676 676
677/************************************************************************ 677/************************************************************************
678 * ixgbe_initialize_transmit_units - Enable transmit units. 678 * ixgbe_initialize_transmit_units - Enable transmit units.
679 ************************************************************************/ 679 ************************************************************************/
680static void 680static void
681ixgbe_initialize_transmit_units(struct ixgbe_softc *sc) 681ixgbe_initialize_transmit_units(struct ixgbe_softc *sc)
682{ 682{
683 struct tx_ring *txr = sc->tx_rings; 683 struct tx_ring *txr = sc->tx_rings;
684 struct ixgbe_hw *hw = &sc->hw; 684 struct ixgbe_hw *hw = &sc->hw;
685 int i; 685 int i;
686 686
687 INIT_DEBUGOUT("ixgbe_initialize_transmit_units"); 687 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
688 688
689 /* Setup the Base and Length of the Tx Descriptor Ring */ 689 /* Setup the Base and Length of the Tx Descriptor Ring */
690 for (i = 0; i < sc->num_queues; i++, txr++) { 690 for (i = 0; i < sc->num_queues; i++, txr++) {
691 u64 tdba = txr->txdma.dma_paddr; 691 u64 tdba = txr->txdma.dma_paddr;
692 u32 txctrl = 0; 692 u32 txctrl = 0;
693 u32 tqsmreg, reg; 693 u32 tqsmreg, reg;
694 int regnum = i / 4; /* 1 register per 4 queues */ 694 int regnum = i / 4; /* 1 register per 4 queues */
695 int regshift = i % 4; /* 8 bits per queue */ 695 int regshift = i % 4; /* 8 bits per queue */
696 int j = txr->me; 696 int j = txr->me;
697 697
698 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 698 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
699 (tdba & 0x00000000ffffffffULL)); 699 (tdba & 0x00000000ffffffffULL));
700 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 700 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
701 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 701 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
702 sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 702 sc->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
703 703
704 /* 704 /*
705 * Set TQSMR (Transmit Queue Statistic Mapping) register. 705 * Set TQSMR (Transmit Queue Statistic Mapping) register.
706 * Register location is different between 82598 and others. 706 * Register location is different between 82598 and others.
707 */ 707 */
708 if (sc->hw.mac.type == ixgbe_mac_82598EB) 708 if (sc->hw.mac.type == ixgbe_mac_82598EB)
709 tqsmreg = IXGBE_TQSMR(regnum); 709 tqsmreg = IXGBE_TQSMR(regnum);
710 else 710 else
711 tqsmreg = IXGBE_TQSM(regnum); 711 tqsmreg = IXGBE_TQSM(regnum);
712 reg = IXGBE_READ_REG(hw, tqsmreg); 712 reg = IXGBE_READ_REG(hw, tqsmreg);
713 reg &= ~(0x000000ffUL << (regshift * 8)); 713 reg &= ~(0x000000ffUL << (regshift * 8));
714 reg |= i << (regshift * 8); 714 reg |= i << (regshift * 8);
715 IXGBE_WRITE_REG(hw, tqsmreg, reg); 715 IXGBE_WRITE_REG(hw, tqsmreg, reg);
716 716
717 /* Setup the HW Tx Head and Tail descriptor pointers */ 717 /* Setup the HW Tx Head and Tail descriptor pointers */
718 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 718 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
719 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 719 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
720 720
721 /* Cache the tail address */ 721 /* Cache the tail address */
722 txr->tail = IXGBE_TDT(j); 722 txr->tail = IXGBE_TDT(j);
723 723
724 txr->txr_no_space = false; 724 txr->txr_no_space = false;
725 725
726 /* Disable Head Writeback */ 726 /* Disable Head Writeback */
727 /* 727 /*
728 * Note: for X550 series devices, these registers are actually 728 * Note: for X550 series devices, these registers are actually
729 * prefixed with TPH_ instead of DCA_, but the addresses and 729 * prefixed with TPH_ instead of DCA_, but the addresses and
730 * fields remain the same. 730 * fields remain the same.
731 */ 731 */
732 switch (hw->mac.type) { 732 switch (hw->mac.type) {
733 case ixgbe_mac_82598EB: 733 case ixgbe_mac_82598EB:
734 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 734 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
735 break; 735 break;
736 default: 736 default:
737 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 737 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
738 break; 738 break;
739 } 739 }
740 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 740 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
741 switch (hw->mac.type) { 741 switch (hw->mac.type) {
742 case ixgbe_mac_82598EB: 742 case ixgbe_mac_82598EB:
743 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 743 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
744 break; 744 break;
745 default: 745 default:
746 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 746 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
747 break; 747 break;
748 } 748 }
749 749
750 } 750 }
751 751
752 if (hw->mac.type != ixgbe_mac_82598EB) { 752 if (hw->mac.type != ixgbe_mac_82598EB) {
753 u32 dmatxctl, rttdcs; 753 u32 dmatxctl, rttdcs;
754 754
755 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 755 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
756 dmatxctl |= IXGBE_DMATXCTL_TE; 756 dmatxctl |= IXGBE_DMATXCTL_TE;
757 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 757 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
758 /* Disable arbiter to set MTQC */ 758 /* Disable arbiter to set MTQC */
759 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 759 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
760 rttdcs |= IXGBE_RTTDCS_ARBDIS; 760 rttdcs |= IXGBE_RTTDCS_ARBDIS;
761 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 761 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 762 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
763 ixgbe_get_mtqc(sc->iov_mode)); 763 ixgbe_get_mtqc(sc->iov_mode));
764 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 764 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
765 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 765 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
766 } 766 }
767} /* ixgbe_initialize_transmit_units */ 767} /* ixgbe_initialize_transmit_units */
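/*
 * Worked example of the TQSM(R) statistic mapping above, for
 * illustration only: each 32-bit register maps four queues at eight
 * bits per queue, so queue i uses register i / 4, byte i % 4. Queue 5
 * thus lands in TQSM(1) bits 15:8 and queue 10 in TQSM(2) bits 23:16.
 */
static inline void
tqsm_slot_sketch(int i, int *regnum, int *bitshift)
{
	*regnum = i / 4;		/* one register per four queues */
	*bitshift = (i % 4) * 8;	/* eight bits per queue */
}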
768 768
769static void 769static void
770ixgbe_quirks(struct ixgbe_softc *sc) 770ixgbe_quirks(struct ixgbe_softc *sc)
771{ 771{
772 device_t dev = sc->dev; 772 device_t dev = sc->dev;
773 struct ixgbe_hw *hw = &sc->hw; 773 struct ixgbe_hw *hw = &sc->hw;
774 const char *vendor, *product; 774 const char *vendor, *product;
775 775
776 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { 776 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
777 /* 777 /*
778 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE 778 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
779 * MA10-ST0. 779 * MA10-ST0.
780 */ 780 */
781 vendor = pmf_get_platform("system-vendor"); 781 vendor = pmf_get_platform("system-vendor");
782 product = pmf_get_platform("system-product"); 782 product = pmf_get_platform("system-product");
783 783
784 if ((vendor == NULL) || (product == NULL)) 784 if ((vendor == NULL) || (product == NULL))
785 return; 785 return;
786 786
787 if ((strcmp(vendor, "GIGABYTE") == 0) && 787 if ((strcmp(vendor, "GIGABYTE") == 0) &&
788 (strcmp(product, "MA10-ST0") == 0)) { 788 (strcmp(product, "MA10-ST0") == 0)) {
789 aprint_verbose_dev(dev, 789 aprint_verbose_dev(dev,
790 "Enable SFP+ MOD_ABS inverse quirk\n"); 790 "Enable SFP+ MOD_ABS inverse quirk\n");
791 sc->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT; 791 sc->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
792 } 792 }
793 } 793 }
794} 794}
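/*
 * A hypothetical consumer of the quirk flag set above (the helper name
 * and the mod_abs_raw parameter are illustrative, not driver API):
 * code sampling the SFP+ MOD_ABS signal would invert its reading on
 * the affected board.
 */
static inline bool
mod_absent_sketch(const struct ixgbe_hw *hw, bool mod_abs_raw)
{
	if (hw->quirks & IXGBE_QUIRK_MOD_ABS_INVERT)
		return !mod_abs_raw;	/* GIGABYTE MA10-ST0 wiring */
	return mod_abs_raw;
}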
795 795
796/************************************************************************ 796/************************************************************************
797 * ixgbe_attach - Device initialization routine 797 * ixgbe_attach - Device initialization routine
798 * 798 *
799 * Called when the driver is being loaded. 799 * Called when the driver is being loaded.
800 * Identifies the type of hardware, allocates all resources 800 * Identifies the type of hardware, allocates all resources
801 * and initializes the hardware. 801 * and initializes the hardware.
802 * 802 *
803 * No return value; attach errors are reported via aprint_error_dev. 803 * No return value; attach errors are reported via aprint_error_dev.
804 ************************************************************************/ 804 ************************************************************************/
805static void 805static void
806ixgbe_attach(device_t parent, device_t dev, void *aux) 806ixgbe_attach(device_t parent, device_t dev, void *aux)
807{ 807{
808 struct ixgbe_softc *sc; 808 struct ixgbe_softc *sc;
809 struct ixgbe_hw *hw; 809 struct ixgbe_hw *hw;
810 int error = -1; 810 int error = -1;
811 u32 ctrl_ext; 811 u32 ctrl_ext;
812 u16 high, low, nvmreg, dev_caps; 812 u16 high, low, nvmreg, dev_caps;
813 pcireg_t id, subid; 813 pcireg_t id, subid;
814 const ixgbe_vendor_info_t *ent; 814 const ixgbe_vendor_info_t *ent;
815 struct pci_attach_args *pa = aux; 815 struct pci_attach_args *pa = aux;
816 bool unsupported_sfp = false; 816 bool unsupported_sfp = false;
817 const char *str; 817 const char *str;
818 char wqname[MAXCOMLEN]; 818 char wqname[MAXCOMLEN];
819 char buf[256]; 819 char buf[256];
820 820
821 INIT_DEBUGOUT("ixgbe_attach: begin"); 821 INIT_DEBUGOUT("ixgbe_attach: begin");
822 822
823 /* Allocate, clear, and link in our adapter structure */ 823 /* Allocate, clear, and link in our adapter structure */
824 sc = device_private(dev); 824 sc = device_private(dev);
825 sc->hw.back = sc; 825 sc->hw.back = sc;
826 sc->dev = dev; 826 sc->dev = dev;
827 hw = &sc->hw; 827 hw = &sc->hw;
828 sc->osdep.pc = pa->pa_pc; 828 sc->osdep.pc = pa->pa_pc;
829 sc->osdep.tag = pa->pa_tag; 829 sc->osdep.tag = pa->pa_tag;
830 if (pci_dma64_available(pa)) 830 if (pci_dma64_available(pa))
831 sc->osdep.dmat = pa->pa_dmat64; 831 sc->osdep.dmat = pa->pa_dmat64;
832 else 832 else
833 sc->osdep.dmat = pa->pa_dmat; 833 sc->osdep.dmat = pa->pa_dmat;
834 sc->osdep.attached = false; 834 sc->osdep.attached = false;
835 sc->osdep.detaching = false; 835 sc->osdep.detaching = false;
836 836
837 ent = ixgbe_lookup(pa); 837 ent = ixgbe_lookup(pa);
838 838
839 KASSERT(ent != NULL); 839 KASSERT(ent != NULL);
840 840
841 aprint_normal(": %s, Version - %s\n", 841 aprint_normal(": %s, Version - %s\n",
842 ixgbe_strings[ent->index], ixgbe_driver_version); 842 ixgbe_strings[ent->index], ixgbe_driver_version);
843 843
844 /* Core Lock Init */ 844 /* Core Lock Init */
845 IXGBE_CORE_LOCK_INIT(sc, device_xname(dev)); 845 IXGBE_CORE_LOCK_INIT(sc, device_xname(dev));
846 846
847 /* Set up the timer callout and workqueue */ 847 /* Set up the timer callout and workqueue */
848 callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS); 848 callout_init(&sc->timer, IXGBE_CALLOUT_FLAGS);
849 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev)); 849 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
850 error = workqueue_create(&sc->timer_wq, wqname, 850 error = workqueue_create(&sc->timer_wq, wqname,
851 ixgbe_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, 851 ixgbe_handle_timer, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
852 IXGBE_TASKLET_WQ_FLAGS); 852 IXGBE_TASKLET_WQ_FLAGS);
853 if (error) { 853 if (error) {
854 aprint_error_dev(dev, 854 aprint_error_dev(dev,
855 "could not create timer workqueue (%d)\n", error); 855 "could not create timer workqueue (%d)\n", error);
856 goto err_out; 856 goto err_out;
857 } 857 }
858 858
859 /* Determine hardware revision */ 859 /* Determine hardware revision */
860 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 860 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
861 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 861 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
862 862
863 hw->vendor_id = PCI_VENDOR(id); 863 hw->vendor_id = PCI_VENDOR(id);
864 hw->device_id = PCI_PRODUCT(id); 864 hw->device_id = PCI_PRODUCT(id);
865 hw->revision_id = 865 hw->revision_id =
866 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 866 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
867 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 867 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
868 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 868 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
869 869
870 /* Set quirk flags */ 870 /* Set quirk flags */
871 ixgbe_quirks(sc); 871 ixgbe_quirks(sc);
872 872
873 /* 873 /*
874 * Make sure BUSMASTER is set 874 * Make sure BUSMASTER is set
875 */ 875 */
876 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 876 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
877 877
878 /* Do base PCI setup - map BAR0 */ 878 /* Do base PCI setup - map BAR0 */
879 if (ixgbe_allocate_pci_resources(sc, pa)) { 879 if (ixgbe_allocate_pci_resources(sc, pa)) {
880 aprint_error_dev(dev, "Allocation of PCI resources failed\n"); 880 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
881 error = ENXIO; 881 error = ENXIO;
882 goto err_out; 882 goto err_out;
883 } 883 }
884 884
885 /* let hardware know driver is loaded */ 885 /* let hardware know driver is loaded */
886 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 886 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
887 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 887 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
888 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 888 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
889 889
890 /* 890 /*
891 * Initialize the shared code 891 * Initialize the shared code
892 */ 892 */
893 if (ixgbe_init_shared_code(hw) != 0) { 893 if (ixgbe_init_shared_code(hw) != 0) {
894 aprint_error_dev(dev, 894 aprint_error_dev(dev,
895 "Unable to initialize the shared code\n"); 895 "Unable to initialize the shared code\n");
896 error = ENXIO; 896 error = ENXIO;
897 goto err_out; 897 goto err_out;
898 } 898 }
899 899
900 switch (hw->mac.type) { 900 switch (hw->mac.type) {
901 case ixgbe_mac_82598EB: 901 case ixgbe_mac_82598EB:
902 str = "82598EB"; 902 str = "82598EB";
903 break; 903 break;
904 case ixgbe_mac_82599EB: 904 case ixgbe_mac_82599EB:
905 str = "82599EB"; 905 str = "82599EB";
906 break; 906 break;
907 case ixgbe_mac_X540: 907 case ixgbe_mac_X540:
908 str = "X540"; 908 str = "X540";
909 break; 909 break;
910 case ixgbe_mac_X550: 910 case ixgbe_mac_X550:
911 str = "X550"; 911 str = "X550";
912 break; 912 break;
913 case ixgbe_mac_X550EM_x: 913 case ixgbe_mac_X550EM_x:
914 str = "X550EM X"; 914 str = "X550EM X";
915 break; 915 break;
916 case ixgbe_mac_X550EM_a: 916 case ixgbe_mac_X550EM_a:
917 str = "X550EM A"; 917 str = "X550EM A";
918 break; 918 break;
919 default: 919 default:
920 str = "Unknown"; 920 str = "Unknown";
921 break; 921 break;
922 } 922 }
923 aprint_normal_dev(dev, "device %s\n", str); 923 aprint_normal_dev(dev, "device %s\n", str);
924 924
925 hw->allow_unsupported_sfp = allow_unsupported_sfp; 925 hw->allow_unsupported_sfp = allow_unsupported_sfp;
926 926
927 /* Pick up the 82599 settings */ 927 /* Pick up the 82599 settings */
928 if (hw->mac.type != ixgbe_mac_82598EB) 928 if (hw->mac.type != ixgbe_mac_82598EB)
929 hw->phy.smart_speed = ixgbe_smart_speed; 929 hw->phy.smart_speed = ixgbe_smart_speed;
930 930
931 /* Set the right number of segments */ 931 /* Set the right number of segments */
932 KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT); 932 KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
933 sc->num_segs = IXGBE_SCATTER_DEFAULT; 933 sc->num_segs = IXGBE_SCATTER_DEFAULT;
934 934
935 /* Ensure SW/FW semaphore is free */ 935 /* Ensure SW/FW semaphore is free */
936 ixgbe_init_swfw_semaphore(hw); 936 ixgbe_init_swfw_semaphore(hw);
937 937
938 hw->mac.ops.set_lan_id(hw); 938 hw->mac.ops.set_lan_id(hw);
939 ixgbe_init_device_features(sc); 939 ixgbe_init_device_features(sc);
940 940
941 if (ixgbe_configure_interrupts(sc)) { 941 if (ixgbe_configure_interrupts(sc)) {
942 error = ENXIO; 942 error = ENXIO;
943 goto err_out; 943 goto err_out;
944 } 944 }
945 945
946 /* Allocate multicast array memory. */ 946 /* Allocate multicast array memory. */
947 sc->mta = malloc(sizeof(*sc->mta) * 947 sc->mta = malloc(sizeof(*sc->mta) *
948 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK); 948 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
949 949
950 /* Enable WoL (if supported) */ 950 /* Enable WoL (if supported) */
951 ixgbe_check_wol_support(sc); 951 ixgbe_check_wol_support(sc);
952 952
953 /* Register for VLAN events */ 953 /* Register for VLAN events */
954 ether_set_vlan_cb(&sc->osdep.ec, ixgbe_vlan_cb); 954 ether_set_vlan_cb(&sc->osdep.ec, ixgbe_vlan_cb);
955 955
956 /* Verify adapter fan is still functional (if applicable) */ 956 /* Verify adapter fan is still functional (if applicable) */
957 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) { 957 if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
958 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 958 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
959 ixgbe_check_fan_failure(sc, esdp, FALSE); 959 ixgbe_check_fan_failure(sc, esdp, FALSE);
960 } 960 }
961 961
962 /* Set an initial default flow control value */ 962 /* Set an initial default flow control value */
963 hw->fc.requested_mode = ixgbe_flow_control; 963 hw->fc.requested_mode = ixgbe_flow_control;
964 964
965 /* Do descriptor calc and sanity checks */ 965 /* Do descriptor calc and sanity checks */
966 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 966 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
967 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { 967 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
968 aprint_error_dev(dev, "Invalid TX ring size (%d). " 968 aprint_error_dev(dev, "Invalid TX ring size (%d). "
969 "It must be between %d and %d, " 969 "It must be between %d and %d, "
970 "inclusive, and must be a multiple of %zu. " 970 "inclusive, and must be a multiple of %zu. "
971 "Using default value of %d instead.\n", 971 "Using default value of %d instead.\n",
972 ixgbe_txd, MIN_TXD, MAX_TXD, 972 ixgbe_txd, MIN_TXD, MAX_TXD,
973 DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc), 973 DBA_ALIGN / sizeof(union ixgbe_adv_tx_desc),
974 DEFAULT_TXD); 974 DEFAULT_TXD);
975 sc->num_tx_desc = DEFAULT_TXD; 975 sc->num_tx_desc = DEFAULT_TXD;
976 } else 976 } else
977 sc->num_tx_desc = ixgbe_txd; 977 sc->num_tx_desc = ixgbe_txd;
978 978
979 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 979 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
980 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { 980 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
981 aprint_error_dev(dev, "Invalid RX ring size (%d). " 981 aprint_error_dev(dev, "Invalid RX ring size (%d). "
982 "It must be between %d and %d, " 982 "It must be between %d and %d, "
983 "inclusive, and must be a multiple of %zu. " 983 "inclusive, and must be a multiple of %zu. "
984 "Using default value of %d instead.\n", 984 "Using default value of %d instead.\n",
985 ixgbe_rxd, MIN_RXD, MAX_RXD, 985 ixgbe_rxd, MIN_RXD, MAX_RXD,
986 DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc), 986 DBA_ALIGN / sizeof(union ixgbe_adv_rx_desc),
987 DEFAULT_RXD); 987 DEFAULT_RXD);
988 sc->num_rx_desc = DEFAULT_RXD; 988 sc->num_rx_desc = DEFAULT_RXD;
989 } else 989 } else
990 sc->num_rx_desc = ixgbe_rxd; 990 sc->num_rx_desc = ixgbe_rxd;
991 991
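/*
 * Worked arithmetic for the ring-size checks above: the descriptor
 * count times sizeof(descriptor) must be a multiple of DBA_ALIGN.
 * Assuming the usual 16-byte advanced descriptors and a DBA_ALIGN of
 * 128, the counts must be multiples of 128 / 16 = 8, so e.g. 1024
 * passes while 1020 falls back to the default.
 */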
992 /* Sysctls for limiting the amount of work done in the taskqueues */ 992 /* Sysctls for limiting the amount of work done in the taskqueues */
993 sc->rx_process_limit 993 sc->rx_process_limit
994 = (ixgbe_rx_process_limit <= sc->num_rx_desc) 994 = (ixgbe_rx_process_limit <= sc->num_rx_desc)
995 ? ixgbe_rx_process_limit : sc->num_rx_desc; 995 ? ixgbe_rx_process_limit : sc->num_rx_desc;
996 sc->tx_process_limit 996 sc->tx_process_limit
997 = (ixgbe_tx_process_limit <= sc->num_tx_desc) 997 = (ixgbe_tx_process_limit <= sc->num_tx_desc)
998 ? ixgbe_tx_process_limit : sc->num_tx_desc; 998 ? ixgbe_tx_process_limit : sc->num_tx_desc;
999 999
1000 /* Set default high limit of copying mbuf in rxeof */ 1000 /* Set default high limit of copying mbuf in rxeof */
1001 sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX; 1001 sc->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
1002 1002
1003 /* Allocate our TX/RX Queues */ 1003 /* Allocate our TX/RX Queues */
1004 if (ixgbe_allocate_queues(sc)) { 1004 if (ixgbe_allocate_queues(sc)) {
1005 error = ENOMEM; 1005 error = ENOMEM;
1006 goto err_out; 1006 goto err_out;
1007 } 1007 }
1008 1008
1009 hw->phy.reset_if_overtemp = TRUE; 1009 hw->phy.reset_if_overtemp = TRUE;
1010 error = ixgbe_reset_hw(hw); 1010 error = ixgbe_reset_hw(hw);
1011 hw->phy.reset_if_overtemp = FALSE; 1011 hw->phy.reset_if_overtemp = FALSE;
1012 if (error == IXGBE_ERR_SFP_NOT_PRESENT) 1012 if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1013 error = IXGBE_SUCCESS; 1013 error = IXGBE_SUCCESS;
1014 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 1014 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1015 aprint_error_dev(dev, 1015 aprint_error_dev(dev,
1016 "Unsupported SFP+ module type was detected.\n"); 1016 "Unsupported SFP+ module type was detected.\n");
1017 unsupported_sfp = true; 1017 unsupported_sfp = true;
1018 error = IXGBE_SUCCESS; 1018 error = IXGBE_SUCCESS;
1019 } else if (error) { 1019 } else if (error) {
1020 aprint_error_dev(dev, 1020 aprint_error_dev(dev,
1021 "Hardware initialization failed (error = %d)\n", error); 1021 "Hardware initialization failed (error = %d)\n", error);
1022 error = EIO; 1022 error = EIO;
1023 goto err_late; 1023 goto err_late;
1024 } 1024 }
1025 1025
1026 /* Make sure we have a good EEPROM before we read from it */ 1026 /* Make sure we have a good EEPROM before we read from it */
1027 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) { 1027 if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
1028 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n"); 1028 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1029 error = EIO; 1029 error = EIO;
1030 goto err_late; 1030 goto err_late;
1031 } 1031 }
1032 1032
1033 aprint_normal("%s:", device_xname(dev)); 1033 aprint_normal("%s:", device_xname(dev));
1034 /* NVM Image Version */ 1034 /* NVM Image Version */
1035 high = low = 0; 1035 high = low = 0;
1036 switch (hw->mac.type) { 1036 switch (hw->mac.type) {
1037 case ixgbe_mac_82598EB: 1037 case ixgbe_mac_82598EB:
1038 /* 1038 /*
1039 * Print version from the device starter version word (0x29). The 1039 * Print version from the device starter version word (0x29). The
1040 * location is the same as newer devices' IXGBE_NVM_MAP_VER. 1040 * location is the same as newer devices' IXGBE_NVM_MAP_VER.
1041 */ 1041 */
1042 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); 1042 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1043 if (nvmreg == 0xffff) 1043 if (nvmreg == 0xffff)
1044 break; 1044 break;
1045 high = (nvmreg >> 12) & 0x0f; 1045 high = (nvmreg >> 12) & 0x0f;
1046 low = (nvmreg >> 4) & 0xff; 1046 low = (nvmreg >> 4) & 0xff;
1047 id = nvmreg & 0x0f; 1047 id = nvmreg & 0x0f;
1048 /* 1048 /*
1049 * The following output might not be correct. Some 82598 cards 1049 * The following output might not be correct. Some 82598 cards
1050 * have 0x1070 or 0x2090, while the 82598 spec update mentions 2.9.0. 1050 * have 0x1070 or 0x2090, while the 82598 spec update mentions 2.9.0.
1051 */ 1051 */
1052 aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id); 1052 aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
1053 break; 1053 break;
1054 case ixgbe_mac_X540: 1054 case ixgbe_mac_X540:
1055 case ixgbe_mac_X550EM_a: 1055 case ixgbe_mac_X550EM_a:
1056 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 1056 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1057 if (nvmreg == 0xffff) 1057 if (nvmreg == 0xffff)
1058 break; 1058 break;
1059 high = (nvmreg >> 12) & 0x0f; 1059 high = (nvmreg >> 12) & 0x0f;
1060 low = (nvmreg >> 4) & 0xff; 1060 low = (nvmreg >> 4) & 0xff;
1061 id = nvmreg & 0x0f; 1061 id = nvmreg & 0x0f;
1062 aprint_normal(" NVM Image Version %u.", high); 1062 aprint_normal(" NVM Image Version %u.", high);
1063 if (hw->mac.type == ixgbe_mac_X540) 1063 if (hw->mac.type == ixgbe_mac_X540)
1064 str = "%x"; 1064 str = "%x";
1065 else 1065 else
1066 str = "%02x"; 1066 str = "%02x";
@@ -6125,1102 +6125,1099 @@ ixgbe_sysctl_eee_state(SYSCTLFN_ARGS) @@ -6125,1102 +6125,1099 @@ ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
6125 sc->feat_en &= ~IXGBE_FEATURE_EEE; 6125 sc->feat_en &= ~IXGBE_FEATURE_EEE;
6126 6126
6127 return (error); 6127 return (error);
6128} /* ixgbe_sysctl_eee_state */ 6128} /* ixgbe_sysctl_eee_state */
6129 6129
6130#define PRINTQS(sc, regname) \ 6130#define PRINTQS(sc, regname) \
6131 do { \ 6131 do { \
6132 struct ixgbe_hw *_hw = &(sc)->hw; \ 6132 struct ixgbe_hw *_hw = &(sc)->hw; \
6133 int _i; \ 6133 int _i; \
6134 \ 6134 \
6135 printf("%s: %s", device_xname((sc)->dev), #regname); \ 6135 printf("%s: %s", device_xname((sc)->dev), #regname); \
6136 for (_i = 0; _i < (sc)->num_queues; _i++) { \ 6136 for (_i = 0; _i < (sc)->num_queues; _i++) { \
6137 printf((_i == 0) ? "\t" : " "); \ 6137 printf((_i == 0) ? "\t" : " "); \
6138 printf("%08x", IXGBE_READ_REG(_hw, \ 6138 printf("%08x", IXGBE_READ_REG(_hw, \
6139 IXGBE_##regname(_i))); \ 6139 IXGBE_##regname(_i))); \
6140 } \ 6140 } \
6141 printf("\n"); \ 6141 printf("\n"); \
6142 } while (0) 6142 } while (0)
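/*
 * Example of the output PRINTQS() above produces, for a hypothetical
 * two-queue ixg0 (the register values are made up): a tab before the
 * first value, single spaces between the rest, one line per register
 * name:
 *
 *	ixg0: RDT	000001ff 000001ff
 */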
6143 6143
6144/************************************************************************ 6144/************************************************************************
6145 * ixgbe_print_debug_info 6145 * ixgbe_print_debug_info
6146 * 6146 *
6147 * Called only from the debug sysctl handler (ixgbe_sysctl_debug). 6147 * Called only from the debug sysctl handler (ixgbe_sysctl_debug).
6148 * Provides a way to take a look at important statistics 6148 * Provides a way to take a look at important statistics
6149 * maintained by the driver and hardware. 6149 * maintained by the driver and hardware.
6150 ************************************************************************/ 6150 ************************************************************************/
6151static void 6151static void
6152ixgbe_print_debug_info(struct ixgbe_softc *sc) 6152ixgbe_print_debug_info(struct ixgbe_softc *sc)
6153{ 6153{
6154 device_t dev = sc->dev; 6154 device_t dev = sc->dev;
6155 struct ixgbe_hw *hw = &sc->hw; 6155 struct ixgbe_hw *hw = &sc->hw;
6156 int table_size; 6156 int table_size;
6157 int i; 6157 int i;
6158 6158
6159 switch (sc->hw.mac.type) { 6159 switch (sc->hw.mac.type) {
6160 case ixgbe_mac_X550: 6160 case ixgbe_mac_X550:
6161 case ixgbe_mac_X550EM_x: 6161 case ixgbe_mac_X550EM_x:
6162 case ixgbe_mac_X550EM_a: 6162 case ixgbe_mac_X550EM_a:
6163 table_size = 128; 6163 table_size = 128;
6164 break; 6164 break;
6165 default: 6165 default:
6166 table_size = 32; 6166 table_size = 32;
6167 break; 6167 break;
6168 } 6168 }
6169 6169
6170 device_printf(dev, "[E]RETA:\n"); 6170 device_printf(dev, "[E]RETA:\n");
6171 for (i = 0; i < table_size; i++) { 6171 for (i = 0; i < table_size; i++) {
6172 if (i < 32) 6172 if (i < 32)
6173 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw, 6173 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6174 IXGBE_RETA(i))); 6174 IXGBE_RETA(i)));
6175 else 6175 else
6176 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw, 6176 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
6177 IXGBE_ERETA(i - 32))); 6177 IXGBE_ERETA(i - 32)));
6178 } 6178 }
6179 6179
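/*
 * Indexing note for the loop above: the first 32 redirection-table
 * entries live in RETA(0..31); on the X550 family the table is 128
 * entries, with entries 32..127 held in ERETA(0..95), hence i - 32.
 */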
6180 device_printf(dev, "queue:"); 6180 device_printf(dev, "queue:");
6181 for (i = 0; i < sc->num_queues; i++) { 6181 for (i = 0; i < sc->num_queues; i++) {
6182 printf((i == 0) ? "\t" : " "); 6182 printf((i == 0) ? "\t" : " ");
6183 printf("%8d", i); 6183 printf("%8d", i);
6184 } 6184 }
6185 printf("\n"); 6185 printf("\n");
6186 PRINTQS(sc, RDBAL); 6186 PRINTQS(sc, RDBAL);
6187 PRINTQS(sc, RDBAH); 6187 PRINTQS(sc, RDBAH);
6188 PRINTQS(sc, RDLEN); 6188 PRINTQS(sc, RDLEN);
6189 PRINTQS(sc, SRRCTL); 6189 PRINTQS(sc, SRRCTL);
6190 PRINTQS(sc, RDH); 6190 PRINTQS(sc, RDH);
6191 PRINTQS(sc, RDT); 6191 PRINTQS(sc, RDT);
6192 PRINTQS(sc, RXDCTL); 6192 PRINTQS(sc, RXDCTL);
6193 6193
6194 device_printf(dev, "RQSMR:"); 6194 device_printf(dev, "RQSMR:");
6195 for (i = 0; i < sc->num_queues / 4; i++) { 6195 for (i = 0; i < sc->num_queues / 4; i++) {
6196 printf((i == 0) ? "\t" : " "); 6196 printf((i == 0) ? "\t" : " ");
6197 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i))); 6197 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
6198 } 6198 }
6199 printf("\n"); 6199 printf("\n");
6200 6200
6201 device_printf(dev, "disabled_count:"); 6201 device_printf(dev, "disabled_count:");
6202 for (i = 0; i < sc->num_queues; i++) { 6202 for (i = 0; i < sc->num_queues; i++) {
6203 printf((i == 0) ? "\t" : " "); 6203 printf((i == 0) ? "\t" : " ");
6204 printf("%8d", sc->queues[i].disabled_count); 6204 printf("%8d", sc->queues[i].disabled_count);
6205 } 6205 }
6206 printf("\n"); 6206 printf("\n");
6207 6207
6208 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS)); 6208 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
6209 if (hw->mac.type != ixgbe_mac_82598EB) { 6209 if (hw->mac.type != ixgbe_mac_82598EB) {
6210 device_printf(dev, "EIMS_EX(0):\t%08x\n", 6210 device_printf(dev, "EIMS_EX(0):\t%08x\n",
6211 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0))); 6211 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
6212 device_printf(dev, "EIMS_EX(1):\t%08x\n", 6212 device_printf(dev, "EIMS_EX(1):\t%08x\n",
6213 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1))); 6213 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
6214 } 6214 }
6215 device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM)); 6215 device_printf(dev, "EIAM:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAM));
6216 device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC)); 6216 device_printf(dev, "EIAC:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIAC));
6217} /* ixgbe_print_debug_info */ 6217} /* ixgbe_print_debug_info */
6218 6218
6219/************************************************************************ 6219/************************************************************************
6220 * ixgbe_sysctl_debug 6220 * ixgbe_sysctl_debug
6221 ************************************************************************/ 6221 ************************************************************************/
6222static int 6222static int
6223ixgbe_sysctl_debug(SYSCTLFN_ARGS) 6223ixgbe_sysctl_debug(SYSCTLFN_ARGS)
6224{ 6224{
6225 struct sysctlnode node = *rnode; 6225 struct sysctlnode node = *rnode;
6226 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6226 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6227 int error, result = 0; 6227 int error, result = 0;
6228 6228
6229 if (ixgbe_fw_recovery_mode_swflag(sc)) 6229 if (ixgbe_fw_recovery_mode_swflag(sc))
6230 return (EPERM); 6230 return (EPERM);
6231 6231
6232 node.sysctl_data = &result; 6232 node.sysctl_data = &result;
6233 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6233 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6234 6234
6235 if (error || newp == NULL) 6235 if (error || newp == NULL)
6236 return error; 6236 return error;
6237 6237
6238 if (result == 1) 6238 if (result == 1)
6239 ixgbe_print_debug_info(sc); 6239 ixgbe_print_debug_info(sc);
6240 6240
6241 return 0; 6241 return 0;
6242} /* ixgbe_sysctl_debug */ 6242} /* ixgbe_sysctl_debug */
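/*
 * Usage sketch (the exact sysctl node name is an assumption, not
 * confirmed by this diff): writing 1 to the per-device debug node runs
 * the dump above; the handler acts on the written value and stores
 * nothing.
 *
 *	sysctl -w hw.ixg0.debug=1
 */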
6243 6243
6244/************************************************************************ 6244/************************************************************************
6245 * ixgbe_sysctl_rx_copy_len 6245 * ixgbe_sysctl_rx_copy_len
6246 ************************************************************************/ 6246 ************************************************************************/
6247static int 6247static int
6248ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS) 6248ixgbe_sysctl_rx_copy_len(SYSCTLFN_ARGS)
6249{ 6249{
6250 struct sysctlnode node = *rnode; 6250 struct sysctlnode node = *rnode;
6251 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6251 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6252 int error; 6252 int error;
6253 int result = sc->rx_copy_len; 6253 int result = sc->rx_copy_len;
6254 6254
6255 node.sysctl_data = &result; 6255 node.sysctl_data = &result;
6256 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6256 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6257 6257
6258 if (error || newp == NULL) 6258 if (error || newp == NULL)
6259 return error; 6259 return error;
6260 6260
6261 if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX)) 6261 if ((result < 0) || (result > IXGBE_RX_COPY_LEN_MAX))
6262 return EINVAL; 6262 return EINVAL;
6263 6263
6264 sc->rx_copy_len = result; 6264 sc->rx_copy_len = result;
6265 6265
6266 return 0; 6266 return 0;
6267} /* ixgbe_sysctl_rx_copy_len */ 6267} /* ixgbe_sysctl_rx_copy_len */
6268 6268
6269/************************************************************************ 6269/************************************************************************
6270 * ixgbe_sysctl_tx_process_limit 6270 * ixgbe_sysctl_tx_process_limit
6271 ************************************************************************/ 6271 ************************************************************************/
6272static int 6272static int
6273ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS) 6273ixgbe_sysctl_tx_process_limit(SYSCTLFN_ARGS)
6274{ 6274{
6275 struct sysctlnode node = *rnode; 6275 struct sysctlnode node = *rnode;
6276 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6276 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6277 int error; 6277 int error;
6278 int result = sc->tx_process_limit; 6278 int result = sc->tx_process_limit;
6279 6279
6280 node.sysctl_data = &result; 6280 node.sysctl_data = &result;
6281 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6281 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6282 6282
6283 if (error || newp == NULL) 6283 if (error || newp == NULL)
6284 return error; 6284 return error;
6285 6285
6286 if ((result <= 0) || (result > sc->num_tx_desc)) 6286 if ((result <= 0) || (result > sc->num_tx_desc))
6287 return EINVAL; 6287 return EINVAL;
6288 6288
6289 sc->tx_process_limit = result; 6289 sc->tx_process_limit = result;
6290 6290
6291 return 0; 6291 return 0;
6292} /* ixgbe_sysctl_tx_process_limit */ 6292} /* ixgbe_sysctl_tx_process_limit */
6293 6293
6294/************************************************************************ 6294/************************************************************************
6295 * ixgbe_sysctl_rx_process_limit 6295 * ixgbe_sysctl_rx_process_limit
6296 ************************************************************************/ 6296 ************************************************************************/
6297static int 6297static int
6298ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS) 6298ixgbe_sysctl_rx_process_limit(SYSCTLFN_ARGS)
6299{ 6299{
6300 struct sysctlnode node = *rnode; 6300 struct sysctlnode node = *rnode;
6301 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data; 6301 struct ixgbe_softc *sc = (struct ixgbe_softc *)node.sysctl_data;
6302 int error; 6302 int error;
6303 int result = sc->rx_process_limit; 6303 int result = sc->rx_process_limit;
6304 6304
6305 node.sysctl_data = &result; 6305 node.sysctl_data = &result;
6306 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6306 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6307 6307
6308 if (error || newp == NULL) 6308 if (error || newp == NULL)
6309 return error; 6309 return error;
6310 6310
6311 if ((result <= 0) || (result > sc->num_rx_desc)) 6311 if ((result <= 0) || (result > sc->num_rx_desc))
6312 return EINVAL; 6312 return EINVAL;
6313 6313
6314 sc->rx_process_limit = result; 6314 sc->rx_process_limit = result;
6315 6315
6316 return 0; 6316 return 0;
6317} /* ixgbe_sysctl_rx_process_limit */ 6317} /* ixgbe_sysctl_rx_process_limit */
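/*
 * The three handlers above share one shape: copy the current value
 * into a local, let sysctl_lookup(9) fetch the new value, range-check,
 * then commit. A hypothetical factored-out helper (the name and the
 * extra parameters are illustrative, not existing driver API):
 */
static int
sysctl_bounded_int_sketch(SYSCTLFN_ARGS, int *valp, int lo, int hi)
{
	struct sysctlnode node = *rnode;
	int error, result = *valp;

	node.sysctl_data = &result;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	if ((result < lo) || (result > hi))
		return EINVAL;
	*valp = result;
	return 0;
}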
6318 6318
6319/************************************************************************ 6319/************************************************************************
6320 * ixgbe_init_device_features 6320 * ixgbe_init_device_features
6321 ************************************************************************/ 6321 ************************************************************************/
6322static void 6322static void
6323ixgbe_init_device_features(struct ixgbe_softc *sc) 6323ixgbe_init_device_features(struct ixgbe_softc *sc)
6324{ 6324{
6325 sc->feat_cap = IXGBE_FEATURE_NETMAP 6325 sc->feat_cap = IXGBE_FEATURE_NETMAP
6326 | IXGBE_FEATURE_RSS 6326 | IXGBE_FEATURE_RSS
6327 | IXGBE_FEATURE_MSI 6327 | IXGBE_FEATURE_MSI
6328 | IXGBE_FEATURE_MSIX 6328 | IXGBE_FEATURE_MSIX
6329 | IXGBE_FEATURE_LEGACY_IRQ 6329 | IXGBE_FEATURE_LEGACY_IRQ
6330 | IXGBE_FEATURE_LEGACY_TX; 6330 | IXGBE_FEATURE_LEGACY_TX;
6331 6331
6332 /* Set capabilities first... */ 6332 /* Set capabilities first... */
6333 switch (sc->hw.mac.type) { 6333 switch (sc->hw.mac.type) {
6334 case ixgbe_mac_82598EB: 6334 case ixgbe_mac_82598EB:
6335 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT) 6335 if (sc->hw.device_id == IXGBE_DEV_ID_82598AT)
6336 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 6336 sc->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6337 break; 6337 break;
6338 case ixgbe_mac_X540: 6338 case ixgbe_mac_X540:
6339 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6339 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6340 sc->feat_cap |= IXGBE_FEATURE_FDIR; 6340 sc->feat_cap |= IXGBE_FEATURE_FDIR;
6341 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 6341 if ((sc->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6342 (sc->hw.bus.func == 0)) 6342 (sc->hw.bus.func == 0))
6343 sc->feat_cap |= IXGBE_FEATURE_BYPASS; 6343 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6344 break; 6344 break;
6345 case ixgbe_mac_X550: 6345 case ixgbe_mac_X550:
6346 /* 6346 /*
6347 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6347 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6348 * NVM Image version. 6348 * NVM Image version.
6349 */ 6349 */
6350 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 6350 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6351 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6351 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6352 sc->feat_cap |= IXGBE_FEATURE_FDIR; 6352 sc->feat_cap |= IXGBE_FEATURE_FDIR;
6353 break; 6353 break;
6354 case ixgbe_mac_X550EM_x: 6354 case ixgbe_mac_X550EM_x:
6355 /* 6355 /*
6356 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6356 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6357 * NVM Image version. 6357 * NVM Image version.
6358 */ 6358 */
6359 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6359 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6360 sc->feat_cap |= IXGBE_FEATURE_FDIR; 6360 sc->feat_cap |= IXGBE_FEATURE_FDIR;
6361 break; 6361 break;
6362 case ixgbe_mac_X550EM_a: 6362 case ixgbe_mac_X550EM_a:
6363 /* 6363 /*
6364 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6364 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6365 * NVM Image version. 6365 * NVM Image version.
6366 */ 6366 */
6367 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6367 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6368 sc->feat_cap |= IXGBE_FEATURE_FDIR; 6368 sc->feat_cap |= IXGBE_FEATURE_FDIR;
6369 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 6369 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6370 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 6370 if ((sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6371 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 6371 (sc->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6372 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 6372 sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6373 sc->feat_cap |= IXGBE_FEATURE_EEE; 6373 sc->feat_cap |= IXGBE_FEATURE_EEE;
6374 } 6374 }
6375 break; 6375 break;
6376 case ixgbe_mac_82599EB: 6376 case ixgbe_mac_82599EB:
6377 sc->feat_cap |= IXGBE_FEATURE_SRIOV; 6377 sc->feat_cap |= IXGBE_FEATURE_SRIOV;
6378 sc->feat_cap |= IXGBE_FEATURE_FDIR; 6378 sc->feat_cap |= IXGBE_FEATURE_FDIR;
6379 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 6379 if ((sc->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6380 (sc->hw.bus.func == 0)) 6380 (sc->hw.bus.func == 0))
6381 sc->feat_cap |= IXGBE_FEATURE_BYPASS; 6381 sc->feat_cap |= IXGBE_FEATURE_BYPASS;
6382 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 6382 if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6383 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 6383 sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6384 break; 6384 break;
6385 default: 6385 default:
6386 break; 6386 break;
6387 } 6387 }
6388 6388
6389 /* Enabled by default... */ 6389 /* Enabled by default... */
6390 /* Fan failure detection */ 6390 /* Fan failure detection */
6391 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL) 6391 if (sc->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6392 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL; 6392 sc->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6393 /* Netmap */ 6393 /* Netmap */
6394 if (sc->feat_cap & IXGBE_FEATURE_NETMAP) 6394 if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
6395 sc->feat_en |= IXGBE_FEATURE_NETMAP; 6395 sc->feat_en |= IXGBE_FEATURE_NETMAP;
6396 /* EEE */ 6396 /* EEE */
6397 if (sc->feat_cap & IXGBE_FEATURE_EEE) 6397 if (sc->feat_cap & IXGBE_FEATURE_EEE)
6398 sc->feat_en |= IXGBE_FEATURE_EEE; 6398 sc->feat_en |= IXGBE_FEATURE_EEE;
6399 /* Thermal Sensor */ 6399 /* Thermal Sensor */
6400 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 6400 if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6401 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 6401 sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6402 /* 6402 /*
6403 * Recovery mode: 6403 * Recovery mode:
6404 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading 6404 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6405 * NVM Image version. 6405 * NVM Image version.
6406 */ 6406 */
6407 6407
6408 /* Enabled via global sysctl... */ 6408 /* Enabled via global sysctl... */
6409 /* Flow Director */ 6409 /* Flow Director */
6410 if (ixgbe_enable_fdir) { 6410 if (ixgbe_enable_fdir) {
6411 if (sc->feat_cap & IXGBE_FEATURE_FDIR) 6411 if (sc->feat_cap & IXGBE_FEATURE_FDIR)
6412 sc->feat_en |= IXGBE_FEATURE_FDIR; 6412 sc->feat_en |= IXGBE_FEATURE_FDIR;
6413 else 6413 else
6414 device_printf(sc->dev, "Device does not support " 6414 device_printf(sc->dev, "Device does not support "
6415 "Flow Director. Leaving disabled."); 6415 "Flow Director. Leaving disabled.");
6416 } 6416 }
6417 /* Legacy (single queue) transmit */ 6417 /* Legacy (single queue) transmit */
6418 if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 6418 if ((sc->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6419 ixgbe_enable_legacy_tx) 6419 ixgbe_enable_legacy_tx)
6420 sc->feat_en |= IXGBE_FEATURE_LEGACY_TX; 6420 sc->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6421 /* 6421 /*
6422 * Message Signal Interrupts - Extended (MSI-X) 6422 * Message Signal Interrupts - Extended (MSI-X)
6423 * Normal MSI is only enabled if MSI-X calls fail. 6423 * Normal MSI is only enabled if MSI-X calls fail.
6424 */ 6424 */
6425 if (!ixgbe_enable_msix) 6425 if (!ixgbe_enable_msix)
6426 sc->feat_cap &= ~IXGBE_FEATURE_MSIX; 6426 sc->feat_cap &= ~IXGBE_FEATURE_MSIX;
6427 /* Receive-Side Scaling (RSS) */ 6427 /* Receive-Side Scaling (RSS) */
6428 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 6428 if ((sc->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6429 sc->feat_en |= IXGBE_FEATURE_RSS; 6429 sc->feat_en |= IXGBE_FEATURE_RSS;
6430 6430
6431 /* Disable features with unmet dependencies... */ 6431 /* Disable features with unmet dependencies... */
6432 /* No MSI-X */ 6432 /* No MSI-X */
6433 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) { 6433 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) {
6434 sc->feat_cap &= ~IXGBE_FEATURE_RSS; 6434 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
6435 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV; 6435 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6436 sc->feat_en &= ~IXGBE_FEATURE_RSS; 6436 sc->feat_en &= ~IXGBE_FEATURE_RSS;
6437 sc->feat_en &= ~IXGBE_FEATURE_SRIOV; 6437 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
6438 } 6438 }
6439} /* ixgbe_init_device_features */ 6439} /* ixgbe_init_device_features */
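/*
 * Reading aid for the feature bits above: feat_cap records what the
 * MAC can do, feat_en what is actually switched on; the dependency
 * step at the end (no MSI-X means no RSS/SRIOV) clears both. Runtime
 * code therefore tests feat_en, e.g.:
 *
 *	if (sc->feat_en & IXGBE_FEATURE_RSS)
 *		... program the RSS redirection table ...
 */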
6440 6440
6441/************************************************************************ 6441/************************************************************************
6442 * ixgbe_probe - Device identification routine 6442 * ixgbe_probe - Device identification routine
6443 * 6443 *
6444 * Determines if the driver should be loaded on 6444 * Determines if the driver should be loaded on
6445 * adapter based on its PCI vendor/device ID. 6445 * adapter based on its PCI vendor/device ID.
6446 * 6446 *
6447 * return BUS_PROBE_DEFAULT on success, positive on failure 6447 * return BUS_PROBE_DEFAULT on success, positive on failure
6448 ************************************************************************/ 6448 ************************************************************************/
6449static int 6449static int
6450ixgbe_probe(device_t dev, cfdata_t cf, void *aux) 6450ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6451{ 6451{
6452 const struct pci_attach_args *pa = aux; 6452 const struct pci_attach_args *pa = aux;
6453 6453
6454 return (ixgbe_lookup(pa) != NULL) ? 1 : 0; 6454 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6455} 6455}
6456 6456
6457static const ixgbe_vendor_info_t * 6457static const ixgbe_vendor_info_t *
6458ixgbe_lookup(const struct pci_attach_args *pa) 6458ixgbe_lookup(const struct pci_attach_args *pa)
6459{ 6459{
6460 const ixgbe_vendor_info_t *ent; 6460 const ixgbe_vendor_info_t *ent;
6461 pcireg_t subid; 6461 pcireg_t subid;
6462 6462
6463 INIT_DEBUGOUT("ixgbe_lookup: begin"); 6463 INIT_DEBUGOUT("ixgbe_lookup: begin");
6464 6464
6465 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) 6465 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6466 return NULL; 6466 return NULL;
6467 6467
6468 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 6468 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6469 6469
6470 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) { 6470 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6471 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && 6471 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6472 (PCI_PRODUCT(pa->pa_id) == ent->device_id) && 6472 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6473 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || 6473 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6474 (ent->subvendor_id == 0)) && 6474 (ent->subvendor_id == 0)) &&
6475 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || 6475 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6476 (ent->subdevice_id == 0))) { 6476 (ent->subdevice_id == 0))) {
6477 return ent; 6477 return ent;
6478 } 6478 }
6479 } 6479 }
6480 return NULL; 6480 return NULL;
6481} 6481}
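/*
 * Matching note for ixgbe_lookup() above: zero subvendor/subdevice
 * fields act as wildcards, so a table entry such as (field values
 * illustrative)
 *
 *	{ IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0 }
 *
 * matches an X540-T from any board vendor, while nonzero values pin
 * the entry to one specific subsystem.
 */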
6482 6482
6483static int 6483static int
6484ixgbe_ifflags_cb(struct ethercom *ec) 6484ixgbe_ifflags_cb(struct ethercom *ec)
6485{ 6485{
6486 struct ifnet *ifp = &ec->ec_if; 6486 struct ifnet *ifp = &ec->ec_if;
6487 struct ixgbe_softc *sc = ifp->if_softc; 6487 struct ixgbe_softc *sc = ifp->if_softc;
6488 u_short change; 6488 u_short change;
6489 int rv = 0; 6489 int rv = 0;
6490 6490
6491 IXGBE_CORE_LOCK(sc); 6491 IXGBE_CORE_LOCK(sc);
6492 6492
6493 change = ifp->if_flags ^ sc->if_flags; 6493 change = ifp->if_flags ^ sc->if_flags;
6494 if (change != 0) 6494 if (change != 0)
6495 sc->if_flags = ifp->if_flags; 6495 sc->if_flags = ifp->if_flags;
6496 6496
6497 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 6497 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6498 rv = ENETRESET; 6498 rv = ENETRESET;
6499 goto out; 6499 goto out;
6500 } else if ((change & IFF_PROMISC) != 0) 6500 } else if ((change & IFF_PROMISC) != 0)
6501 ixgbe_set_rxfilter(sc); 6501 ixgbe_set_rxfilter(sc);
6502 6502
6503 /* Check for ec_capenable. */ 6503 /* Check for ec_capenable. */
6504 change = ec->ec_capenable ^ sc->ec_capenable; 6504 change = ec->ec_capenable ^ sc->ec_capenable;
6505 sc->ec_capenable = ec->ec_capenable; 6505 sc->ec_capenable = ec->ec_capenable;
6506 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 6506 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6507 | ETHERCAP_VLAN_HWFILTER)) != 0) { 6507 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6508 rv = ENETRESET; 6508 rv = ENETRESET;
6509 goto out; 6509 goto out;
6510 } 6510 }
6511 6511
6512 /* 6512 /*
6513 * Special handling is not required for ETHERCAP_VLAN_MTU. 6513 * Special handling is not required for ETHERCAP_VLAN_MTU.
6514 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header. 6514 * MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
6515 */ 6515 */
6516 6516
6517 /* Set up VLAN support and filter */ 6517 /* Set up VLAN support and filter */
6518 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0) 6518 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6519 ixgbe_setup_vlan_hw_support(sc); 6519 ixgbe_setup_vlan_hw_support(sc);
6520 6520
6521out: 6521out:
6522 IXGBE_CORE_UNLOCK(sc); 6522 IXGBE_CORE_UNLOCK(sc);
6523 6523
6524 return rv; 6524 return rv;
6525} 6525}
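/*
 * Contract note for the callback above: returning ENETRESET tells the
 * ethernet layer to take the interface fully down and back up, while
 * the cheap cases (promiscuous mode, VLAN hardware options) are
 * reprogrammed in place without a full reinit.
 */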
6526 6526
6527/************************************************************************ 6527/************************************************************************
6528 * ixgbe_ioctl - Ioctl entry point 6528 * ixgbe_ioctl - Ioctl entry point
6529 * 6529 *
6530 * Called when the user wants to configure the interface. 6530 * Called when the user wants to configure the interface.
6531 * 6531 *
6532 * return 0 on success, positive on failure 6532 * return 0 on success, positive on failure
6533 ************************************************************************/ 6533 ************************************************************************/
6534static int 6534static int
6535ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data) 6535ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6536{ 6536{
6537 struct ixgbe_softc *sc = ifp->if_softc; 6537 struct ixgbe_softc *sc = ifp->if_softc;
6538 struct ixgbe_hw *hw = &sc->hw; 6538 struct ixgbe_hw *hw = &sc->hw;
6539 struct ifcapreq *ifcr = data; 6539 struct ifcapreq *ifcr = data;
6540 struct ifreq *ifr = data; 6540 struct ifreq *ifr = data;
6541 int error = 0; 6541 int error = 0;
6542 int l4csum_en; 6542 int l4csum_en;
6543 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 6543 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6544 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 6544 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6545 6545
6546 if (ixgbe_fw_recovery_mode_swflag(sc)) 6546 if (ixgbe_fw_recovery_mode_swflag(sc))
6547 return (EPERM); 6547 return (EPERM);
6548 6548
6549 switch (command) { 6549 switch (command) {
6550 case SIOCSIFFLAGS: 6550 case SIOCSIFFLAGS:
6551 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 6551 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6552 break; 6552 break;
6553 case SIOCADDMULTI: 6553 case SIOCADDMULTI:
6554 case SIOCDELMULTI: 6554 case SIOCDELMULTI:
6555 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 6555 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6556 break; 6556 break;
6557 case SIOCSIFMEDIA: 6557 case SIOCSIFMEDIA:
6558 case SIOCGIFMEDIA: 6558 case SIOCGIFMEDIA:
6559 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 6559 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6560 break; 6560 break;
6561 case SIOCSIFCAP: 6561 case SIOCSIFCAP:
6562 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 6562 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6563 break; 6563 break;
6564 case SIOCSIFMTU: 6564 case SIOCSIFMTU:
6565 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 6565 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6566 break; 6566 break;
6567#ifdef __NetBSD__ 6567#ifdef __NetBSD__
6568 case SIOCINITIFADDR: 6568 case SIOCINITIFADDR:
6569 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR"); 6569 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6570 break; 6570 break;
6571 case SIOCGIFFLAGS: 6571 case SIOCGIFFLAGS:
6572 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS"); 6572 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6573 break; 6573 break;
6574 case SIOCGIFAFLAG_IN: 6574 case SIOCGIFAFLAG_IN:
6575 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN"); 6575 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6576 break; 6576 break;
6577 case SIOCGIFADDR: 6577 case SIOCGIFADDR:
6578 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR"); 6578 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6579 break; 6579 break;
6580 case SIOCGIFMTU: 6580 case SIOCGIFMTU:
6581 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)"); 6581 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6582 break; 6582 break;
6583 case SIOCGIFCAP: 6583 case SIOCGIFCAP:
6584 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)"); 6584 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6585 break; 6585 break;
6586 case SIOCGETHERCAP: 6586 case SIOCGETHERCAP:
6587 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)"); 6587 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6588 break; 6588 break;
6589 case SIOCGLIFADDR: 6589 case SIOCGLIFADDR:
6590 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)"); 6590 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6591 break; 6591 break;
6592 case SIOCZIFDATA: 6592 case SIOCZIFDATA:
6593 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)"); 6593 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6594 hw->mac.ops.clear_hw_cntrs(hw); 6594 hw->mac.ops.clear_hw_cntrs(hw);
6595 ixgbe_clear_evcnt(sc); 6595 ixgbe_clear_evcnt(sc);
6596 break; 6596 break;
6597 case SIOCAIFADDR: 6597 case SIOCAIFADDR:
6598 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)"); 6598 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6599 break; 6599 break;
6600#endif 6600#endif
6601 default: 6601 default:
6602 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); 6602 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6603 break; 6603 break;
6604 } 6604 }
6605 6605
6606 switch (command) { 6606 switch (command) {
6607 case SIOCGI2C: 6607 case SIOCGI2C:
6608 { 6608 {
6609 struct ixgbe_i2c_req i2c; 6609 struct ixgbe_i2c_req i2c;
6610 6610
6611 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); 6611 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6612 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 6612 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6613 if (error != 0) 6613 if (error != 0)
6614 break; 6614 break;
6615 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 6615 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6616 error = EINVAL; 6616 error = EINVAL;
6617 break; 6617 break;
6618 } 6618 }
6619 if (i2c.len > sizeof(i2c.data)) { 6619 if (i2c.len > sizeof(i2c.data)) {
6620 error = EINVAL; 6620 error = EINVAL;
6621 break; 6621 break;
6622 } 6622 }
6623 6623
6624 hw->phy.ops.read_i2c_byte(hw, i2c.offset, 6624 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6625 i2c.dev_addr, i2c.data); 6625 i2c.dev_addr, i2c.data);
6626 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 6626 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6627 break; 6627 break;
6628 } 6628 }
6629 case SIOCSIFCAP: 6629 case SIOCSIFCAP:
6630 /* Layer-4 Rx checksum offload has to be turned on and 6630 /* Layer-4 Rx checksum offload has to be turned on and
6631 * off as a unit. 6631 * off as a unit.
6632 */ 6632 */
6633 l4csum_en = ifcr->ifcr_capenable & l4csum; 6633 l4csum_en = ifcr->ifcr_capenable & l4csum;
6634 if (l4csum_en != l4csum && l4csum_en != 0) 6634 if (l4csum_en != l4csum && l4csum_en != 0)
6635 return EINVAL; 6635 return EINVAL;
6636 /*FALLTHROUGH*/ 6636 /*FALLTHROUGH*/
6637 case SIOCADDMULTI: 6637 case SIOCADDMULTI:
6638 case SIOCDELMULTI: 6638 case SIOCDELMULTI:
6639 case SIOCSIFFLAGS: 6639 case SIOCSIFFLAGS:
6640 case SIOCSIFMTU: 6640 case SIOCSIFMTU:
6641 default: 6641 default:
6642 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 6642 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6643 return error; 6643 return error;
6644 if ((ifp->if_flags & IFF_RUNNING) == 0) 6644 if ((ifp->if_flags & IFF_RUNNING) == 0)
6645 ; 6645 ;
6646 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { 6646 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6647 IXGBE_CORE_LOCK(sc); 6647 IXGBE_CORE_LOCK(sc);
6648 if ((ifp->if_flags & IFF_RUNNING) != 0) 6648 if ((ifp->if_flags & IFF_RUNNING) != 0)
6649 ixgbe_init_locked(sc); 6649 ixgbe_init_locked(sc);
6650 ixgbe_recalculate_max_frame(sc); 6650 ixgbe_recalculate_max_frame(sc);
6651 IXGBE_CORE_UNLOCK(sc); 6651 IXGBE_CORE_UNLOCK(sc);
6652 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 6652 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6653 /* 6653 /*
6654 * Multicast list has changed; set the hardware filter 6654 * Multicast list has changed; set the hardware filter
6655 * accordingly. 6655 * accordingly.
6656 */ 6656 */
6657 IXGBE_CORE_LOCK(sc); 6657 IXGBE_CORE_LOCK(sc);
6658 ixgbe_disable_intr(sc); 6658 ixgbe_disable_intr(sc);
6659 ixgbe_set_rxfilter(sc); 6659 ixgbe_set_rxfilter(sc);
6660 ixgbe_enable_intr(sc); 6660 ixgbe_enable_intr(sc);
6661 IXGBE_CORE_UNLOCK(sc); 6661 IXGBE_CORE_UNLOCK(sc);
6662 } 6662 }
6663 return 0; 6663 return 0;
6664 } 6664 }
6665 6665
6666 return error; 6666 return error;
6667} /* ixgbe_ioctl */ 6667} /* ixgbe_ioctl */
6668 6668
6669/************************************************************************ 6669/************************************************************************
6670 * ixgbe_check_fan_failure 6670 * ixgbe_check_fan_failure
6671 ************************************************************************/ 6671 ************************************************************************/
6672static int 6672static int
6673ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt) 6673ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
6674{ 6674{
6675 u32 mask; 6675 u32 mask;
6676 6676
6677 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) : 6677 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&sc->hw) :
6678 IXGBE_ESDP_SDP1; 6678 IXGBE_ESDP_SDP1;
6679 6679
6680 if ((reg & mask) == 0) 6680 if ((reg & mask) == 0)
6681 return IXGBE_SUCCESS; 6681 return IXGBE_SUCCESS;
6682 6682
6683 /* 6683 /*
6684 * Use ratecheck() in case the interrupt occurs frequently. 6684 * Use ratecheck() in case the interrupt occurs frequently.
6685 * When an EXPX9501AT's fan stopped, the interrupt occurred only once, 6685 * When an EXPX9501AT's fan stopped, the interrupt occurred only once,
6686 * a red LED on the board turned on, and the link never came up until 6686 * a red LED on the board turned on, and the link never came up until
6687 * power off. 6687 * power off.
6688 */ 6688 */
6689 if (ratecheck(&sc->lasterr_time, &ixgbe_errlog_intrvl)) 6689 if (ratecheck(&sc->lasterr_time, &ixgbe_errlog_intrvl))
6690 device_printf(sc->dev, 6690 device_printf(sc->dev,
6691 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 6691 "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6692 6692
6693 return IXGBE_ERR_FAN_FAILURE; 6693 return IXGBE_ERR_FAN_FAILURE;
6694} /* ixgbe_check_fan_failure */ 6694} /* ixgbe_check_fan_failure */
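
(ratecheck(9) is what keeps the fan-failure message from flooding the
console. A minimal sketch of the idiom, with a hypothetical 10-second
minimum interval:)

	static struct timeval lasttime;
	static const struct timeval minintvl = { 10, 0 };	/* example value */

	/* Log at most once per minintvl, even if the condition persists. */
	if (ratecheck(&lasttime, &minintvl))
		device_printf(sc->dev, "recurring error (rate-limited)\n");
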
6695 6695
6696/************************************************************************ 6696/************************************************************************
6697 * ixgbe_handle_que 6697 * ixgbe_handle_que
6698 ************************************************************************/ 6698 ************************************************************************/
6699static void 6699static void
6700ixgbe_handle_que(void *context) 6700ixgbe_handle_que(void *context)
6701{ 6701{
6702 struct ix_queue *que = context; 6702 struct ix_queue *que = context;
6703 struct ixgbe_softc *sc = que->sc; 6703 struct ixgbe_softc *sc = que->sc;
6704 struct tx_ring *txr = que->txr; 6704 struct tx_ring *txr = que->txr;
6705 struct ifnet *ifp = sc->ifp; 6705 struct ifnet *ifp = sc->ifp;
6706 bool more = false; 6706 bool more = false;
6707 6707
6708 IXGBE_EVC_ADD(&que->handleq, 1); 6708 IXGBE_EVC_ADD(&que->handleq, 1);
6709 6709
6710 if (ifp->if_flags & IFF_RUNNING) { 6710 if (ifp->if_flags & IFF_RUNNING) {
6711 IXGBE_TX_LOCK(txr); 6711 IXGBE_TX_LOCK(txr);
6712 more = ixgbe_txeof(txr); 6712 more = ixgbe_txeof(txr);
6713 if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) 6713 if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX))
6714 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) 6714 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
6715 ixgbe_mq_start_locked(ifp, txr); 6715 ixgbe_mq_start_locked(ifp, txr);
6716 /* Only for queue 0 */ 6716 /* Only for queue 0 */
6717 /* NetBSD still needs this for CBQ */ 6717 /* NetBSD still needs this for CBQ */
6718 if ((&sc->queues[0] == que) 6718 if ((&sc->queues[0] == que)
6719 && (!ixgbe_legacy_ring_empty(ifp, NULL))) 6719 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
6720 ixgbe_legacy_start_locked(ifp, txr); 6720 ixgbe_legacy_start_locked(ifp, txr);
6721 IXGBE_TX_UNLOCK(txr); 6721 IXGBE_TX_UNLOCK(txr);
6722 more |= ixgbe_rxeof(que); 6722 more |= ixgbe_rxeof(que);
6723 } 6723 }
6724 6724
6725 if (more) { 6725 if (more) {
6726 IXGBE_EVC_ADD(&que->req, 1); 6726 IXGBE_EVC_ADD(&que->req, 1);
6727 ixgbe_sched_handle_que(sc, que); 6727 ixgbe_sched_handle_que(sc, que);
6728 } else if (que->res != NULL) { 6728 } else if (que->res != NULL) {
6729 /* MSIX: Re-enable this interrupt */ 6729 /* MSIX: Re-enable this interrupt */
6730 ixgbe_enable_queue(sc, que->msix); 6730 ixgbe_enable_queue(sc, que->msix);
6731 } else { 6731 } else {
6732 /* INTx or MSI */ 6732 /* INTx or MSI */
6733 ixgbe_enable_queue(sc, 0); 6733 ixgbe_enable_queue(sc, 0);
6734 } 6734 }
6735 6735
6736 return; 6736 return;
6737} /* ixgbe_handle_que */ 6737} /* ixgbe_handle_que */
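
(The control flow above is the usual deferred-processing loop: drain the
rings, then either reschedule if work remains or unmask the interrupt so
the hardware can raise it again. In outline, with illustrative helper
names that are not driver APIs:)

	more = drain_tx(que) | drain_rx(que);	/* hypothetical helpers */
	if (more)
		reschedule(que);	/* softint/workqueue runs us again */
	else
		unmask(que);		/* hardware interrupts resume */
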
6738 6738
6739/************************************************************************ 6739/************************************************************************
6740 * ixgbe_handle_que_work 6740 * ixgbe_handle_que_work
6741 ************************************************************************/ 6741 ************************************************************************/
6742static void 6742static void
6743ixgbe_handle_que_work(struct work *wk, void *context) 6743ixgbe_handle_que_work(struct work *wk, void *context)
6744{ 6744{
6745 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); 6745 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
6746 6746
6747 /* 6747 /*
6748 * "enqueued flag" is not required here. 6748 * "enqueued flag" is not required here.
6749 * See ixgbe_msix_que(). 6749 * See ixgbe_msix_que().
6750 */ 6750 */
6751 ixgbe_handle_que(que); 6751 ixgbe_handle_que(que);
6752} 6752}
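
(container_of() recovers the enclosing ix_queue from the wq_cookie member
that the workqueue hands back. A sketch of the standard expansion, for
reference:)

	/* Standard container_of() expansion (sketch). */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ix_queue *q = container_of(wk, struct ix_queue, wq_cookie);
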
6753 6753
6754/************************************************************************ 6754/************************************************************************
6755 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler 6755 * ixgbe_allocate_legacy - Setup the Legacy or MSI Interrupt handler
6756 ************************************************************************/ 6756 ************************************************************************/
6757static int 6757static int
6758ixgbe_allocate_legacy(struct ixgbe_softc *sc, 6758ixgbe_allocate_legacy(struct ixgbe_softc *sc,
6759 const struct pci_attach_args *pa) 6759 const struct pci_attach_args *pa)
6760{ 6760{
6761 device_t dev = sc->dev; 6761 device_t dev = sc->dev;
6762 struct ix_queue *que = sc->queues; 6762 struct ix_queue *que = sc->queues;
6763 struct tx_ring *txr = sc->tx_rings; 6763 struct tx_ring *txr = sc->tx_rings;
6764 int counts[PCI_INTR_TYPE_SIZE]; 6764 int counts[PCI_INTR_TYPE_SIZE];
6765 pci_intr_type_t intr_type, max_type; 6765 pci_intr_type_t intr_type, max_type;
6766 char intrbuf[PCI_INTRSTR_LEN]; 6766 char intrbuf[PCI_INTRSTR_LEN];
6767 char wqname[MAXCOMLEN]; 6767 char wqname[MAXCOMLEN];
6768 const char *intrstr = NULL; 6768 const char *intrstr = NULL;
6769 int defertx_error = 0, error; 6769 int defertx_error = 0, error;
6770 6770
6771 /* We allocate a single interrupt resource */ 6771 /* We allocate a single interrupt resource */
6772 max_type = PCI_INTR_TYPE_MSI; 6772 max_type = PCI_INTR_TYPE_MSI;
6773 counts[PCI_INTR_TYPE_MSIX] = 0; 6773 counts[PCI_INTR_TYPE_MSIX] = 0;
6774 counts[PCI_INTR_TYPE_MSI] = 6774 counts[PCI_INTR_TYPE_MSI] =
6775 (sc->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0; 6775 (sc->feat_en & IXGBE_FEATURE_MSI) ? 1 : 0;
6776 /* Check feat_cap, not feat_en, so we can fall back to INTx */ 6776 /* Check feat_cap, not feat_en, so we can fall back to INTx */
6777 counts[PCI_INTR_TYPE_INTX] = 6777 counts[PCI_INTR_TYPE_INTX] =
6778 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0; 6778 (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) ? 1 : 0;
6779 6779
6780alloc_retry: 6780alloc_retry:
6781 if (pci_intr_alloc(pa, &sc->osdep.intrs, counts, max_type) != 0) { 6781 if (pci_intr_alloc(pa, &sc->osdep.intrs, counts, max_type) != 0) {
6782 aprint_error_dev(dev, "couldn't alloc interrupt\n"); 6782 aprint_error_dev(dev, "couldn't alloc interrupt\n");
6783 return ENXIO; 6783 return ENXIO;
6784 } 6784 }
6785 sc->osdep.nintrs = 1; 6785 sc->osdep.nintrs = 1;
6786 intrstr = pci_intr_string(sc->osdep.pc, sc->osdep.intrs[0], 6786 intrstr = pci_intr_string(sc->osdep.pc, sc->osdep.intrs[0],
6787 intrbuf, sizeof(intrbuf)); 6787 intrbuf, sizeof(intrbuf));
6788 sc->osdep.ihs[0] = pci_intr_establish_xname(sc->osdep.pc, 6788 sc->osdep.ihs[0] = pci_intr_establish_xname(sc->osdep.pc,
6789 sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que, 6789 sc->osdep.intrs[0], IPL_NET, ixgbe_legacy_irq, que,
6790 device_xname(dev)); 6790 device_xname(dev));
6791 intr_type = pci_intr_type(sc->osdep.pc, sc->osdep.intrs[0]); 6791 intr_type = pci_intr_type(sc->osdep.pc, sc->osdep.intrs[0]);
6792 if (sc->osdep.ihs[0] == NULL) { 6792 if (sc->osdep.ihs[0] == NULL) {
6793 aprint_error_dev(dev, "unable to establish %s\n", 6793 aprint_error_dev(dev, "unable to establish %s\n",
6794 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx"); 6794 (intr_type == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
6795 pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1); 6795 pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6796 sc->osdep.intrs = NULL; 6796 sc->osdep.intrs = NULL;
6797 switch (intr_type) { 6797 switch (intr_type) {
6798 case PCI_INTR_TYPE_MSI: 6798 case PCI_INTR_TYPE_MSI:
6799 /* The next try is for INTx: Disable MSI */ 6799 /* The next try is for INTx: Disable MSI */
6800 max_type = PCI_INTR_TYPE_INTX; 6800 max_type = PCI_INTR_TYPE_INTX;
6801 counts[PCI_INTR_TYPE_INTX] = 1; 6801 counts[PCI_INTR_TYPE_INTX] = 1;
6802 sc->feat_en &= ~IXGBE_FEATURE_MSI; 6802 sc->feat_en &= ~IXGBE_FEATURE_MSI;
6803 if (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) { 6803 if (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) {
6804 sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ; 6804 sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6805 goto alloc_retry; 6805 goto alloc_retry;
6806 } else 6806 } else
6807 break; 6807 break;
6808 case PCI_INTR_TYPE_INTX: 6808 case PCI_INTR_TYPE_INTX:
6809 default: 6809 default:
6810 /* See below */ 6810 /* See below */
6811 break; 6811 break;
6812 } 6812 }
6813 } 6813 }
6814 if (intr_type == PCI_INTR_TYPE_INTX) { 6814 if (intr_type == PCI_INTR_TYPE_INTX) {
6815 sc->feat_en &= ~IXGBE_FEATURE_MSI; 6815 sc->feat_en &= ~IXGBE_FEATURE_MSI;
6816 sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ; 6816 sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
6817 } 6817 }
6818 if (sc->osdep.ihs[0] == NULL) { 6818 if (sc->osdep.ihs[0] == NULL) {
6819 aprint_error_dev(dev, 6819 aprint_error_dev(dev,
6820 "couldn't establish interrupt%s%s\n", 6820 "couldn't establish interrupt%s%s\n",
6821 intrstr ? " at " : "", intrstr ? intrstr : ""); 6821 intrstr ? " at " : "", intrstr ? intrstr : "");
6822 pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1); 6822 pci_intr_release(sc->osdep.pc, sc->osdep.intrs, 1);
6823 sc->osdep.intrs = NULL; 6823 sc->osdep.intrs = NULL;
6824 return ENXIO; 6824 return ENXIO;
6825 } 6825 }
6826 aprint_normal_dev(dev, "interrupting at %s\n", intrstr); 6826 aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
6827 /* 6827 /*
6828 * Try allocating a fast interrupt and the associated deferred 6828 * Try allocating a fast interrupt and the associated deferred
6829 * processing contexts. 6829 * processing contexts.
6830 */ 6830 */
6831 if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 6831 if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6832 txr->txr_si = 6832 txr->txr_si =
6833 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 6833 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6834 ixgbe_deferred_mq_start, txr); 6834 ixgbe_deferred_mq_start, txr);
6835 6835
6836 snprintf(wqname, sizeof(wqname), "%sdeferTx", 6836 snprintf(wqname, sizeof(wqname), "%sdeferTx",
6837 device_xname(dev)); 6837 device_xname(dev));
6838 defertx_error = workqueue_create(&sc->txr_wq, wqname, 6838 defertx_error = workqueue_create(&sc->txr_wq, wqname,
6839 ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, 6839 ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI,
6840 IPL_NET, IXGBE_WORKQUEUE_FLAGS); 6840 IPL_NET, IXGBE_WORKQUEUE_FLAGS);
6841 sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); 6841 sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
6842 } 6842 }
6843 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 6843 que->que_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6844 ixgbe_handle_que, que); 6844 ixgbe_handle_que, que);
6845 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); 6845 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
6846 error = workqueue_create(&sc->que_wq, wqname, 6846 error = workqueue_create(&sc->que_wq, wqname,
6847 ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, 6847 ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
6848 IXGBE_WORKQUEUE_FLAGS); 6848 IXGBE_WORKQUEUE_FLAGS);
6849 6849
6850 if ((!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX) 6850 if ((!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)
6851 && ((txr->txr_si == NULL) || defertx_error != 0)) 6851 && ((txr->txr_si == NULL) || defertx_error != 0))
6852 || (que->que_si == NULL) || error != 0) { 6852 || (que->que_si == NULL) || error != 0) {
6853 aprint_error_dev(dev, 6853 aprint_error_dev(dev,
6854 "could not establish software interrupts\n"); 6854 "could not establish software interrupts\n");
6855 6855
6856 return ENXIO; 6856 return ENXIO;
6857 } 6857 }
6858 /* For simplicity in the handlers */ 6858 /* For simplicity in the handlers */
6859 sc->active_queues = IXGBE_EIMS_ENABLE_MASK; 6859 sc->active_queues = IXGBE_EIMS_ENABLE_MASK;
6860 6860
6861 return (0); 6861 return (0);
6862} /* ixgbe_allocate_legacy */ 6862} /* ixgbe_allocate_legacy */
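
(The counts[]/max_type pair is how pci_intr_alloc(9) is told which
interrupt types are acceptable: a zero count disables a type, and
max_type caps the search. A condensed sketch of the policy used above,
MSI preferred with INTx fallback and no MSI-X:)

	int counts[PCI_INTR_TYPE_SIZE];

	counts[PCI_INTR_TYPE_MSIX] = 0;		/* never MSI-X here */
	counts[PCI_INTR_TYPE_MSI]  = 1;
	counts[PCI_INTR_TYPE_INTX] = 1;
	if (pci_intr_alloc(pa, &sc->osdep.intrs, counts,
	    PCI_INTR_TYPE_MSI) != 0)
		return ENXIO;			/* nothing available */
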
6863 6863
6864/************************************************************************ 6864/************************************************************************
6865 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers 6865 * ixgbe_allocate_msix - Setup MSI-X Interrupt resources and handlers
6866 ************************************************************************/ 6866 ************************************************************************/
6867static int 6867static int
6868ixgbe_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa) 6868ixgbe_allocate_msix(struct ixgbe_softc *sc, const struct pci_attach_args *pa)
6869{ 6869{
6870 device_t dev = sc->dev; 6870 device_t dev = sc->dev;
6871 struct ix_queue *que = sc->queues; 6871 struct ix_queue *que = sc->queues;
6872 struct tx_ring *txr = sc->tx_rings; 6872 struct tx_ring *txr = sc->tx_rings;
6873 pci_chipset_tag_t pc; 6873 pci_chipset_tag_t pc;
6874 char intrbuf[PCI_INTRSTR_LEN]; 6874 char intrbuf[PCI_INTRSTR_LEN];
6875 char intr_xname[32]; 6875 char intr_xname[32];
6876 char wqname[MAXCOMLEN]; 6876 char wqname[MAXCOMLEN];
6877 const char *intrstr = NULL; 6877 const char *intrstr = NULL;
6878 int error, vector = 0; 6878 int error, vector = 0;
6879 int cpu_id = 0; 6879 int cpu_id = 0;
6880 kcpuset_t *affinity; 6880 kcpuset_t *affinity;
6881#ifdef RSS 6881#ifdef RSS
6882 unsigned int rss_buckets = 0; 6882 unsigned int rss_buckets = 0;
6883 kcpuset_t cpu_mask; 6883 kcpuset_t cpu_mask;
6884#endif 6884#endif
6885 6885
6886 pc = sc->osdep.pc; 6886 pc = sc->osdep.pc;
6887#ifdef RSS 6887#ifdef RSS
6888 /* 6888 /*
6889 * If we're doing RSS, the number of queues needs to 6889 * If we're doing RSS, the number of queues needs to
6890 * match the number of RSS buckets that are configured. 6890 * match the number of RSS buckets that are configured.
6891 * 6891 *
6892 * + If there are more queues than RSS buckets, we'll end 6892 * + If there are more queues than RSS buckets, we'll end
6893 * up with queues that get no traffic. 6893 * up with queues that get no traffic.
6894 * 6894 *
6895 * + If there are more RSS buckets than queues, we'll end 6895 * + If there are more RSS buckets than queues, we'll end
6896 * up having multiple RSS buckets map to the same queue, 6896 * up having multiple RSS buckets map to the same queue,
6897 * so there'll be some contention. 6897 * so there'll be some contention.
6898 */ 6898 */
6899 rss_buckets = rss_getnumbuckets(); 6899 rss_buckets = rss_getnumbuckets();
6900 if ((sc->feat_en & IXGBE_FEATURE_RSS) && 6900 if ((sc->feat_en & IXGBE_FEATURE_RSS) &&
6901 (sc->num_queues != rss_buckets)) { 6901 (sc->num_queues != rss_buckets)) {
6902 device_printf(dev, 6902 device_printf(dev,
6903 "%s: number of queues (%d) != number of RSS buckets (%d)" 6903 "%s: number of queues (%d) != number of RSS buckets (%d)"
6904 "; performance will be impacted.\n", 6904 "; performance will be impacted.\n",
6905 __func__, sc->num_queues, rss_buckets); 6905 __func__, sc->num_queues, rss_buckets);
6906 } 6906 }
6907#endif 6907#endif
6908 6908
6909 sc->osdep.nintrs = sc->num_queues + 1; 6909 sc->osdep.nintrs = sc->num_queues + 1;
6910 if (pci_msix_alloc_exact(pa, &sc->osdep.intrs, 6910 if (pci_msix_alloc_exact(pa, &sc->osdep.intrs,
6911 sc->osdep.nintrs) != 0) { 6911 sc->osdep.nintrs) != 0) {
6912 aprint_error_dev(dev, 6912 aprint_error_dev(dev,
6913 "failed to allocate MSI-X interrupt\n"); 6913 "failed to allocate MSI-X interrupt\n");
6914 sc->feat_en &= ~IXGBE_FEATURE_MSIX; 6914 sc->feat_en &= ~IXGBE_FEATURE_MSIX;
6915 return (ENXIO); 6915 return (ENXIO);
6916 } 6916 }
6917 6917
6918 kcpuset_create(&affinity, false); 6918 kcpuset_create(&affinity, false);
6919 for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) { 6919 for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
6920 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", 6920 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
6921 device_xname(dev), i); 6921 device_xname(dev), i);
6922 intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf, 6922 intrstr = pci_intr_string(pc, sc->osdep.intrs[i], intrbuf,
6923 sizeof(intrbuf)); 6923 sizeof(intrbuf));
6924#ifdef IXGBE_MPSAFE 6924#ifdef IXGBE_MPSAFE
6925 pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE, 6925 pci_intr_setattr(pc, &sc->osdep.intrs[i], PCI_INTR_MPSAFE,
6926 true); 6926 true);
6927#endif 6927#endif
6928 /* Set the handler function */ 6928 /* Set the handler function */
6929 que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc, 6929 que->res = sc->osdep.ihs[i] = pci_intr_establish_xname(pc,
6930 sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que, 6930 sc->osdep.intrs[i], IPL_NET, ixgbe_msix_que, que,
6931 intr_xname); 6931 intr_xname);
6932 if (que->res == NULL) { 6932 if (que->res == NULL) {
6933 aprint_error_dev(dev, 6933 aprint_error_dev(dev,
6934 "Failed to register QUE handler\n"); 6934 "Failed to register QUE handler\n");
6935 error = ENXIO; 6935 error = ENXIO;
6936 goto err_out; 6936 goto err_out;
6937 } 6937 }
6938 que->msix = vector; 6938 que->msix = vector;
6939 sc->active_queues |= 1ULL << que->msix; 6939 sc->active_queues |= 1ULL << que->msix;
6940 6940
6941 if (sc->feat_en & IXGBE_FEATURE_RSS) { 6941 if (sc->feat_en & IXGBE_FEATURE_RSS) {
6942#ifdef RSS 6942#ifdef RSS
6943 /* 6943 /*
6944 * The queue ID is used as the RSS layer bucket ID. 6944 * The queue ID is used as the RSS layer bucket ID.
6945 * We look up the queue ID -> RSS CPU ID and select 6945 * We look up the queue ID -> RSS CPU ID and select
6946 * that. 6946 * that.
6947 */ 6947 */
6948 cpu_id = rss_getcpu(i % rss_getnumbuckets()); 6948 cpu_id = rss_getcpu(i % rss_getnumbuckets());
6949 CPU_SETOF(cpu_id, &cpu_mask); 6949 CPU_SETOF(cpu_id, &cpu_mask);
6950#endif 6950#endif
6951 } else { 6951 } else {
6952 /* 6952 /*
6953 * Bind the MSI-X vector, and thus the 6953 * Bind the MSI-X vector, and thus the
6954 * rings to the corresponding CPU. 6954 * rings to the corresponding CPU.
6955 * 6955 *
6956 * This just happens to match the default RSS 6956 * This just happens to match the default RSS
6957 * round-robin bucket -> queue -> CPU allocation. 6957 * round-robin bucket -> queue -> CPU allocation.
6958 */ 6958 */
6959 if (sc->num_queues > 1) 6959 if (sc->num_queues > 1)
6960 cpu_id = i; 6960 cpu_id = i;
6961 } 6961 }
6962 /* Round-robin affinity */ 6962 /* Round-robin affinity */
6963 kcpuset_zero(affinity); 6963 kcpuset_zero(affinity);
6964 kcpuset_set(affinity, cpu_id % ncpu); 6964 kcpuset_set(affinity, cpu_id % ncpu);
6965 error = interrupt_distribute(sc->osdep.ihs[i], affinity, 6965 error = interrupt_distribute(sc->osdep.ihs[i], affinity,
6966 NULL); 6966 NULL);
6967 aprint_normal_dev(dev, "for TX/RX, interrupting at %s", 6967 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
6968 intrstr); 6968 intrstr);
6969 if (error == 0) { 6969 if (error == 0) {
6970#if 1 /* def IXGBE_DEBUG */ 6970#if 1 /* def IXGBE_DEBUG */
6971#ifdef RSS 6971#ifdef RSS
6972 aprint_normal(", bound RSS bucket %d to CPU %d", i, 6972 aprint_normal(", bound RSS bucket %d to CPU %d", i,
6973 cpu_id % ncpu); 6973 cpu_id % ncpu);
6974#else 6974#else
6975 aprint_normal(", bound queue %d to cpu %d", i, 6975 aprint_normal(", bound queue %d to cpu %d", i,
6976 cpu_id % ncpu); 6976 cpu_id % ncpu);
6977#endif 6977#endif
6978#endif /* IXGBE_DEBUG */ 6978#endif /* IXGBE_DEBUG */
6979 } 6979 }
6980 aprint_normal("\n"); 6980 aprint_normal("\n");
6981 6981
6982 if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 6982 if (!(sc->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
6983 txr->txr_si = softint_establish( 6983 txr->txr_si = softint_establish(
6984 SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 6984 SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6985 ixgbe_deferred_mq_start, txr); 6985 ixgbe_deferred_mq_start, txr);
6986 if (txr->txr_si == NULL) { 6986 if (txr->txr_si == NULL) {
6987 aprint_error_dev(dev, 6987 aprint_error_dev(dev,
6988 "couldn't establish software interrupt\n"); 6988 "couldn't establish software interrupt\n");
6989 error = ENXIO; 6989 error = ENXIO;
6990 goto err_out; 6990 goto err_out;
6991 } 6991 }
6992 } 6992 }
6993 que->que_si 6993 que->que_si
6994 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 6994 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
6995 ixgbe_handle_que, que); 6995 ixgbe_handle_que, que);
6996 if (que->que_si == NULL) { 6996 if (que->que_si == NULL) {
6997 aprint_error_dev(dev, 6997 aprint_error_dev(dev,
6998 "couldn't establish software interrupt\n"); 6998 "couldn't establish software interrupt\n");
6999 error = ENXIO; 6999 error = ENXIO;
7000 goto err_out; 7000 goto err_out;
7001 } 7001 }
7002 } 7002 }
7003 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev)); 7003 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
7004 error = workqueue_create(&sc->txr_wq, wqname, 7004 error = workqueue_create(&sc->txr_wq, wqname,
7005 ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, 7005 ixgbe_deferred_mq_start_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7006 IXGBE_WORKQUEUE_FLAGS); 7006 IXGBE_WORKQUEUE_FLAGS);
7007 if (error) { 7007 if (error) {
7008 aprint_error_dev(dev, 7008 aprint_error_dev(dev,
7009 "couldn't create workqueue for deferred Tx\n"); 7009 "couldn't create workqueue for deferred Tx\n");
7010 goto err_out; 7010 goto err_out;
7011 } 7011 }
7012 sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); 7012 sc->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
7013 7013
7014 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); 7014 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
7015 error = workqueue_create(&sc->que_wq, wqname, 7015 error = workqueue_create(&sc->que_wq, wqname,
7016 ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET, 7016 ixgbe_handle_que_work, sc, IXGBE_WORKQUEUE_PRI, IPL_NET,
7017 IXGBE_WORKQUEUE_FLAGS); 7017 IXGBE_WORKQUEUE_FLAGS);
7018 if (error) { 7018 if (error) {
7019 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n"); 7019 aprint_error_dev(dev, "couldn't create workqueue for Tx/Rx\n");
7020 goto err_out; 7020 goto err_out;
7021 } 7021 }
7022 7022
7023 /* and Link */ 7023 /* and Link */
7024 cpu_id++; 7024 cpu_id++;
7025 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev)); 7025 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
7026 sc->vector = vector; 7026 sc->vector = vector;
7027 intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf, 7027 intrstr = pci_intr_string(pc, sc->osdep.intrs[vector], intrbuf,
7028 sizeof(intrbuf)); 7028 sizeof(intrbuf));
7029#ifdef IXGBE_MPSAFE 7029#ifdef IXGBE_MPSAFE
7030 pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE, 7030 pci_intr_setattr(pc, &sc->osdep.intrs[vector], PCI_INTR_MPSAFE,
7031 true); 7031 true);
7032#endif 7032#endif
7033 /* Set the link handler function */ 7033 /* Set the link handler function */
7034 sc->osdep.ihs[vector] = pci_intr_establish_xname(pc, 7034 sc->osdep.ihs[vector] = pci_intr_establish_xname(pc,
7035 sc->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, sc, 7035 sc->osdep.intrs[vector], IPL_NET, ixgbe_msix_admin, sc,
7036 intr_xname); 7036 intr_xname);
7037 if (sc->osdep.ihs[vector] == NULL) { 7037 if (sc->osdep.ihs[vector] == NULL) {
7038 aprint_error_dev(dev, "Failed to register LINK handler\n"); 7038 aprint_error_dev(dev, "Failed to register LINK handler\n");
7039 error = ENXIO; 7039 error = ENXIO;
7040 goto err_out; 7040 goto err_out;
7041 } 7041 }
7042 /* Round-robin affinity */ 7042 /* Round-robin affinity */
7043 kcpuset_zero(affinity); 7043 kcpuset_zero(affinity);
7044 kcpuset_set(affinity, cpu_id % ncpu); 7044 kcpuset_set(affinity, cpu_id % ncpu);
7045 error = interrupt_distribute(sc->osdep.ihs[vector], affinity, 7045 error = interrupt_distribute(sc->osdep.ihs[vector], affinity,
7046 NULL); 7046 NULL);
7047 7047
7048 aprint_normal_dev(dev, 7048 aprint_normal_dev(dev,
7049 "for link, interrupting at %s", intrstr); 7049 "for link, interrupting at %s", intrstr);
7050 if (error == 0) 7050 if (error == 0)
7051 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu); 7051 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
7052 else 7052 else
7053 aprint_normal("\n"); 7053 aprint_normal("\n");
7054 7054
7055 kcpuset_destroy(affinity); 7055 kcpuset_destroy(affinity);
7056 aprint_normal_dev(dev, 7056 aprint_normal_dev(dev,
7057 "Using MSI-X interrupts with %d vectors\n", vector + 1); 7057 "Using MSI-X interrupts with %d vectors\n", vector + 1);
7058 7058
7059 return (0); 7059 return (0);
7060 7060
7061err_out: 7061err_out:
7062 kcpuset_destroy(affinity); 7062 kcpuset_destroy(affinity);
7063 ixgbe_free_deferred_handlers(sc); 7063 ixgbe_free_deferred_handlers(sc);
7064 ixgbe_free_pciintr_resources(sc); 7064 ixgbe_free_pciintr_resources(sc);
7065 return (error); 7065 return (error);
7066} /* ixgbe_allocate_msix */ 7066} /* ixgbe_allocate_msix */
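
(Queue vectors are pinned round-robin across CPUs with a kcpuset, using
the same kcpuset(9) and interrupt_distribute(9) calls seen above; a
condensed sketch, where nvec and ih[] are illustrative:)

	kcpuset_t *aff;

	kcpuset_create(&aff, false);
	for (int i = 0; i < nvec; i++) {
		kcpuset_zero(aff);
		kcpuset_set(aff, i % ncpu);	/* CPU i, wrapping at ncpu */
		interrupt_distribute(ih[i], aff, NULL);
	}
	kcpuset_destroy(aff);
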
7067 7067
7068/************************************************************************ 7068/************************************************************************
7069 * ixgbe_configure_interrupts 7069 * ixgbe_configure_interrupts
7070 * 7070 *
7071 * Setup MSI-X, MSI, or legacy interrupts (in that order). 7071 * Setup MSI-X, MSI, or legacy interrupts (in that order).
7072 * This will also depend on user settings. 7072 * This will also depend on user settings.
7073 ************************************************************************/ 7073 ************************************************************************/
7074static int 7074static int
7075ixgbe_configure_interrupts(struct ixgbe_softc *sc) 7075ixgbe_configure_interrupts(struct ixgbe_softc *sc)
7076{ 7076{
7077 device_t dev = sc->dev; 7077 device_t dev = sc->dev;
7078 struct ixgbe_mac_info *mac = &sc->hw.mac; 7078 struct ixgbe_mac_info *mac = &sc->hw.mac;
7079 int want, queues, msgs; 7079 int want, queues, msgs;
7080 7080
7081 /* Default to 1 queue if MSI-X setup fails */ 7081 /* Default to 1 queue if MSI-X setup fails */
7082 sc->num_queues = 1; 7082 sc->num_queues = 1;
7083 7083
7084 /* Override by tuneable */ 7084 /* Override by tuneable */
7085 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX)) 7085 if (!(sc->feat_cap & IXGBE_FEATURE_MSIX))
7086 goto msi; 7086 goto msi;
7087 7087
7088 /* 7088 /*
7089 * NetBSD only: Use single-vector MSI when the number of CPUs is 1 7089 * NetBSD only: Use single-vector MSI when the number of CPUs is 1
7090 * to save an interrupt slot. 7090 * to save an interrupt slot.
7091 */ 7091 */
7092 if (ncpu == 1) 7092 if (ncpu == 1)
7093 goto msi; 7093 goto msi;
7094 7094
7095 /* First try MSI-X */ 7095 /* First try MSI-X */
7096 msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag); 7096 msgs = pci_msix_count(sc->osdep.pc, sc->osdep.tag);
7097 msgs = MIN(msgs, IXG_MAX_NINTR); 7097 msgs = MIN(msgs, IXG_MAX_NINTR);
7098 if (msgs < 2) 7098 if (msgs < 2)
7099 goto msi; 7099 goto msi;
7100 7100
7101 sc->feat_en |= IXGBE_FEATURE_MSIX; 7101 sc->feat_en |= IXGBE_FEATURE_MSIX;
7102 7102
7103 /* Figure out a reasonable auto config value */ 7103 /* Figure out a reasonable auto config value */
7104 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu; 7104 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
7105 7105
7106#ifdef RSS 7106#ifdef RSS
7107 /* If we're doing RSS, clamp at the number of RSS buckets */ 7107 /* If we're doing RSS, clamp at the number of RSS buckets */
7108 if (sc->feat_en & IXGBE_FEATURE_RSS) 7108 if (sc->feat_en & IXGBE_FEATURE_RSS)
7109 queues = uimin(queues, rss_getnumbuckets()); 7109 queues = uimin(queues, rss_getnumbuckets());
7110#endif 7110#endif
7111 if (ixgbe_num_queues > queues) { 7111 if (ixgbe_num_queues > queues) {
7112 aprint_error_dev(sc->dev, 7112 aprint_error_dev(sc->dev,
7113 "ixgbe_num_queues (%d) is too large, " 7113 "ixgbe_num_queues (%d) is too large, "
7114 "using reduced amount (%d).\n", ixgbe_num_queues, queues); 7114 "using reduced amount (%d).\n", ixgbe_num_queues, queues);
7115 ixgbe_num_queues = queues; 7115 ixgbe_num_queues = queues;
7116 } 7116 }
7117 7117
7118 if (ixgbe_num_queues != 0) 7118 if (ixgbe_num_queues != 0)
7119 queues = ixgbe_num_queues; 7119 queues = ixgbe_num_queues;
7120 else 7120 else
7121 queues = uimin(queues, 7121 queues = uimin(queues,
7122 uimin(mac->max_tx_queues, mac->max_rx_queues)); 7122 uimin(mac->max_tx_queues, mac->max_rx_queues));
7123 7123
7124 /* reflect correct sysctl value */ 
7125 ixgbe_num_queues = queues; 
7126 
7127 /* 7124 /*
7128 * Want one vector (RX/TX pair) per queue 7125 * Want one vector (RX/TX pair) per queue
7129 * plus an additional for Link. 7126 * plus an additional for Link.
7130 */ 7127 */
7131 want = queues + 1; 7128 want = queues + 1;
7132 if (msgs >= want) 7129 if (msgs >= want)
7133 msgs = want; 7130 msgs = want;
7134 else { 7131 else {
7135 aprint_error_dev(dev, "MSI-X Configuration Problem, " 7132 aprint_error_dev(dev, "MSI-X Configuration Problem, "
7136 "%d vectors but %d queues wanted!\n", msgs, want); 7133 "%d vectors but %d queues wanted!\n", msgs, want);
7137 goto msi; 7134 goto msi;
7138 } 7135 }
7139 sc->num_queues = queues; 7136 sc->num_queues = queues;
7140 sc->feat_en |= IXGBE_FEATURE_MSIX; 7137 sc->feat_en |= IXGBE_FEATURE_MSIX;
7141 return (0); 7138 return (0);
7142 7139
7143 /* 7140 /*
7144 * MSI-X allocation failed or provided us with 7141 * MSI-X allocation failed or provided us with
7145 * fewer vectors than needed. Free MSI-X resources 7142 * fewer vectors than needed. Free MSI-X resources
7146 * and we'll try enabling MSI. 7143 * and we'll try enabling MSI.
7147 */ 7144 */
7148msi: 7145msi:
7149 /* Without MSI-X, some features are no longer supported */ 7146 /* Without MSI-X, some features are no longer supported */
7150 sc->feat_cap &= ~IXGBE_FEATURE_RSS; 7147 sc->feat_cap &= ~IXGBE_FEATURE_RSS;
7151 sc->feat_en &= ~IXGBE_FEATURE_RSS; 7148 sc->feat_en &= ~IXGBE_FEATURE_RSS;
7152 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV; 7149 sc->feat_cap &= ~IXGBE_FEATURE_SRIOV;
7153 sc->feat_en &= ~IXGBE_FEATURE_SRIOV; 7150 sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
7154 7151
7155 msgs = pci_msi_count(sc->osdep.pc, sc->osdep.tag); 7152 msgs = pci_msi_count(sc->osdep.pc, sc->osdep.tag);
7156 sc->feat_en &= ~IXGBE_FEATURE_MSIX; 7153 sc->feat_en &= ~IXGBE_FEATURE_MSIX;
7157 if (msgs > 1) 7154 if (msgs > 1)
7158 msgs = 1; 7155 msgs = 1;
7159 if (msgs != 0) { 7156 if (msgs != 0) {
7160 msgs = 1; 7157 msgs = 1;
7161 sc->feat_en |= IXGBE_FEATURE_MSI; 7158 sc->feat_en |= IXGBE_FEATURE_MSI;
7162 return (0); 7159 return (0);
7163 } 7160 }
7164 7161
7165 if (!(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) { 7162 if (!(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ)) {
7166 aprint_error_dev(dev, 7163 aprint_error_dev(dev,
7167 "Device does not support legacy interrupts.\n"); 7164 "Device does not support legacy interrupts.\n");
7168 return 1; 7165 return 1;
7169 } 7166 }
7170 7167
7171 sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ; 7168 sc->feat_en |= IXGBE_FEATURE_LEGACY_IRQ;
7172 7169
7173 return (0); 7170 return (0);
7174} /* ixgbe_configure_interrupts */ 7171} /* ixgbe_configure_interrupts */
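
(The vector budget above works out to one RX/TX vector per queue plus one
for the link/admin interrupt. A worked example with hypothetical values,
msgs = 16 MSI-X messages and ncpu = 8:)

	queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;	/* -> 8 */
	want = queues + 1;			/* + link vector -> 9 */
	if (msgs >= want)
		msgs = want;			/* 9 of 16 vectors used */
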
7175 7172
7176 7173
7177/************************************************************************ 7174/************************************************************************
7178 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts 7175 * ixgbe_handle_link - Tasklet for MSI-X Link interrupts
7179 * 7176 *
7180 * Done outside of interrupt context since the driver might sleep 7177 * Done outside of interrupt context since the driver might sleep
7181 ************************************************************************/ 7178 ************************************************************************/
7182static void 7179static void
7183ixgbe_handle_link(void *context) 7180ixgbe_handle_link(void *context)
7184{ 7181{
7185 struct ixgbe_softc *sc = context; 7182 struct ixgbe_softc *sc = context;
7186 struct ixgbe_hw *hw = &sc->hw; 7183 struct ixgbe_hw *hw = &sc->hw;
7187 7184
7188 KASSERT(mutex_owned(&sc->core_mtx)); 7185 KASSERT(mutex_owned(&sc->core_mtx));
7189 7186
7190 IXGBE_EVC_ADD(&sc->link_workev, 1); 7187 IXGBE_EVC_ADD(&sc->link_workev, 1);
7191 ixgbe_check_link(hw, &sc->link_speed, &sc->link_up, 0); 7188 ixgbe_check_link(hw, &sc->link_speed, &sc->link_up, 0);
7192 ixgbe_update_link_status(sc); 7189 ixgbe_update_link_status(sc);
7193 7190
7194 /* Re-enable link interrupts */ 7191 /* Re-enable link interrupts */
7195 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC); 7192 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_LSC);
7196} /* ixgbe_handle_link */ 7193} /* ixgbe_handle_link */
7197 7194
7198#if 0 7195#if 0
7199/************************************************************************ 7196/************************************************************************
7200 * ixgbe_rearm_queues 7197 * ixgbe_rearm_queues
7201 ************************************************************************/ 7198 ************************************************************************/
7202static __inline void 7199static __inline void
7203ixgbe_rearm_queues(struct ixgbe_softc *sc, u64 queues) 7200ixgbe_rearm_queues(struct ixgbe_softc *sc, u64 queues)
7204{ 7201{
7205 u32 mask; 7202 u32 mask;
7206 7203
7207 switch (sc->hw.mac.type) { 7204 switch (sc->hw.mac.type) {
7208 case ixgbe_mac_82598EB: 7205 case ixgbe_mac_82598EB:
7209 mask = (IXGBE_EIMS_RTX_QUEUE & queues); 7206 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
7210 IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask); 7207 IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
7211 break; 7208 break;
7212 case ixgbe_mac_82599EB: 7209 case ixgbe_mac_82599EB:
7213 case ixgbe_mac_X540: 7210 case ixgbe_mac_X540:
7214 case ixgbe_mac_X550: 7211 case ixgbe_mac_X550:
7215 case ixgbe_mac_X550EM_x: 7212 case ixgbe_mac_X550EM_x:
7216 case ixgbe_mac_X550EM_a: 7213 case ixgbe_mac_X550EM_a:
7217 mask = (queues & 0xFFFFFFFF); 7214 mask = (queues & 0xFFFFFFFF);
7218 IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask); 7215 IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
7219 mask = (queues >> 32); 7216 mask = (queues >> 32);
7220 IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask); 7217 IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
7221 break; 7218 break;
7222 default: 7219 default:
7223 break; 7220 break;
7224 } 7221 }
7225} /* ixgbe_rearm_queues */ 7222} /* ixgbe_rearm_queues */
7226#endif 7223#endif