Thu Aug 27 04:49:52 2020 UTC
Fix compile error.


(msaitoh)
diff -r1.246 -r1.247 src/sys/dev/pci/ixgbe/ixgbe.c


--- src/sys/dev/pci/ixgbe/ixgbe.c 2020/08/27 03:57:52 1.246
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2020/08/27 04:49:52 1.247
@@ -1,1000 +1,1000 @@ @@ -1,1000 +1,1000 @@
1/* $NetBSD: ixgbe.c,v 1.246 2020/08/27 03:57:52 msaitoh Exp $ */ 1/* $NetBSD: ixgbe.c,v 1.247 2020/08/27 04:49:52 msaitoh Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37/* 37/*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc. 38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved. 39 * All rights reserved.
40 * 40 *
41 * This code is derived from software contributed to The NetBSD Foundation 41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc. 42 * by Coyote Point Systems, Inc.
43 * 43 *
44 * Redistribution and use in source and binary forms, with or without 44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions 45 * modification, are permitted provided that the following conditions
46 * are met: 46 * are met:
47 * 1. Redistributions of source code must retain the above copyright 47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer. 48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright 49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the 50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution. 51 * documentation and/or other materials provided with the distribution.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#ifdef _KERNEL_OPT 66#ifdef _KERNEL_OPT
67#include "opt_inet.h" 67#include "opt_inet.h"
68#include "opt_inet6.h" 68#include "opt_inet6.h"
69#include "opt_net_mpsafe.h" 69#include "opt_net_mpsafe.h"
70#endif 70#endif
71 71
72#include "ixgbe.h" 72#include "ixgbe.h"
73#include "ixgbe_sriov.h" 73#include "ixgbe_sriov.h"
74#include "vlan.h" 74#include "vlan.h"
75 75
76#include <sys/cprng.h> 76#include <sys/cprng.h>
77#include <dev/mii/mii.h> 77#include <dev/mii/mii.h>
78#include <dev/mii/miivar.h> 78#include <dev/mii/miivar.h>
79 79
80/************************************************************************ 80/************************************************************************
81 * Driver version 81 * Driver version
82 ************************************************************************/ 82 ************************************************************************/
83static const char ixgbe_driver_version[] = "4.0.1-k"; 83static const char ixgbe_driver_version[] = "4.0.1-k";
84/* XXX NetBSD: + 3.3.10 */ 84/* XXX NetBSD: + 3.3.10 */
85 85
86/************************************************************************ 86/************************************************************************
87 * PCI Device ID Table 87 * PCI Device ID Table
88 * 88 *
89 * Used by probe to select devices to load on 89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings 90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s 91 * Last entry must be all 0s
92 * 92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/ 94 ************************************************************************/
95static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] = 95static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96{ 96{
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, 97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, 98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, 99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, 100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, 101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, 102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0}, 103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, 104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, 105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, 106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, 107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, 108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0}, 109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, 110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, 111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, 112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0}, 113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, 114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, 115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, 116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, 117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, 118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, 119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, 120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, 121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, 122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, 123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, 124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, 125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, 126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0}, 127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, 128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, 129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, 130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0}, 131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0}, 132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0}, 133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0}, 134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0}, 135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0}, 136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0}, 137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0}, 138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0}, 139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0}, 140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0}, 141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0}, 142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0}, 143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0}, 144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0}, 145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0}, 146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
147 /* required last entry */ 147 /* required last entry */
148 {0, 0, 0, 0, 0} 148 {0, 0, 0, 0, 0}
149}; 149};
150 150
151/************************************************************************ 151/************************************************************************
152 * Table of branding strings 152 * Table of branding strings
153 ************************************************************************/ 153 ************************************************************************/
154static const char *ixgbe_strings[] = { 154static const char *ixgbe_strings[] = {
155 "Intel(R) PRO/10GbE PCI-Express Network Driver" 155 "Intel(R) PRO/10GbE PCI-Express Network Driver"
156}; 156};
157 157
158/************************************************************************ 158/************************************************************************
159 * Function prototypes 159 * Function prototypes
160 ************************************************************************/ 160 ************************************************************************/
161static int ixgbe_probe(device_t, cfdata_t, void *); 161static int ixgbe_probe(device_t, cfdata_t, void *);
162static void ixgbe_quirks(struct adapter *); 162static void ixgbe_quirks(struct adapter *);
163static void ixgbe_attach(device_t, device_t, void *); 163static void ixgbe_attach(device_t, device_t, void *);
164static int ixgbe_detach(device_t, int); 164static int ixgbe_detach(device_t, int);
165#if 0 165#if 0
166static int ixgbe_shutdown(device_t); 166static int ixgbe_shutdown(device_t);
167#endif 167#endif
168static bool ixgbe_suspend(device_t, const pmf_qual_t *); 168static bool ixgbe_suspend(device_t, const pmf_qual_t *);
169static bool ixgbe_resume(device_t, const pmf_qual_t *); 169static bool ixgbe_resume(device_t, const pmf_qual_t *);
170static int ixgbe_ifflags_cb(struct ethercom *); 170static int ixgbe_ifflags_cb(struct ethercom *);
171static int ixgbe_ioctl(struct ifnet *, u_long, void *); 171static int ixgbe_ioctl(struct ifnet *, u_long, void *);
172static int ixgbe_init(struct ifnet *); 172static int ixgbe_init(struct ifnet *);
173static void ixgbe_init_locked(struct adapter *); 173static void ixgbe_init_locked(struct adapter *);
174static void ixgbe_ifstop(struct ifnet *, int); 174static void ixgbe_ifstop(struct ifnet *, int);
175static void ixgbe_stop(void *); 175static void ixgbe_stop(void *);
176static void ixgbe_init_device_features(struct adapter *); 176static void ixgbe_init_device_features(struct adapter *);
177static void ixgbe_check_fan_failure(struct adapter *, u32, bool); 177static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
178static void ixgbe_add_media_types(struct adapter *); 178static void ixgbe_add_media_types(struct adapter *);
179static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); 179static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
180static int ixgbe_media_change(struct ifnet *); 180static int ixgbe_media_change(struct ifnet *);
181static int ixgbe_allocate_pci_resources(struct adapter *, 181static int ixgbe_allocate_pci_resources(struct adapter *,
182 const struct pci_attach_args *); 182 const struct pci_attach_args *);
183static void ixgbe_free_workqueue(struct adapter *); 183static void ixgbe_free_workqueue(struct adapter *);
184static void ixgbe_get_slot_info(struct adapter *); 184static void ixgbe_get_slot_info(struct adapter *);
185static int ixgbe_allocate_msix(struct adapter *, 185static int ixgbe_allocate_msix(struct adapter *,
186 const struct pci_attach_args *); 186 const struct pci_attach_args *);
187static int ixgbe_allocate_legacy(struct adapter *, 187static int ixgbe_allocate_legacy(struct adapter *,
188 const struct pci_attach_args *); 188 const struct pci_attach_args *);
189static int ixgbe_configure_interrupts(struct adapter *); 189static int ixgbe_configure_interrupts(struct adapter *);
190static void ixgbe_free_pciintr_resources(struct adapter *); 190static void ixgbe_free_pciintr_resources(struct adapter *);
191static void ixgbe_free_pci_resources(struct adapter *); 191static void ixgbe_free_pci_resources(struct adapter *);
192static void ixgbe_local_timer(void *); 192static void ixgbe_local_timer(void *);
193static void ixgbe_handle_timer(struct work *, void *); 193static void ixgbe_handle_timer(struct work *, void *);
194static void ixgbe_recovery_mode_timer(void *); 194static void ixgbe_recovery_mode_timer(void *);
195static void ixgbe_handle_recovery_mode_timer(struct work *, void *); 195static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
196static int ixgbe_setup_interface(device_t, struct adapter *); 196static int ixgbe_setup_interface(device_t, struct adapter *);
197static void ixgbe_config_gpie(struct adapter *); 197static void ixgbe_config_gpie(struct adapter *);
198static void ixgbe_config_dmac(struct adapter *); 198static void ixgbe_config_dmac(struct adapter *);
199static void ixgbe_config_delay_values(struct adapter *); 199static void ixgbe_config_delay_values(struct adapter *);
200static void ixgbe_schedule_admin_tasklet(struct adapter *); 200static void ixgbe_schedule_admin_tasklet(struct adapter *);
201static void ixgbe_config_link(struct adapter *); 201static void ixgbe_config_link(struct adapter *);
202static void ixgbe_check_wol_support(struct adapter *); 202static void ixgbe_check_wol_support(struct adapter *);
203static int ixgbe_setup_low_power_mode(struct adapter *); 203static int ixgbe_setup_low_power_mode(struct adapter *);
204#if 0 204#if 0
205static void ixgbe_rearm_queues(struct adapter *, u64); 205static void ixgbe_rearm_queues(struct adapter *, u64);
206#endif 206#endif
207 207
208static void ixgbe_initialize_transmit_units(struct adapter *); 208static void ixgbe_initialize_transmit_units(struct adapter *);
209static void ixgbe_initialize_receive_units(struct adapter *); 209static void ixgbe_initialize_receive_units(struct adapter *);
210static void ixgbe_enable_rx_drop(struct adapter *); 210static void ixgbe_enable_rx_drop(struct adapter *);
211static void ixgbe_disable_rx_drop(struct adapter *); 211static void ixgbe_disable_rx_drop(struct adapter *);
212static void ixgbe_initialize_rss_mapping(struct adapter *); 212static void ixgbe_initialize_rss_mapping(struct adapter *);
213 213
214static void ixgbe_enable_intr(struct adapter *); 214static void ixgbe_enable_intr(struct adapter *);
215static void ixgbe_disable_intr(struct adapter *); 215static void ixgbe_disable_intr(struct adapter *);
216static void ixgbe_update_stats_counters(struct adapter *); 216static void ixgbe_update_stats_counters(struct adapter *);
217static void ixgbe_set_rxfilter(struct adapter *); 217static void ixgbe_set_rxfilter(struct adapter *);
218static void ixgbe_update_link_status(struct adapter *); 218static void ixgbe_update_link_status(struct adapter *);
219static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); 219static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
220static void ixgbe_configure_ivars(struct adapter *); 220static void ixgbe_configure_ivars(struct adapter *);
221static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 221static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
222static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t); 222static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
223 223
224static void ixgbe_setup_vlan_hw_tagging(struct adapter *); 224static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
225static void ixgbe_setup_vlan_hw_support(struct adapter *); 225static void ixgbe_setup_vlan_hw_support(struct adapter *);
226static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool); 226static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
227static int ixgbe_register_vlan(struct adapter *, u16); 227static int ixgbe_register_vlan(struct adapter *, u16);
228static int ixgbe_unregister_vlan(struct adapter *, u16); 228static int ixgbe_unregister_vlan(struct adapter *, u16);
229 229
230static void ixgbe_add_device_sysctls(struct adapter *); 230static void ixgbe_add_device_sysctls(struct adapter *);
231static void ixgbe_add_hw_stats(struct adapter *); 231static void ixgbe_add_hw_stats(struct adapter *);
232static void ixgbe_clear_evcnt(struct adapter *); 232static void ixgbe_clear_evcnt(struct adapter *);
233static int ixgbe_set_flowcntl(struct adapter *, int); 233static int ixgbe_set_flowcntl(struct adapter *, int);
234static int ixgbe_set_advertise(struct adapter *, int); 234static int ixgbe_set_advertise(struct adapter *, int);
235static int ixgbe_get_advertise(struct adapter *); 235static int ixgbe_get_advertise(struct adapter *);
236 236
237/* Sysctl handlers */ 237/* Sysctl handlers */
238static void ixgbe_set_sysctl_value(struct adapter *, const char *, 238static void ixgbe_set_sysctl_value(struct adapter *, const char *,
239 const char *, int *, int); 239 const char *, int *, int);
240static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO); 240static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
241static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO); 241static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
242static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); 242static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
243static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO); 243static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
244static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO); 244static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
245static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO); 245static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
246#ifdef IXGBE_DEBUG 246#ifdef IXGBE_DEBUG
247static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO); 247static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
248static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO); 248static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
249#endif 249#endif
250static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO); 250static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
251static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO); 251static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
252static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO); 252static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
253static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO); 253static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
254static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO); 254static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
255static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO); 255static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
256static int ixgbe_sysctl_debug(SYSCTLFN_PROTO); 256static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
257static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO); 257static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
258static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO); 258static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
259 259
260/* Support for pluggable optic modules */ 260/* Support for pluggable optic modules */
261static bool ixgbe_sfp_cage_full(struct adapter *); 261static bool ixgbe_sfp_cage_full(struct adapter *);
262 262
263/* Legacy (single vector) interrupt handler */ 263/* Legacy (single vector) interrupt handler */
264static int ixgbe_legacy_irq(void *); 264static int ixgbe_legacy_irq(void *);
265 265
266/* The MSI/MSI-X Interrupt handlers */ 266/* The MSI/MSI-X Interrupt handlers */
267static int ixgbe_msix_que(void *); 267static int ixgbe_msix_que(void *);
268static int ixgbe_msix_admin(void *); 268static int ixgbe_msix_admin(void *);
269 269
270/* Event handlers running on workqueue */ 270/* Event handlers running on workqueue */
271static void ixgbe_handle_que(void *); 271static void ixgbe_handle_que(void *);
272static void ixgbe_handle_link(void *); 272static void ixgbe_handle_link(void *);
273static void ixgbe_handle_msf(void *); 273static void ixgbe_handle_msf(void *);
274static void ixgbe_handle_mod(void *); 274static void ixgbe_handle_mod(void *);
275static void ixgbe_handle_phy(void *); 275static void ixgbe_handle_phy(void *);
276 276
277/* Deferred workqueue handlers */ 277/* Deferred workqueue handlers */
278static void ixgbe_handle_admin(struct work *, void *); 278static void ixgbe_handle_admin(struct work *, void *);
279static void ixgbe_handle_que_work(struct work *, void *); 279static void ixgbe_handle_que_work(struct work *, void *);
280 280
281static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *); 281static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
282 282
283/************************************************************************ 283/************************************************************************
284 * NetBSD Device Interface Entry Points 284 * NetBSD Device Interface Entry Points
285 ************************************************************************/ 285 ************************************************************************/
286CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter), 286CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
287 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL, 287 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
288 DVF_DETACH_SHUTDOWN); 288 DVF_DETACH_SHUTDOWN);
289 289
290#if 0 290#if 0
291devclass_t ix_devclass; 291devclass_t ix_devclass;
292DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); 292DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
293 293
294MODULE_DEPEND(ix, pci, 1, 1, 1); 294MODULE_DEPEND(ix, pci, 1, 1, 1);
295MODULE_DEPEND(ix, ether, 1, 1, 1); 295MODULE_DEPEND(ix, ether, 1, 1, 1);
296#ifdef DEV_NETMAP 296#ifdef DEV_NETMAP
297MODULE_DEPEND(ix, netmap, 1, 1, 1); 297MODULE_DEPEND(ix, netmap, 1, 1, 1);
298#endif 298#endif
299#endif 299#endif
300 300
301/* 301/*
302 * TUNEABLE PARAMETERS: 302 * TUNEABLE PARAMETERS:
303 */ 303 */
304 304
305/* 305/*
306 * AIM: Adaptive Interrupt Moderation 306 * AIM: Adaptive Interrupt Moderation
307 * which means that the interrupt rate 307 * which means that the interrupt rate
308 * is varied over time based on the 308 * is varied over time based on the
309 * traffic for that interrupt vector 309 * traffic for that interrupt vector
310 */ 310 */
311static bool ixgbe_enable_aim = true; 311static bool ixgbe_enable_aim = true;
312#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7) 312#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
313SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0, 313SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
314 "Enable adaptive interrupt moderation"); 314 "Enable adaptive interrupt moderation");
315 315
316static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 316static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
317SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 317SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
318 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); 318 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
319 319
320/* How many packets rxeof tries to clean at a time */ 320/* How many packets rxeof tries to clean at a time */
321static int ixgbe_rx_process_limit = 256; 321static int ixgbe_rx_process_limit = 256;
322SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, 322SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
323 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); 323 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
324 324
325/* How many packets txeof tries to clean at a time */ 325/* How many packets txeof tries to clean at a time */
326static int ixgbe_tx_process_limit = 256; 326static int ixgbe_tx_process_limit = 256;
327SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, 327SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
328 &ixgbe_tx_process_limit, 0, 328 &ixgbe_tx_process_limit, 0,
329 "Maximum number of sent packets to process at a time, -1 means unlimited"); 329 "Maximum number of sent packets to process at a time, -1 means unlimited");
330 330
331/* Flow control setting, default to full */ 331/* Flow control setting, default to full */
332static int ixgbe_flow_control = ixgbe_fc_full; 332static int ixgbe_flow_control = ixgbe_fc_full;
333SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, 333SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
334 &ixgbe_flow_control, 0, "Default flow control used for all adapters"); 334 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
335 335
336/* Which packet processing uses workqueue or softint */ 336/* Which packet processing uses workqueue or softint */
337static bool ixgbe_txrx_workqueue = false; 337static bool ixgbe_txrx_workqueue = false;
338 338
339/* 339/*
340 * Smart speed setting, default to on 340 * Smart speed setting, default to on
341 * this only works as a compile option 341 * this only works as a compile option
342 * right now as its during attach, set 342 * right now as its during attach, set
343 * this to 'ixgbe_smart_speed_off' to 343 * this to 'ixgbe_smart_speed_off' to
344 * disable. 344 * disable.
345 */ 345 */
346static int ixgbe_smart_speed = ixgbe_smart_speed_on; 346static int ixgbe_smart_speed = ixgbe_smart_speed_on;
347 347
348/* 348/*
349 * MSI-X should be the default for best performance, 349 * MSI-X should be the default for best performance,
350 * but this allows it to be forced off for testing. 350 * but this allows it to be forced off for testing.
351 */ 351 */
352static int ixgbe_enable_msix = 1; 352static int ixgbe_enable_msix = 1;
353SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, 353SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
354 "Enable MSI-X interrupts"); 354 "Enable MSI-X interrupts");
355 355
356/* 356/*
357 * Number of Queues, can be set to 0, 357 * Number of Queues, can be set to 0,
358 * it then autoconfigures based on the 358 * it then autoconfigures based on the
359 * number of cpus with a max of 8. This 359 * number of cpus with a max of 8. This
360 * can be overridden manually here. 360 * can be overridden manually here.
361 */ 361 */
362static int ixgbe_num_queues = 0; 362static int ixgbe_num_queues = 0;
363SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, 363SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
364 "Number of queues to configure, 0 indicates autoconfigure"); 364 "Number of queues to configure, 0 indicates autoconfigure");
365 365
366/* 366/*
367 * Number of TX descriptors per ring, 367 * Number of TX descriptors per ring,
368 * setting higher than RX as this seems 368 * setting higher than RX as this seems
369 * the better performing choice. 369 * the better performing choice.
370 */ 370 */
371static int ixgbe_txd = PERFORM_TXD; 371static int ixgbe_txd = PERFORM_TXD;
372SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0, 372SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
373 "Number of transmit descriptors per queue"); 373 "Number of transmit descriptors per queue");
374 374
375/* Number of RX descriptors per ring */ 375/* Number of RX descriptors per ring */
376static int ixgbe_rxd = PERFORM_RXD; 376static int ixgbe_rxd = PERFORM_RXD;
377SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, 377SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
378 "Number of receive descriptors per queue"); 378 "Number of receive descriptors per queue");
379 379
380/* 380/*
381 * Defining this on will allow the use 381 * Defining this on will allow the use
382 * of unsupported SFP+ modules, note that 382 * of unsupported SFP+ modules, note that
383 * doing so you are on your own :) 383 * doing so you are on your own :)
384 */ 384 */
385static int allow_unsupported_sfp = false; 385static int allow_unsupported_sfp = false;
386#define TUNABLE_INT(__x, __y) 386#define TUNABLE_INT(__x, __y)
387TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp); 387TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
388 388
389/* 389/*
390 * Not sure if Flow Director is fully baked, 390 * Not sure if Flow Director is fully baked,
391 * so we'll default to turning it off. 391 * so we'll default to turning it off.
392 */ 392 */
393static int ixgbe_enable_fdir = 0; 393static int ixgbe_enable_fdir = 0;
394SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, 394SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
395 "Enable Flow Director"); 395 "Enable Flow Director");
396 396
397/* Legacy Transmit (single queue) */ 397/* Legacy Transmit (single queue) */
398static int ixgbe_enable_legacy_tx = 0; 398static int ixgbe_enable_legacy_tx = 0;
399SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN, 399SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
400 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow"); 400 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
401 401
402/* Receive-Side Scaling */ 402/* Receive-Side Scaling */
403static int ixgbe_enable_rss = 1; 403static int ixgbe_enable_rss = 1;
404SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, 404SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
405 "Enable Receive-Side Scaling (RSS)"); 405 "Enable Receive-Side Scaling (RSS)");
406 406
407#if 0 407#if 0
408static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *); 408static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
409static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *); 409static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
410#endif 410#endif
411 411
412#ifdef NET_MPSAFE 412#ifdef NET_MPSAFE
413#define IXGBE_MPSAFE 1 413#define IXGBE_MPSAFE 1
414#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 414#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
415#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE 415#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
416#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 416#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
417#define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE 417#define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
418#else 418#else
419#define IXGBE_CALLOUT_FLAGS 0 419#define IXGBE_CALLOUT_FLAGS 0
420#define IXGBE_SOFTINT_FLAGS 0 420#define IXGBE_SOFTINT_FLAGS 0
421#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 421#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
422#define IXGBE_TASKLET_WQ_FLAGS 0 422#define IXGBE_TASKLET_WQ_FLAGS 0
423#endif 423#endif
424#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 424#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
425 425
426/************************************************************************ 426/************************************************************************
427 * ixgbe_initialize_rss_mapping 427 * ixgbe_initialize_rss_mapping
428 ************************************************************************/ 428 ************************************************************************/
429static void 429static void
430ixgbe_initialize_rss_mapping(struct adapter *adapter) 430ixgbe_initialize_rss_mapping(struct adapter *adapter)
431{ 431{
432 struct ixgbe_hw *hw = &adapter->hw; 432 struct ixgbe_hw *hw = &adapter->hw;
433 u32 reta = 0, mrqc, rss_key[10]; 433 u32 reta = 0, mrqc, rss_key[10];
434 int queue_id, table_size, index_mult; 434 int queue_id, table_size, index_mult;
435 int i, j; 435 int i, j;
436 u32 rss_hash_config; 436 u32 rss_hash_config;
437 437
438 /* force use default RSS key. */ 438 /* force use default RSS key. */
439#ifdef __NetBSD__ 439#ifdef __NetBSD__
440 rss_getkey((uint8_t *) &rss_key); 440 rss_getkey((uint8_t *) &rss_key);
441#else 441#else
442 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 442 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
443 /* Fetch the configured RSS key */ 443 /* Fetch the configured RSS key */
444 rss_getkey((uint8_t *) &rss_key); 444 rss_getkey((uint8_t *) &rss_key);
445 } else { 445 } else {
446 /* set up random bits */ 446 /* set up random bits */
447 cprng_fast(&rss_key, sizeof(rss_key)); 447 cprng_fast(&rss_key, sizeof(rss_key));
448 } 448 }
449#endif 449#endif
450 450
451 /* Set multiplier for RETA setup and table size based on MAC */ 451 /* Set multiplier for RETA setup and table size based on MAC */
452 index_mult = 0x1; 452 index_mult = 0x1;
453 table_size = 128; 453 table_size = 128;
454 switch (adapter->hw.mac.type) { 454 switch (adapter->hw.mac.type) {
455 case ixgbe_mac_82598EB: 455 case ixgbe_mac_82598EB:
456 index_mult = 0x11; 456 index_mult = 0x11;
457 break; 457 break;
458 case ixgbe_mac_X550: 458 case ixgbe_mac_X550:
459 case ixgbe_mac_X550EM_x: 459 case ixgbe_mac_X550EM_x:
460 case ixgbe_mac_X550EM_a: 460 case ixgbe_mac_X550EM_a:
461 table_size = 512; 461 table_size = 512;
462 break; 462 break;
463 default: 463 default:
464 break; 464 break;
465 } 465 }
466 466
467 /* Set up the redirection table */ 467 /* Set up the redirection table */
468 for (i = 0, j = 0; i < table_size; i++, j++) { 468 for (i = 0, j = 0; i < table_size; i++, j++) {
469 if (j == adapter->num_queues) 469 if (j == adapter->num_queues)
470 j = 0; 470 j = 0;
471 471
472 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 472 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
473 /* 473 /*
474 * Fetch the RSS bucket id for the given indirection 474 * Fetch the RSS bucket id for the given indirection
475 * entry. Cap it at the number of configured buckets 475 * entry. Cap it at the number of configured buckets
476 * (which is num_queues.) 476 * (which is num_queues.)
477 */ 477 */
478 queue_id = rss_get_indirection_to_bucket(i); 478 queue_id = rss_get_indirection_to_bucket(i);
479 queue_id = queue_id % adapter->num_queues; 479 queue_id = queue_id % adapter->num_queues;
480 } else 480 } else
481 queue_id = (j * index_mult); 481 queue_id = (j * index_mult);
482 482
483 /* 483 /*
484 * The low 8 bits are for hash value (n+0); 484 * The low 8 bits are for hash value (n+0);
485 * The next 8 bits are for hash value (n+1), etc. 485 * The next 8 bits are for hash value (n+1), etc.
486 */ 486 */
487 reta = reta >> 8; 487 reta = reta >> 8;
488 reta = reta | (((uint32_t) queue_id) << 24); 488 reta = reta | (((uint32_t) queue_id) << 24);
489 if ((i & 3) == 3) { 489 if ((i & 3) == 3) {
490 if (i < 128) 490 if (i < 128)
491 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 491 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
492 else 492 else
493 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 493 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
494 reta); 494 reta);
495 reta = 0; 495 reta = 0;
496 } 496 }
497 } 497 }
498 498
499 /* Now fill our hash function seeds */ 499 /* Now fill our hash function seeds */
500 for (i = 0; i < 10; i++) 500 for (i = 0; i < 10; i++)
501 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 501 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
502 502
503 /* Perform hash on these packet types */ 503 /* Perform hash on these packet types */
504 if (adapter->feat_en & IXGBE_FEATURE_RSS) 504 if (adapter->feat_en & IXGBE_FEATURE_RSS)
505 rss_hash_config = rss_gethashconfig(); 505 rss_hash_config = rss_gethashconfig();
506 else { 506 else {
507 /* 507 /*
508 * Disable UDP - IP fragments aren't currently being handled 508 * Disable UDP - IP fragments aren't currently being handled
509 * and so we end up with a mix of 2-tuple and 4-tuple 509 * and so we end up with a mix of 2-tuple and 4-tuple
510 * traffic. 510 * traffic.
511 */ 511 */
512 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 512 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
513 | RSS_HASHTYPE_RSS_TCP_IPV4 513 | RSS_HASHTYPE_RSS_TCP_IPV4
514 | RSS_HASHTYPE_RSS_IPV6 514 | RSS_HASHTYPE_RSS_IPV6
515 | RSS_HASHTYPE_RSS_TCP_IPV6 515 | RSS_HASHTYPE_RSS_TCP_IPV6
516 | RSS_HASHTYPE_RSS_IPV6_EX 516 | RSS_HASHTYPE_RSS_IPV6_EX
517 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 517 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
518 } 518 }
519 519
520 mrqc = IXGBE_MRQC_RSSEN; 520 mrqc = IXGBE_MRQC_RSSEN;
521 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 521 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
522 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 522 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
523 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 523 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
524 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 524 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
525 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 525 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
526 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 526 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
527 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 527 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
528 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 528 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
529 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 529 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
530 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 530 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
531 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 531 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
532 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 532 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
533 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 533 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
534 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 534 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
535 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 535 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
536 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 536 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
537 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 537 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
538 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 538 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
539 mrqc |= ixgbe_get_mrqc(adapter->iov_mode); 539 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
540 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 540 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
541} /* ixgbe_initialize_rss_mapping */ 541} /* ixgbe_initialize_rss_mapping */
542 542
543/************************************************************************ 543/************************************************************************
544 * ixgbe_initialize_receive_units - Setup receive registers and features. 544 * ixgbe_initialize_receive_units - Setup receive registers and features.
545 ************************************************************************/ 545 ************************************************************************/
546#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 546#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
547 547
548static void 548static void
549ixgbe_initialize_receive_units(struct adapter *adapter) 549ixgbe_initialize_receive_units(struct adapter *adapter)
550{ 550{
551 struct rx_ring *rxr = adapter->rx_rings; 551 struct rx_ring *rxr = adapter->rx_rings;
552 struct ixgbe_hw *hw = &adapter->hw; 552 struct ixgbe_hw *hw = &adapter->hw;
553 struct ifnet *ifp = adapter->ifp; 553 struct ifnet *ifp = adapter->ifp;
554 int i, j; 554 int i, j;
555 u32 bufsz, fctrl, srrctl, rxcsum; 555 u32 bufsz, fctrl, srrctl, rxcsum;
556 u32 hlreg; 556 u32 hlreg;
557 557
558 /* 558 /*
559 * Make sure receives are disabled while 559 * Make sure receives are disabled while
560 * setting up the descriptor ring 560 * setting up the descriptor ring
561 */ 561 */
562 ixgbe_disable_rx(hw); 562 ixgbe_disable_rx(hw);
563 563
564 /* Enable broadcasts */ 564 /* Enable broadcasts */
565 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 565 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
566 fctrl |= IXGBE_FCTRL_BAM; 566 fctrl |= IXGBE_FCTRL_BAM;
567 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 567 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
568 fctrl |= IXGBE_FCTRL_DPF; 568 fctrl |= IXGBE_FCTRL_DPF;
569 fctrl |= IXGBE_FCTRL_PMCF; 569 fctrl |= IXGBE_FCTRL_PMCF;
570 } 570 }
571 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 571 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
572 572
573 /* Set for Jumbo Frames? */ 573 /* Set for Jumbo Frames? */
574 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 574 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
575 if (ifp->if_mtu > ETHERMTU) 575 if (ifp->if_mtu > ETHERMTU)
576 hlreg |= IXGBE_HLREG0_JUMBOEN; 576 hlreg |= IXGBE_HLREG0_JUMBOEN;
577 else 577 else
578 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 578 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
579 579
580#ifdef DEV_NETMAP 580#ifdef DEV_NETMAP
581 /* CRC stripping is conditional in Netmap */ 581 /* CRC stripping is conditional in Netmap */
582 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 582 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
583 (ifp->if_capenable & IFCAP_NETMAP) && 583 (ifp->if_capenable & IFCAP_NETMAP) &&
584 !ix_crcstrip) 584 !ix_crcstrip)
585 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP; 585 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
586 else 586 else
587#endif /* DEV_NETMAP */ 587#endif /* DEV_NETMAP */
588 hlreg |= IXGBE_HLREG0_RXCRCSTRP; 588 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
589 589
590 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 590 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
591 591
592 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 592 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
593 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 593 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
594 594
595 for (i = 0; i < adapter->num_queues; i++, rxr++) { 595 for (i = 0; i < adapter->num_queues; i++, rxr++) {
596 u64 rdba = rxr->rxdma.dma_paddr; 596 u64 rdba = rxr->rxdma.dma_paddr;
597 u32 reg; 597 u32 reg;
598 int regnum = i / 4; /* 1 register per 4 queues */ 598 int regnum = i / 4; /* 1 register per 4 queues */
599 int regshift = i % 4; /* 4 bits per 1 queue */ 599 int regshift = i % 4; /* 4 bits per 1 queue */
600 j = rxr->me; 600 j = rxr->me;
601 601
602 /* Setup the Base and Length of the Rx Descriptor Ring */ 602 /* Setup the Base and Length of the Rx Descriptor Ring */
603 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 603 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
604 (rdba & 0x00000000ffffffffULL)); 604 (rdba & 0x00000000ffffffffULL));
605 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 605 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
606 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 606 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
607 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 607 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
608 608
609 /* Set up the SRRCTL register */ 609 /* Set up the SRRCTL register */
610 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 610 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
611 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 611 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
612 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 612 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
613 srrctl |= bufsz; 613 srrctl |= bufsz;
614 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 614 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
615 615
616 /* Set RQSMR (Receive Queue Statistic Mapping) register */ 616 /* Set RQSMR (Receive Queue Statistic Mapping) register */
617 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum)); 617 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
618 reg &= ~(0x000000ffUL << (regshift * 8)); 618 reg &= ~(0x000000ffUL << (regshift * 8));
619 reg |= i << (regshift * 8); 619 reg |= i << (regshift * 8);
620 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg); 620 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
621 621
622 /* 622 /*
623 * Set DROP_EN iff we have no flow control and >1 queue. 623 * Set DROP_EN iff we have no flow control and >1 queue.
624 * Note that srrctl was cleared shortly before during reset, 624 * Note that srrctl was cleared shortly before during reset,
625 * so we do not need to clear the bit, but do it just in case 625 * so we do not need to clear the bit, but do it just in case
626 * this code is moved elsewhere. 626 * this code is moved elsewhere.
627 */ 627 */
628 if (adapter->num_queues > 1 && 628 if (adapter->num_queues > 1 &&
629 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 629 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
630 srrctl |= IXGBE_SRRCTL_DROP_EN; 630 srrctl |= IXGBE_SRRCTL_DROP_EN;
631 } else { 631 } else {
632 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 632 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
633 } 633 }
634 634
635 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 635 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
636 636
637 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 637 /* Setup the HW Rx Head and Tail Descriptor Pointers */
638 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 638 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
639 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 639 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
640 640
641 /* Set the driver rx tail address */ 641 /* Set the driver rx tail address */
642 rxr->tail = IXGBE_RDT(rxr->me); 642 rxr->tail = IXGBE_RDT(rxr->me);
643 } 643 }
644 644
645 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 645 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
646 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 646 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
647 | IXGBE_PSRTYPE_UDPHDR 647 | IXGBE_PSRTYPE_UDPHDR
648 | IXGBE_PSRTYPE_IPV4HDR 648 | IXGBE_PSRTYPE_IPV4HDR
649 | IXGBE_PSRTYPE_IPV6HDR; 649 | IXGBE_PSRTYPE_IPV6HDR;
650 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 650 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
651 } 651 }
652 652
653 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 653 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
654 654
655 ixgbe_initialize_rss_mapping(adapter); 655 ixgbe_initialize_rss_mapping(adapter);
656 656
657 if (adapter->num_queues > 1) { 657 if (adapter->num_queues > 1) {
658 /* RSS and RX IPP Checksum are mutually exclusive */ 658 /* RSS and RX IPP Checksum are mutually exclusive */
659 rxcsum |= IXGBE_RXCSUM_PCSD; 659 rxcsum |= IXGBE_RXCSUM_PCSD;
660 } 660 }
661 661
662 if (ifp->if_capenable & IFCAP_RXCSUM) 662 if (ifp->if_capenable & IFCAP_RXCSUM)
663 rxcsum |= IXGBE_RXCSUM_PCSD; 663 rxcsum |= IXGBE_RXCSUM_PCSD;
664 664
665 /* This is useful for calculating UDP/IP fragment checksums */ 665 /* This is useful for calculating UDP/IP fragment checksums */
666 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 666 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
667 rxcsum |= IXGBE_RXCSUM_IPPCSE; 667 rxcsum |= IXGBE_RXCSUM_IPPCSE;
668 668
669 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 669 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
670 670
671} /* ixgbe_initialize_receive_units */ 671} /* ixgbe_initialize_receive_units */
672 672
673/************************************************************************ 673/************************************************************************
674 * ixgbe_initialize_transmit_units - Enable transmit units. 674 * ixgbe_initialize_transmit_units - Enable transmit units.
675 ************************************************************************/ 675 ************************************************************************/
676static void 676static void
677ixgbe_initialize_transmit_units(struct adapter *adapter) 677ixgbe_initialize_transmit_units(struct adapter *adapter)
678{ 678{
679 struct tx_ring *txr = adapter->tx_rings; 679 struct tx_ring *txr = adapter->tx_rings;
680 struct ixgbe_hw *hw = &adapter->hw; 680 struct ixgbe_hw *hw = &adapter->hw;
681 int i; 681 int i;
682 682
683 INIT_DEBUGOUT("ixgbe_initialize_transmit_units"); 683 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
684 684
685 /* Setup the Base and Length of the Tx Descriptor Ring */ 685 /* Setup the Base and Length of the Tx Descriptor Ring */
686 for (i = 0; i < adapter->num_queues; i++, txr++) { 686 for (i = 0; i < adapter->num_queues; i++, txr++) {
687 u64 tdba = txr->txdma.dma_paddr; 687 u64 tdba = txr->txdma.dma_paddr;
688 u32 txctrl = 0; 688 u32 txctrl = 0;
689 u32 tqsmreg, reg; 689 u32 tqsmreg, reg;
690 int regnum = i / 4; /* 1 register per 4 queues */ 690 int regnum = i / 4; /* 1 register per 4 queues */
691 int regshift = i % 4; /* 4 bits per 1 queue */ 691 int regshift = i % 4; /* 4 bits per 1 queue */
692 int j = txr->me; 692 int j = txr->me;
693 693
694 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 694 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
695 (tdba & 0x00000000ffffffffULL)); 695 (tdba & 0x00000000ffffffffULL));
696 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 696 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
697 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 697 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
698 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 698 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
699 699
700 /* 700 /*
701 * Set TQSMR (Transmit Queue Statistic Mapping) register. 701 * Set TQSMR (Transmit Queue Statistic Mapping) register.
702 * Register location is different between 82598 and others. 702 * Register location is different between 82598 and others.
703 */ 703 */
704 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 704 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
705 tqsmreg = IXGBE_TQSMR(regnum); 705 tqsmreg = IXGBE_TQSMR(regnum);
706 else 706 else
707 tqsmreg = IXGBE_TQSM(regnum); 707 tqsmreg = IXGBE_TQSM(regnum);
708 reg = IXGBE_READ_REG(hw, tqsmreg); 708 reg = IXGBE_READ_REG(hw, tqsmreg);
709 reg &= ~(0x000000ffUL << (regshift * 8)); 709 reg &= ~(0x000000ffUL << (regshift * 8));
710 reg |= i << (regshift * 8); 710 reg |= i << (regshift * 8);
711 IXGBE_WRITE_REG(hw, tqsmreg, reg); 711 IXGBE_WRITE_REG(hw, tqsmreg, reg);
712 712
713 /* Setup the HW Tx Head and Tail descriptor pointers */ 713 /* Setup the HW Tx Head and Tail descriptor pointers */
714 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 714 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
715 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 715 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
716 716
717 /* Cache the tail address */ 717 /* Cache the tail address */
718 txr->tail = IXGBE_TDT(j); 718 txr->tail = IXGBE_TDT(j);
719 719
720 txr->txr_no_space = false; 720 txr->txr_no_space = false;
721 721
722 /* Disable Head Writeback */ 722 /* Disable Head Writeback */
723 /* 723 /*
724 * Note: for X550 series devices, these registers are actually 724 * Note: for X550 series devices, these registers are actually
 725 * prefixed with TPH_ instead of DCA_, but the addresses and 725 * prefixed with TPH_ instead of DCA_, but the addresses and
726 * fields remain the same. 726 * fields remain the same.
727 */ 727 */
728 switch (hw->mac.type) { 728 switch (hw->mac.type) {
729 case ixgbe_mac_82598EB: 729 case ixgbe_mac_82598EB:
730 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 730 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
731 break; 731 break;
732 default: 732 default:
733 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 733 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
734 break; 734 break;
735 } 735 }
736 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 736 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
737 switch (hw->mac.type) { 737 switch (hw->mac.type) {
738 case ixgbe_mac_82598EB: 738 case ixgbe_mac_82598EB:
739 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 739 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
740 break; 740 break;
741 default: 741 default:
742 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 742 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
743 break; 743 break;
744 } 744 }
745 745
746 } 746 }
747 747
748 if (hw->mac.type != ixgbe_mac_82598EB) { 748 if (hw->mac.type != ixgbe_mac_82598EB) {
749 u32 dmatxctl, rttdcs; 749 u32 dmatxctl, rttdcs;
750 750
751 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 751 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
752 dmatxctl |= IXGBE_DMATXCTL_TE; 752 dmatxctl |= IXGBE_DMATXCTL_TE;
753 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 753 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
754 /* Disable arbiter to set MTQC */ 754 /* Disable arbiter to set MTQC */
755 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 755 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
756 rttdcs |= IXGBE_RTTDCS_ARBDIS; 756 rttdcs |= IXGBE_RTTDCS_ARBDIS;
757 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 757 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
758 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 758 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
759 ixgbe_get_mtqc(adapter->iov_mode)); 759 ixgbe_get_mtqc(adapter->iov_mode));
760 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 760 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
761 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 761 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 } 762 }
763 763
764 return; 764 return;
765} /* ixgbe_initialize_transmit_units */ 765} /* ixgbe_initialize_transmit_units */
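The TQSM/TQSMR write above packs one 8-bit statistics-map field per queue, four fields per 32-bit register (regnum = i / 4, regshift = i % 4). A minimal standalone sketch of that packing, with hypothetical names (tqsm_pack is illustrative, not a driver function):

#include <stdio.h>

/* Fold queue "queue"'s 8-bit statistics map into a TQSM-style register. */
static unsigned int
tqsm_pack(unsigned int reg, int queue, unsigned int map)
{
	int shift = (queue % 4) * 8;	/* four 8-bit fields per register */

	reg &= ~(0xffU << shift);	/* clear the stale field */
	reg |= (map & 0xffU) << shift;	/* insert the new mapping */
	return reg;
}

int
main(void)
{
	/* Queue 6 selects register 6 / 4 == 1, field 6 % 4 == 2. */
	printf("regnum=%d value=%#010x\n", 6 / 4, tqsm_pack(0, 6, 6));
	return 0;
}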
766 766
767static void 767static void
768ixgbe_quirks(struct adapter *adapter) 768ixgbe_quirks(struct adapter *adapter)
769{ 769{
770 device_t dev = adapter->dev; 770 device_t dev = adapter->dev;
771 const char *vendor, *product; 771 const char *vendor, *product;
772 772
773 /* Quirk for inverted logic of SFP+'s MOD_ABS */ 773 /* Quirk for inverted logic of SFP+'s MOD_ABS */
774 vendor = pmf_get_platform("system-vendor"); 774 vendor = pmf_get_platform("system-vendor");
775 product = pmf_get_platform("system-product"); 775 product = pmf_get_platform("system-product");
776 776
777 if ((vendor == NULL) || (product == NULL)) 777 if ((vendor == NULL) || (product == NULL))
778 return; 778 return;
779 779
780 if ((strcmp(vendor, "GIGABYTE") == 0) && 780 if ((strcmp(vendor, "GIGABYTE") == 0) &&
781 (strcmp(product, "MA10-ST0") == 0)) { 781 (strcmp(product, "MA10-ST0") == 0)) {
782 aprint_verbose_dev(dev, "Enable SFP+ MOD_ABS inverse quirk\n"); 782 aprint_verbose_dev(dev, "Enable SFP+ MOD_ABS inverse quirk\n");
783 adapter->quirks |= IXGBE_QUIRK_MOD_ABS_INVERT; 783 adapter->quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
784 } 784 }
785} 785}
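With a single affected board, the strcmp() pair above is fine; if more quirks accumulate, the usual next step is a table walk. A hedged sketch of that alternative, reusing the driver's u32 type and NetBSD's __arraycount() (the table and lookup function are illustrative, not part of the driver):

/* Hypothetical table-driven variant of the matching in ixgbe_quirks(). */
struct ixgbe_quirk_entry {
	const char	*vendor;
	const char	*product;
	u32		 quirks;
};

static const struct ixgbe_quirk_entry ixgbe_quirk_table[] = {
	{ "GIGABYTE", "MA10-ST0", IXGBE_QUIRK_MOD_ABS_INVERT },
};

static u32
ixgbe_lookup_quirks(const char *vendor, const char *product)
{
	for (size_t i = 0; i < __arraycount(ixgbe_quirk_table); i++) {
		const struct ixgbe_quirk_entry *q = &ixgbe_quirk_table[i];

		if (strcmp(vendor, q->vendor) == 0 &&
		    strcmp(product, q->product) == 0)
			return q->quirks;
	}
	return 0;
}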
786 786
787/************************************************************************ 787/************************************************************************
788 * ixgbe_attach - Device initialization routine 788 * ixgbe_attach - Device initialization routine
789 * 789 *
790 * Called when the driver is being loaded. 790 * Called when the driver is being loaded.
791 * Identifies the type of hardware, allocates all resources 791 * Identifies the type of hardware, allocates all resources
792 * and initializes the hardware. 792 * and initializes the hardware.
793 * 793 *
 794 * (attach returns void on NetBSD; errors are reported via aprint) 794 * (attach returns void on NetBSD; errors are reported via aprint)
795 ************************************************************************/ 795 ************************************************************************/
796static void 796static void
797ixgbe_attach(device_t parent, device_t dev, void *aux) 797ixgbe_attach(device_t parent, device_t dev, void *aux)
798{ 798{
799 struct adapter *adapter; 799 struct adapter *adapter;
800 struct ixgbe_hw *hw; 800 struct ixgbe_hw *hw;
801 int error = -1; 801 int error = -1;
802 u32 ctrl_ext; 802 u32 ctrl_ext;
803 u16 high, low, nvmreg; 803 u16 high, low, nvmreg;
804 pcireg_t id, subid; 804 pcireg_t id, subid;
805 const ixgbe_vendor_info_t *ent; 805 const ixgbe_vendor_info_t *ent;
806 struct pci_attach_args *pa = aux; 806 struct pci_attach_args *pa = aux;
807 bool unsupported_sfp = false; 807 bool unsupported_sfp = false;
808 const char *str; 808 const char *str;
809 char wqname[MAXCOMLEN]; 809 char wqname[MAXCOMLEN];
810 char buf[256]; 810 char buf[256];
811 811
812 INIT_DEBUGOUT("ixgbe_attach: begin"); 812 INIT_DEBUGOUT("ixgbe_attach: begin");
813 813
814 /* Allocate, clear, and link in our adapter structure */ 814 /* Allocate, clear, and link in our adapter structure */
815 adapter = device_private(dev); 815 adapter = device_private(dev);
816 adapter->hw.back = adapter; 816 adapter->hw.back = adapter;
817 adapter->dev = dev; 817 adapter->dev = dev;
818 hw = &adapter->hw; 818 hw = &adapter->hw;
819 adapter->osdep.pc = pa->pa_pc; 819 adapter->osdep.pc = pa->pa_pc;
820 adapter->osdep.tag = pa->pa_tag; 820 adapter->osdep.tag = pa->pa_tag;
821 if (pci_dma64_available(pa)) 821 if (pci_dma64_available(pa))
822 adapter->osdep.dmat = pa->pa_dmat64; 822 adapter->osdep.dmat = pa->pa_dmat64;
823 else 823 else
824 adapter->osdep.dmat = pa->pa_dmat; 824 adapter->osdep.dmat = pa->pa_dmat;
825 adapter->osdep.attached = false; 825 adapter->osdep.attached = false;
826 826
827 ent = ixgbe_lookup(pa); 827 ent = ixgbe_lookup(pa);
828 828
829 KASSERT(ent != NULL); 829 KASSERT(ent != NULL);
830 830
831 aprint_normal(": %s, Version - %s\n", 831 aprint_normal(": %s, Version - %s\n",
832 ixgbe_strings[ent->index], ixgbe_driver_version); 832 ixgbe_strings[ent->index], ixgbe_driver_version);
833 833
834 /* Set quirk flags */ 834 /* Set quirk flags */
835 ixgbe_quirks(adapter); 835 ixgbe_quirks(adapter);
836 836
837 /* Core Lock Init */ 837 /* Core Lock Init */
838 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 838 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
839 839
840 /* Set up the timer callout and workqueue */ 840 /* Set up the timer callout and workqueue */
841 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 841 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
842 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev)); 842 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
843 error = workqueue_create(&adapter->timer_wq, wqname, 843 error = workqueue_create(&adapter->timer_wq, wqname,
844 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 844 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
845 IXGBE_TASKLET_WQ_FLAGS); 845 IXGBE_TASKLET_WQ_FLAGS);
846 if (error) { 846 if (error) {
847 aprint_error_dev(dev, 847 aprint_error_dev(dev,
848 "could not create timer workqueue (%d)\n", error); 848 "could not create timer workqueue (%d)\n", error);
849 goto err_out; 849 goto err_out;
850 } 850 }
851 851
852 /* Determine hardware revision */ 852 /* Determine hardware revision */
853 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 853 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
854 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 854 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
855 855
856 hw->vendor_id = PCI_VENDOR(id); 856 hw->vendor_id = PCI_VENDOR(id);
857 hw->device_id = PCI_PRODUCT(id); 857 hw->device_id = PCI_PRODUCT(id);
858 hw->revision_id = 858 hw->revision_id =
859 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 859 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
860 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 860 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
861 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 861 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
862 862
863 /* 863 /*
864 * Make sure BUSMASTER is set 864 * Make sure BUSMASTER is set
865 */ 865 */
866 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 866 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
867 867
868 /* Do base PCI setup - map BAR0 */ 868 /* Do base PCI setup - map BAR0 */
869 if (ixgbe_allocate_pci_resources(adapter, pa)) { 869 if (ixgbe_allocate_pci_resources(adapter, pa)) {
870 aprint_error_dev(dev, "Allocation of PCI resources failed\n"); 870 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
871 error = ENXIO; 871 error = ENXIO;
872 goto err_out; 872 goto err_out;
873 } 873 }
874 874
875 /* let hardware know driver is loaded */ 875 /* let hardware know driver is loaded */
876 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 876 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
877 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 877 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
878 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 878 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
879 879
880 /* 880 /*
881 * Initialize the shared code 881 * Initialize the shared code
882 */ 882 */
883 if (ixgbe_init_shared_code(hw) != 0) { 883 if (ixgbe_init_shared_code(hw) != 0) {
884 aprint_error_dev(dev, "Unable to initialize the shared code\n"); 884 aprint_error_dev(dev, "Unable to initialize the shared code\n");
885 error = ENXIO; 885 error = ENXIO;
886 goto err_out; 886 goto err_out;
887 } 887 }
888 888
889 switch (hw->mac.type) { 889 switch (hw->mac.type) {
890 case ixgbe_mac_82598EB: 890 case ixgbe_mac_82598EB:
891 str = "82598EB"; 891 str = "82598EB";
892 break; 892 break;
893 case ixgbe_mac_82599EB: 893 case ixgbe_mac_82599EB:
894 str = "82599EB"; 894 str = "82599EB";
895 break; 895 break;
896 case ixgbe_mac_X540: 896 case ixgbe_mac_X540:
897 str = "X540"; 897 str = "X540";
898 break; 898 break;
899 case ixgbe_mac_X550: 899 case ixgbe_mac_X550:
900 str = "X550"; 900 str = "X550";
901 break; 901 break;
902 case ixgbe_mac_X550EM_x: 902 case ixgbe_mac_X550EM_x:
903 str = "X550EM X"; 903 str = "X550EM X";
904 break; 904 break;
905 case ixgbe_mac_X550EM_a: 905 case ixgbe_mac_X550EM_a:
906 str = "X550EM A"; 906 str = "X550EM A";
907 break; 907 break;
908 default: 908 default:
909 str = "Unknown"; 909 str = "Unknown";
910 break; 910 break;
911 } 911 }
912 aprint_normal_dev(dev, "device %s\n", str); 912 aprint_normal_dev(dev, "device %s\n", str);
913 913
914 if (hw->mbx.ops.init_params) 914 if (hw->mbx.ops.init_params)
915 hw->mbx.ops.init_params(hw); 915 hw->mbx.ops.init_params(hw);
916 916
917 hw->allow_unsupported_sfp = allow_unsupported_sfp; 917 hw->allow_unsupported_sfp = allow_unsupported_sfp;
918 918
919 /* Pick up the 82599 settings */ 919 /* Pick up the 82599 settings */
920 if (hw->mac.type != ixgbe_mac_82598EB) { 920 if (hw->mac.type != ixgbe_mac_82598EB) {
921 hw->phy.smart_speed = ixgbe_smart_speed; 921 hw->phy.smart_speed = ixgbe_smart_speed;
922 adapter->num_segs = IXGBE_82599_SCATTER; 922 adapter->num_segs = IXGBE_82599_SCATTER;
923 } else 923 } else
924 adapter->num_segs = IXGBE_82598_SCATTER; 924 adapter->num_segs = IXGBE_82598_SCATTER;
925 925
926 /* Ensure SW/FW semaphore is free */ 926 /* Ensure SW/FW semaphore is free */
927 ixgbe_init_swfw_semaphore(hw); 927 ixgbe_init_swfw_semaphore(hw);
928 928
929 hw->mac.ops.set_lan_id(hw); 929 hw->mac.ops.set_lan_id(hw);
930 ixgbe_init_device_features(adapter); 930 ixgbe_init_device_features(adapter);
931 931
932 if (ixgbe_configure_interrupts(adapter)) { 932 if (ixgbe_configure_interrupts(adapter)) {
933 error = ENXIO; 933 error = ENXIO;
934 goto err_out; 934 goto err_out;
935 } 935 }
936 936
937 /* Allocate multicast array memory. */ 937 /* Allocate multicast array memory. */
938 adapter->mta = malloc(sizeof(*adapter->mta) * 938 adapter->mta = malloc(sizeof(*adapter->mta) *
939 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK); 939 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
940 940
941 /* Enable WoL (if supported) */ 941 /* Enable WoL (if supported) */
942 ixgbe_check_wol_support(adapter); 942 ixgbe_check_wol_support(adapter);
943 943
944 /* Register for VLAN events */ 944 /* Register for VLAN events */
945 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); 945 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
946 946
947 /* Verify adapter fan is still functional (if applicable) */ 947 /* Verify adapter fan is still functional (if applicable) */
948 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 948 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
949 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 949 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
950 ixgbe_check_fan_failure(adapter, esdp, FALSE); 950 ixgbe_check_fan_failure(adapter, esdp, FALSE);
951 } 951 }
952 952
953 /* Set an initial default flow control value */ 953 /* Set an initial default flow control value */
954 hw->fc.requested_mode = ixgbe_flow_control; 954 hw->fc.requested_mode = ixgbe_flow_control;
955 955
956 /* Sysctls for limiting the amount of work done in the taskqueues */ 956 /* Sysctls for limiting the amount of work done in the taskqueues */
957 ixgbe_set_sysctl_value(adapter, "rx_processing_limit", 957 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
958 "max number of rx packets to process", 958 "max number of rx packets to process",
959 &adapter->rx_process_limit, ixgbe_rx_process_limit); 959 &adapter->rx_process_limit, ixgbe_rx_process_limit);
960 960
961 ixgbe_set_sysctl_value(adapter, "tx_processing_limit", 961 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
962 "max number of tx packets to process", 962 "max number of tx packets to process",
963 &adapter->tx_process_limit, ixgbe_tx_process_limit); 963 &adapter->tx_process_limit, ixgbe_tx_process_limit);
964 964
965 /* Do descriptor calc and sanity checks */ 965 /* Do descriptor calc and sanity checks */
966 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 966 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
967 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { 967 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
968 aprint_error_dev(dev, "TXD config issue, using default!\n"); 968 aprint_error_dev(dev, "TXD config issue, using default!\n");
969 adapter->num_tx_desc = DEFAULT_TXD; 969 adapter->num_tx_desc = DEFAULT_TXD;
970 } else 970 } else
971 adapter->num_tx_desc = ixgbe_txd; 971 adapter->num_tx_desc = ixgbe_txd;
972 972
973 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 973 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
974 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { 974 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
975 aprint_error_dev(dev, "RXD config issue, using default!\n"); 975 aprint_error_dev(dev, "RXD config issue, using default!\n");
976 adapter->num_rx_desc = DEFAULT_RXD; 976 adapter->num_rx_desc = DEFAULT_RXD;
977 } else 977 } else
978 adapter->num_rx_desc = ixgbe_rxd; 978 adapter->num_rx_desc = ixgbe_rxd;
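	/*
	 * Worked example for the two checks above (hypothetical request):
	 * an advanced descriptor is 16 bytes, so 1024 descriptors occupy
	 * 16384 bytes, a multiple of the 128-byte DBA_ALIGN; 1020
	 * descriptors (16320 bytes, 16320 % 128 == 64) would be rejected
	 * and the default used instead.
	 */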
979 979
980 /* Allocate our TX/RX Queues */ 980 /* Allocate our TX/RX Queues */
981 if (ixgbe_allocate_queues(adapter)) { 981 if (ixgbe_allocate_queues(adapter)) {
982 error = ENOMEM; 982 error = ENOMEM;
983 goto err_out; 983 goto err_out;
984 } 984 }
985 985
986 hw->phy.reset_if_overtemp = TRUE; 986 hw->phy.reset_if_overtemp = TRUE;
987 error = ixgbe_reset_hw(hw); 987 error = ixgbe_reset_hw(hw);
988 hw->phy.reset_if_overtemp = FALSE; 988 hw->phy.reset_if_overtemp = FALSE;
989 if (error == IXGBE_ERR_SFP_NOT_PRESENT) 989 if (error == IXGBE_ERR_SFP_NOT_PRESENT)
990 error = IXGBE_SUCCESS; 990 error = IXGBE_SUCCESS;
991 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 991 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
992 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); 992 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
993 unsupported_sfp = true; 993 unsupported_sfp = true;
994 error = IXGBE_SUCCESS; 994 error = IXGBE_SUCCESS;
995 } else if (error) { 995 } else if (error) {
996 aprint_error_dev(dev, "Hardware initialization failed\n"); 996 aprint_error_dev(dev, "Hardware initialization failed\n");
997 error = EIO; 997 error = EIO;
998 goto err_late; 998 goto err_late;
999 } 999 }
1000 1000
@@ -3504,1999 +3504,1999 @@ map_err: @@ -3504,1999 +3504,1999 @@ map_err:
3504 default: 3504 default:
3505 aprint_error_dev(dev, "unexpected type on BAR0\n"); 3505 aprint_error_dev(dev, "unexpected type on BAR0\n");
3506 return ENXIO; 3506 return ENXIO;
3507 } 3507 }
3508 3508
3509 return (0); 3509 return (0);
3510} /* ixgbe_allocate_pci_resources */ 3510} /* ixgbe_allocate_pci_resources */
3511 3511
3512static void 3512static void
3513ixgbe_free_workqueue(struct adapter *adapter) 3513ixgbe_free_workqueue(struct adapter *adapter)
3514{ 3514{
3515 struct ix_queue *que = adapter->queues; 3515 struct ix_queue *que = adapter->queues;
3516 struct tx_ring *txr = adapter->tx_rings; 3516 struct tx_ring *txr = adapter->tx_rings;
3517 int i; 3517 int i;
3518 3518
3519 for (i = 0; i < adapter->num_queues; i++, que++, txr++) { 3519 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3520 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 3520 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3521 if (txr->txr_si != NULL) 3521 if (txr->txr_si != NULL)
3522 softint_disestablish(txr->txr_si); 3522 softint_disestablish(txr->txr_si);
3523 } 3523 }
3524 if (que->que_si != NULL) 3524 if (que->que_si != NULL)
3525 softint_disestablish(que->que_si); 3525 softint_disestablish(que->que_si);
3526 } 3526 }
3527 if (adapter->txr_wq != NULL) 3527 if (adapter->txr_wq != NULL)
3528 workqueue_destroy(adapter->txr_wq); 3528 workqueue_destroy(adapter->txr_wq);
3529 if (adapter->txr_wq_enqueued != NULL) 3529 if (adapter->txr_wq_enqueued != NULL)
3530 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); 3530 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3531 if (adapter->que_wq != NULL) 3531 if (adapter->que_wq != NULL)
3532 workqueue_destroy(adapter->que_wq); 3532 workqueue_destroy(adapter->que_wq);
3533 3533
3534 if (adapter->admin_wq != NULL) { 3534 if (adapter->admin_wq != NULL) {
3535 workqueue_destroy(adapter->admin_wq); 3535 workqueue_destroy(adapter->admin_wq);
3536 adapter->admin_wq = NULL; 3536 adapter->admin_wq = NULL;
3537 } 3537 }
3538 if (adapter->timer_wq != NULL) { 3538 if (adapter->timer_wq != NULL) {
3539 workqueue_destroy(adapter->timer_wq); 3539 workqueue_destroy(adapter->timer_wq);
3540 adapter->timer_wq = NULL; 3540 adapter->timer_wq = NULL;
3541 } 3541 }
3542 if (adapter->recovery_mode_timer_wq != NULL) { 3542 if (adapter->recovery_mode_timer_wq != NULL) {
3543 /* 3543 /*
3544 * ixgbe_ifstop() doesn't call the workqueue_wait() for 3544 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3545 * the recovery_mode_timer workqueue, so call it here. 3545 * the recovery_mode_timer workqueue, so call it here.
3546 */ 3546 */
3547 workqueue_wait(adapter->recovery_mode_timer_wq, 3547 workqueue_wait(adapter->recovery_mode_timer_wq,
3548 &adapter->recovery_mode_timer_wc); 3548 &adapter->recovery_mode_timer_wc);
3549 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0); 3549 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3550 workqueue_destroy(adapter->recovery_mode_timer_wq); 3550 workqueue_destroy(adapter->recovery_mode_timer_wq);
3551 adapter->recovery_mode_timer_wq = NULL; 3551 adapter->recovery_mode_timer_wq = NULL;
3552 } 3552 }
3553} /* ixgbe_free_workqueue */ 3553} /* ixgbe_free_workqueue */
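Every workqueue created during attach must be drained before it is destroyed, which is why the admin, timer, and recovery paths above call workqueue_wait() first. A minimal workqueue(9) lifecycle sketch with illustrative names (mydev_* is not driver code):

#include <sys/workqueue.h>

struct mydev_softc {
	struct workqueue	*sc_wq;
	struct work		 sc_wk;	/* must stay stable while enqueued */
};

static void
mydev_work(struct work *wk, void *arg)
{
	/* deferred work runs here in thread context */
}

static int
mydev_wq_init(struct mydev_softc *sc)
{
	return workqueue_create(&sc->sc_wq, "mydevwq", mydev_work, sc,
	    PRI_NONE, IPL_NET, WQ_MPSAFE);
}

static void
mydev_wq_fini(struct mydev_softc *sc)
{
	if (sc->sc_wq != NULL) {
		workqueue_wait(sc->sc_wq, &sc->sc_wk);	/* drain first */
		workqueue_destroy(sc->sc_wq);
		sc->sc_wq = NULL;			/* guard double free */
	}
}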
3554 3554
3555/************************************************************************ 3555/************************************************************************
3556 * ixgbe_detach - Device removal routine 3556 * ixgbe_detach - Device removal routine
3557 * 3557 *
3558 * Called when the driver is being removed. 3558 * Called when the driver is being removed.
3559 * Stops the adapter and deallocates all the resources 3559 * Stops the adapter and deallocates all the resources
3560 * that were allocated for driver operation. 3560 * that were allocated for driver operation.
3561 * 3561 *
3562 * return 0 on success, positive on failure 3562 * return 0 on success, positive on failure
3563 ************************************************************************/ 3563 ************************************************************************/
3564static int 3564static int
3565ixgbe_detach(device_t dev, int flags) 3565ixgbe_detach(device_t dev, int flags)
3566{ 3566{
3567 struct adapter *adapter = device_private(dev); 3567 struct adapter *adapter = device_private(dev);
3568 struct rx_ring *rxr = adapter->rx_rings; 3568 struct rx_ring *rxr = adapter->rx_rings;
3569 struct tx_ring *txr = adapter->tx_rings; 3569 struct tx_ring *txr = adapter->tx_rings;
3570 struct ixgbe_hw *hw = &adapter->hw; 3570 struct ixgbe_hw *hw = &adapter->hw;
3571 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 3571 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3572 u32 ctrl_ext; 3572 u32 ctrl_ext;
3573 int i; 3573 int i;
3574 3574
3575 INIT_DEBUGOUT("ixgbe_detach: begin"); 3575 INIT_DEBUGOUT("ixgbe_detach: begin");
3576 if (adapter->osdep.attached == false) 3576 if (adapter->osdep.attached == false)
3577 return 0; 3577 return 0;
3578 3578
3579 if (ixgbe_pci_iov_detach(dev) != 0) { 3579 if (ixgbe_pci_iov_detach(dev) != 0) {
3580 device_printf(dev, "SR-IOV in use; detach first.\n"); 3580 device_printf(dev, "SR-IOV in use; detach first.\n");
3581 return (EBUSY); 3581 return (EBUSY);
3582 } 3582 }
3583 3583
3584#if NVLAN > 0 3584#if NVLAN > 0
3585 /* Make sure VLANs are not using driver */ 3585 /* Make sure VLANs are not using driver */
3586 if (!VLAN_ATTACHED(&adapter->osdep.ec)) 3586 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3587 ; /* nothing to do: no VLANs */ 3587 ; /* nothing to do: no VLANs */
3588 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) 3588 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3589 vlan_ifdetach(adapter->ifp); 3589 vlan_ifdetach(adapter->ifp);
3590 else { 3590 else {
3591 aprint_error_dev(dev, "VLANs in use, detach first\n"); 3591 aprint_error_dev(dev, "VLANs in use, detach first\n");
3592 return (EBUSY); 3592 return (EBUSY);
3593 } 3593 }
3594#endif 3594#endif
3595 3595
3596 /* 3596 /*
3597 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(), 3597 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3598 * so it's not required to call ixgbe_stop() directly. 3598 * so it's not required to call ixgbe_stop() directly.
3599 */ 3599 */
3600 IXGBE_CORE_LOCK(adapter); 3600 IXGBE_CORE_LOCK(adapter);
3601 ixgbe_setup_low_power_mode(adapter); 3601 ixgbe_setup_low_power_mode(adapter);
3602 IXGBE_CORE_UNLOCK(adapter); 3602 IXGBE_CORE_UNLOCK(adapter);
3603 3603
3604 callout_halt(&adapter->timer, NULL); 3604 callout_halt(&adapter->timer, NULL);
3605 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { 3605 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
3606 callout_stop(&adapter->recovery_mode_timer); 3606 callout_stop(&adapter->recovery_mode_timer);
3607 callout_halt(&adapter->recovery_mode_timer, NULL); 3607 callout_halt(&adapter->recovery_mode_timer, NULL);
3608 } 3608 }
3609 3609
3610 workqueue_wait(adapter->admin_wq, &adapter->admin_wc); 3610 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3611 atomic_store_relaxed(&adapter->admin_pending, 0); 3611 atomic_store_relaxed(&adapter->admin_pending, 0);
3612 workqueue_wait(adapter->timer_wq, &adapter->timer_wc); 3612 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3613 atomic_store_relaxed(&adapter->timer_pending, 0); 3613 atomic_store_relaxed(&adapter->timer_pending, 0);
3614 3614
3615 pmf_device_deregister(dev); 3615 pmf_device_deregister(dev);
3616 3616
3617 ether_ifdetach(adapter->ifp); 3617 ether_ifdetach(adapter->ifp);
3618 3618
3619 ixgbe_free_workqueue(adapter); 3619 ixgbe_free_workqueue(adapter);
3620 3620
3621 /* let hardware know driver is unloading */ 3621 /* let hardware know driver is unloading */
3622 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 3622 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3623 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3623 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 3624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3625 3625
3626 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 3626 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3627 netmap_detach(adapter->ifp); 3627 netmap_detach(adapter->ifp);
3628 3628
3629 ixgbe_free_pci_resources(adapter); 3629 ixgbe_free_pci_resources(adapter);
3630#if 0 /* XXX the NetBSD port is probably missing something here */ 3630#if 0 /* XXX the NetBSD port is probably missing something here */
3631 bus_generic_detach(dev); 3631 bus_generic_detach(dev);
3632#endif 3632#endif
3633 if_detach(adapter->ifp); 3633 if_detach(adapter->ifp);
3634 ifmedia_fini(&adapter->media); 3634 ifmedia_fini(&adapter->media);
3635 if_percpuq_destroy(adapter->ipq); 3635 if_percpuq_destroy(adapter->ipq);
3636 3636
3637 sysctl_teardown(&adapter->sysctllog); 3637 sysctl_teardown(&adapter->sysctllog);
3638 evcnt_detach(&adapter->efbig_tx_dma_setup); 3638 evcnt_detach(&adapter->efbig_tx_dma_setup);
3639 evcnt_detach(&adapter->mbuf_defrag_failed); 3639 evcnt_detach(&adapter->mbuf_defrag_failed);
3640 evcnt_detach(&adapter->efbig2_tx_dma_setup); 3640 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3641 evcnt_detach(&adapter->einval_tx_dma_setup); 3641 evcnt_detach(&adapter->einval_tx_dma_setup);
3642 evcnt_detach(&adapter->other_tx_dma_setup); 3642 evcnt_detach(&adapter->other_tx_dma_setup);
3643 evcnt_detach(&adapter->eagain_tx_dma_setup); 3643 evcnt_detach(&adapter->eagain_tx_dma_setup);
3644 evcnt_detach(&adapter->enomem_tx_dma_setup); 3644 evcnt_detach(&adapter->enomem_tx_dma_setup);
3645 evcnt_detach(&adapter->watchdog_events); 3645 evcnt_detach(&adapter->watchdog_events);
3646 evcnt_detach(&adapter->tso_err); 3646 evcnt_detach(&adapter->tso_err);
3647 evcnt_detach(&adapter->admin_irqev); 3647 evcnt_detach(&adapter->admin_irqev);
3648 evcnt_detach(&adapter->link_workev); 3648 evcnt_detach(&adapter->link_workev);
3649 evcnt_detach(&adapter->mod_workev); 3649 evcnt_detach(&adapter->mod_workev);
3650 evcnt_detach(&adapter->msf_workev); 3650 evcnt_detach(&adapter->msf_workev);
3651 evcnt_detach(&adapter->phy_workev); 3651 evcnt_detach(&adapter->phy_workev);
3652 3652
3653 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 3653 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3654 if (i < __arraycount(stats->mpc)) { 3654 if (i < __arraycount(stats->mpc)) {
3655 evcnt_detach(&stats->mpc[i]); 3655 evcnt_detach(&stats->mpc[i]);
3656 if (hw->mac.type == ixgbe_mac_82598EB) 3656 if (hw->mac.type == ixgbe_mac_82598EB)
3657 evcnt_detach(&stats->rnbc[i]); 3657 evcnt_detach(&stats->rnbc[i]);
3658 } 3658 }
3659 if (i < __arraycount(stats->pxontxc)) { 3659 if (i < __arraycount(stats->pxontxc)) {
3660 evcnt_detach(&stats->pxontxc[i]); 3660 evcnt_detach(&stats->pxontxc[i]);
3661 evcnt_detach(&stats->pxonrxc[i]); 3661 evcnt_detach(&stats->pxonrxc[i]);
3662 evcnt_detach(&stats->pxofftxc[i]); 3662 evcnt_detach(&stats->pxofftxc[i]);
3663 evcnt_detach(&stats->pxoffrxc[i]); 3663 evcnt_detach(&stats->pxoffrxc[i]);
3664 if (hw->mac.type >= ixgbe_mac_82599EB) 3664 if (hw->mac.type >= ixgbe_mac_82599EB)
3665 evcnt_detach(&stats->pxon2offc[i]); 3665 evcnt_detach(&stats->pxon2offc[i]);
3666 } 3666 }
3667 } 3667 }
3668 3668
3669 txr = adapter->tx_rings; 3669 txr = adapter->tx_rings;
3670 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 3670 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3671 evcnt_detach(&adapter->queues[i].irqs); 3671 evcnt_detach(&adapter->queues[i].irqs);
3672 evcnt_detach(&adapter->queues[i].handleq); 3672 evcnt_detach(&adapter->queues[i].handleq);
3673 evcnt_detach(&adapter->queues[i].req); 3673 evcnt_detach(&adapter->queues[i].req);
3674 evcnt_detach(&txr->no_desc_avail); 3674 evcnt_detach(&txr->no_desc_avail);
3675 evcnt_detach(&txr->total_packets); 3675 evcnt_detach(&txr->total_packets);
3676 evcnt_detach(&txr->tso_tx); 3676 evcnt_detach(&txr->tso_tx);
3677#ifndef IXGBE_LEGACY_TX 3677#ifndef IXGBE_LEGACY_TX
3678 evcnt_detach(&txr->pcq_drops); 3678 evcnt_detach(&txr->pcq_drops);
3679#endif 3679#endif
3680 3680
3681 if (i < __arraycount(stats->qprc)) { 3681 if (i < __arraycount(stats->qprc)) {
3682 evcnt_detach(&stats->qprc[i]); 3682 evcnt_detach(&stats->qprc[i]);
3683 evcnt_detach(&stats->qptc[i]); 3683 evcnt_detach(&stats->qptc[i]);
3684 evcnt_detach(&stats->qbrc[i]); 3684 evcnt_detach(&stats->qbrc[i]);
3685 evcnt_detach(&stats->qbtc[i]); 3685 evcnt_detach(&stats->qbtc[i]);
3686 if (hw->mac.type >= ixgbe_mac_82599EB) 3686 if (hw->mac.type >= ixgbe_mac_82599EB)
3687 evcnt_detach(&stats->qprdc[i]); 3687 evcnt_detach(&stats->qprdc[i]);
3688 } 3688 }
3689 3689
3690 evcnt_detach(&rxr->rx_packets); 3690 evcnt_detach(&rxr->rx_packets);
3691 evcnt_detach(&rxr->rx_bytes); 3691 evcnt_detach(&rxr->rx_bytes);
3692 evcnt_detach(&rxr->rx_copies); 3692 evcnt_detach(&rxr->rx_copies);
3693 evcnt_detach(&rxr->no_jmbuf); 3693 evcnt_detach(&rxr->no_jmbuf);
3694 evcnt_detach(&rxr->rx_discarded); 3694 evcnt_detach(&rxr->rx_discarded);
3695 } 3695 }
3696 evcnt_detach(&stats->ipcs); 3696 evcnt_detach(&stats->ipcs);
3697 evcnt_detach(&stats->l4cs); 3697 evcnt_detach(&stats->l4cs);
3698 evcnt_detach(&stats->ipcs_bad); 3698 evcnt_detach(&stats->ipcs_bad);
3699 evcnt_detach(&stats->l4cs_bad); 3699 evcnt_detach(&stats->l4cs_bad);
3700 evcnt_detach(&stats->intzero); 3700 evcnt_detach(&stats->intzero);
3701 evcnt_detach(&stats->legint); 3701 evcnt_detach(&stats->legint);
3702 evcnt_detach(&stats->crcerrs); 3702 evcnt_detach(&stats->crcerrs);
3703 evcnt_detach(&stats->illerrc); 3703 evcnt_detach(&stats->illerrc);
3704 evcnt_detach(&stats->errbc); 3704 evcnt_detach(&stats->errbc);
3705 evcnt_detach(&stats->mspdc); 3705 evcnt_detach(&stats->mspdc);
3706 if (hw->mac.type >= ixgbe_mac_X550) 3706 if (hw->mac.type >= ixgbe_mac_X550)
3707 evcnt_detach(&stats->mbsdc); 3707 evcnt_detach(&stats->mbsdc);
3708 evcnt_detach(&stats->mpctotal); 3708 evcnt_detach(&stats->mpctotal);
3709 evcnt_detach(&stats->mlfc); 3709 evcnt_detach(&stats->mlfc);
3710 evcnt_detach(&stats->mrfc); 3710 evcnt_detach(&stats->mrfc);
3711 evcnt_detach(&stats->rlec); 3711 evcnt_detach(&stats->rlec);
3712 evcnt_detach(&stats->lxontxc); 3712 evcnt_detach(&stats->lxontxc);
3713 evcnt_detach(&stats->lxonrxc); 3713 evcnt_detach(&stats->lxonrxc);
3714 evcnt_detach(&stats->lxofftxc); 3714 evcnt_detach(&stats->lxofftxc);
3715 evcnt_detach(&stats->lxoffrxc); 3715 evcnt_detach(&stats->lxoffrxc);
3716 3716
3717 /* Packet Reception Stats */ 3717 /* Packet Reception Stats */
3718 evcnt_detach(&stats->tor); 3718 evcnt_detach(&stats->tor);
3719 evcnt_detach(&stats->gorc); 3719 evcnt_detach(&stats->gorc);
3720 evcnt_detach(&stats->tpr); 3720 evcnt_detach(&stats->tpr);
3721 evcnt_detach(&stats->gprc); 3721 evcnt_detach(&stats->gprc);
3722 evcnt_detach(&stats->mprc); 3722 evcnt_detach(&stats->mprc);
3723 evcnt_detach(&stats->bprc); 3723 evcnt_detach(&stats->bprc);
3724 evcnt_detach(&stats->prc64); 3724 evcnt_detach(&stats->prc64);
3725 evcnt_detach(&stats->prc127); 3725 evcnt_detach(&stats->prc127);
3726 evcnt_detach(&stats->prc255); 3726 evcnt_detach(&stats->prc255);
3727 evcnt_detach(&stats->prc511); 3727 evcnt_detach(&stats->prc511);
3728 evcnt_detach(&stats->prc1023); 3728 evcnt_detach(&stats->prc1023);
3729 evcnt_detach(&stats->prc1522); 3729 evcnt_detach(&stats->prc1522);
3730 evcnt_detach(&stats->ruc); 3730 evcnt_detach(&stats->ruc);
3731 evcnt_detach(&stats->rfc); 3731 evcnt_detach(&stats->rfc);
3732 evcnt_detach(&stats->roc); 3732 evcnt_detach(&stats->roc);
3733 evcnt_detach(&stats->rjc); 3733 evcnt_detach(&stats->rjc);
3734 evcnt_detach(&stats->mngprc); 3734 evcnt_detach(&stats->mngprc);
3735 evcnt_detach(&stats->mngpdc); 3735 evcnt_detach(&stats->mngpdc);
3736 evcnt_detach(&stats->xec); 3736 evcnt_detach(&stats->xec);
3737 3737
3738 /* Packet Transmission Stats */ 3738 /* Packet Transmission Stats */
3739 evcnt_detach(&stats->gotc); 3739 evcnt_detach(&stats->gotc);
3740 evcnt_detach(&stats->tpt); 3740 evcnt_detach(&stats->tpt);
3741 evcnt_detach(&stats->gptc); 3741 evcnt_detach(&stats->gptc);
3742 evcnt_detach(&stats->bptc); 3742 evcnt_detach(&stats->bptc);
3743 evcnt_detach(&stats->mptc); 3743 evcnt_detach(&stats->mptc);
3744 evcnt_detach(&stats->mngptc); 3744 evcnt_detach(&stats->mngptc);
3745 evcnt_detach(&stats->ptc64); 3745 evcnt_detach(&stats->ptc64);
3746 evcnt_detach(&stats->ptc127); 3746 evcnt_detach(&stats->ptc127);
3747 evcnt_detach(&stats->ptc255); 3747 evcnt_detach(&stats->ptc255);
3748 evcnt_detach(&stats->ptc511); 3748 evcnt_detach(&stats->ptc511);
3749 evcnt_detach(&stats->ptc1023); 3749 evcnt_detach(&stats->ptc1023);
3750 evcnt_detach(&stats->ptc1522); 3750 evcnt_detach(&stats->ptc1522);
3751 3751
3752 ixgbe_free_queues(adapter); 3752 ixgbe_free_queues(adapter);
3753 free(adapter->mta, M_DEVBUF); 3753 free(adapter->mta, M_DEVBUF);
3754 3754
3755 IXGBE_CORE_LOCK_DESTROY(adapter); 3755 IXGBE_CORE_LOCK_DESTROY(adapter);
3756 3756
3757 return (0); 3757 return (0);
3758} /* ixgbe_detach */ 3758} /* ixgbe_detach */
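Each evcnt_detach() above mirrors an evcnt_attach_dynamic() made during attach; the pairing below is the whole evcnt(9) contract in miniature (names are illustrative, not from the driver):

#include <sys/evcnt.h>

static struct evcnt mydev_rx_intr;	/* example event counter */

static void
mydev_counters_attach(const char *xname)
{
	evcnt_attach_dynamic(&mydev_rx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "rx interrupts");
}

static void
mydev_counters_detach(void)
{
	evcnt_detach(&mydev_rx_intr);	/* one detach per attach */
}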
3759 3759
3760/************************************************************************ 3760/************************************************************************
3761 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 3761 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3762 * 3762 *
3763 * Prepare the adapter/port for LPLU and/or WoL 3763 * Prepare the adapter/port for LPLU and/or WoL
3764 ************************************************************************/ 3764 ************************************************************************/
3765static int 3765static int
3766ixgbe_setup_low_power_mode(struct adapter *adapter) 3766ixgbe_setup_low_power_mode(struct adapter *adapter)
3767{ 3767{
3768 struct ixgbe_hw *hw = &adapter->hw; 3768 struct ixgbe_hw *hw = &adapter->hw;
3769 device_t dev = adapter->dev; 3769 device_t dev = adapter->dev;
3770 s32 error = 0; 3770 s32 error = 0;
3771 3771
3772 KASSERT(mutex_owned(&adapter->core_mtx)); 3772 KASSERT(mutex_owned(&adapter->core_mtx));
3773 3773
3774 /* Limit power management flow to X550EM baseT */ 3774 /* Limit power management flow to X550EM baseT */
3775 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 3775 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3776 hw->phy.ops.enter_lplu) { 3776 hw->phy.ops.enter_lplu) {
3777 /* X550EM baseT adapters need a special LPLU flow */ 3777 /* X550EM baseT adapters need a special LPLU flow */
3778 hw->phy.reset_disable = true; 3778 hw->phy.reset_disable = true;
3779 ixgbe_stop(adapter); 3779 ixgbe_stop(adapter);
3780 error = hw->phy.ops.enter_lplu(hw); 3780 error = hw->phy.ops.enter_lplu(hw);
3781 if (error) 3781 if (error)
3782 device_printf(dev, 3782 device_printf(dev,
3783 "Error entering LPLU: %d\n", error); 3783 "Error entering LPLU: %d\n", error);
3784 hw->phy.reset_disable = false; 3784 hw->phy.reset_disable = false;
3785 } else { 3785 } else {
3786 /* Just stop for other adapters */ 3786 /* Just stop for other adapters */
3787 ixgbe_stop(adapter); 3787 ixgbe_stop(adapter);
3788 } 3788 }
3789 3789
3790 if (!hw->wol_enabled) { 3790 if (!hw->wol_enabled) {
3791 ixgbe_set_phy_power(hw, FALSE); 3791 ixgbe_set_phy_power(hw, FALSE);
3792 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3792 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3793 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 3793 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3794 } else { 3794 } else {
3795 /* Turn off support for APM wakeup. (Using ACPI instead) */ 3795 /* Turn off support for APM wakeup. (Using ACPI instead) */
3796 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw), 3796 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3797 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2); 3797 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3798 3798
3799 /* 3799 /*
3800 * Clear Wake Up Status register to prevent any previous wakeup 3800 * Clear Wake Up Status register to prevent any previous wakeup
3801 * events from waking us up immediately after we suspend. 3801 * events from waking us up immediately after we suspend.
3802 */ 3802 */
3803 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3803 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3804 3804
3805 /* 3805 /*
3806 * Program the Wakeup Filter Control register with user filter 3806 * Program the Wakeup Filter Control register with user filter
3807 * settings 3807 * settings
3808 */ 3808 */
3809 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 3809 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3810 3810
3811 /* Enable wakeups and power management in Wakeup Control */ 3811 /* Enable wakeups and power management in Wakeup Control */
3812 IXGBE_WRITE_REG(hw, IXGBE_WUC, 3812 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3813 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 3813 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3814 3814
3815 } 3815 }
3816 3816
3817 return error; 3817 return error;
3818} /* ixgbe_setup_low_power_mode */ 3818} /* ixgbe_setup_low_power_mode */
3819 3819
3820/************************************************************************ 3820/************************************************************************
3821 * ixgbe_shutdown - Shutdown entry point 3821 * ixgbe_shutdown - Shutdown entry point
3822 ************************************************************************/ 3822 ************************************************************************/
3823#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ 3823#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3824static int 3824static int
3825ixgbe_shutdown(device_t dev) 3825ixgbe_shutdown(device_t dev)
3826{ 3826{
3827 struct adapter *adapter = device_private(dev); 3827 struct adapter *adapter = device_private(dev);
3828 int error = 0; 3828 int error = 0;
3829 3829
3830 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 3830 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3831 3831
3832 IXGBE_CORE_LOCK(adapter); 3832 IXGBE_CORE_LOCK(adapter);
3833 error = ixgbe_setup_low_power_mode(adapter); 3833 error = ixgbe_setup_low_power_mode(adapter);
3834 IXGBE_CORE_UNLOCK(adapter); 3834 IXGBE_CORE_UNLOCK(adapter);
3835 3835
3836 return (error); 3836 return (error);
3837} /* ixgbe_shutdown */ 3837} /* ixgbe_shutdown */
3838#endif 3838#endif
3839 3839
3840/************************************************************************ 3840/************************************************************************
3841 * ixgbe_suspend 3841 * ixgbe_suspend
3842 * 3842 *
3843 * From D0 to D3 3843 * From D0 to D3
3844 ************************************************************************/ 3844 ************************************************************************/
3845static bool 3845static bool
3846ixgbe_suspend(device_t dev, const pmf_qual_t *qual) 3846ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3847{ 3847{
3848 struct adapter *adapter = device_private(dev); 3848 struct adapter *adapter = device_private(dev);
3849 int error = 0; 3849 int error = 0;
3850 3850
3851 INIT_DEBUGOUT("ixgbe_suspend: begin"); 3851 INIT_DEBUGOUT("ixgbe_suspend: begin");
3852 3852
3853 IXGBE_CORE_LOCK(adapter); 3853 IXGBE_CORE_LOCK(adapter);
3854 3854
3855 error = ixgbe_setup_low_power_mode(adapter); 3855 error = ixgbe_setup_low_power_mode(adapter);
3856 3856
3857 IXGBE_CORE_UNLOCK(adapter); 3857 IXGBE_CORE_UNLOCK(adapter);
3858 3858
3859 return (error); 3859 return (error);
3860} /* ixgbe_suspend */ 3860} /* ixgbe_suspend */
3861 3861
3862/************************************************************************ 3862/************************************************************************
3863 * ixgbe_resume 3863 * ixgbe_resume
3864 * 3864 *
3865 * From D3 to D0 3865 * From D3 to D0
3866 ************************************************************************/ 3866 ************************************************************************/
3867static bool 3867static bool
3868ixgbe_resume(device_t dev, const pmf_qual_t *qual) 3868ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3869{ 3869{
3870 struct adapter *adapter = device_private(dev); 3870 struct adapter *adapter = device_private(dev);
3871 struct ifnet *ifp = adapter->ifp; 3871 struct ifnet *ifp = adapter->ifp;
3872 struct ixgbe_hw *hw = &adapter->hw; 3872 struct ixgbe_hw *hw = &adapter->hw;
3873 u32 wus; 3873 u32 wus;
3874 3874
3875 INIT_DEBUGOUT("ixgbe_resume: begin"); 3875 INIT_DEBUGOUT("ixgbe_resume: begin");
3876 3876
3877 IXGBE_CORE_LOCK(adapter); 3877 IXGBE_CORE_LOCK(adapter);
3878 3878
3879 /* Read & clear WUS register */ 3879 /* Read & clear WUS register */
3880 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 3880 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3881 if (wus) 3881 if (wus)
3882 device_printf(dev, "Woken up by (WUS): %#010x\n", 3882 device_printf(dev, "Woken up by (WUS): %#010x\n",
 3883 wus); 3883 wus);
3884 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3884 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3885 /* And clear WUFC until next low-power transition */ 3885 /* And clear WUFC until next low-power transition */
3886 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3886 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3887 3887
3888 /* 3888 /*
3889 * Required after D3->D0 transition; 3889 * Required after D3->D0 transition;
 3890 * will re-advertise all previously advertised speeds 3890 * will re-advertise all previously advertised speeds
3891 */ 3891 */
3892 if (ifp->if_flags & IFF_UP) 3892 if (ifp->if_flags & IFF_UP)
3893 ixgbe_init_locked(adapter); 3893 ixgbe_init_locked(adapter);
3894 3894
3895 IXGBE_CORE_UNLOCK(adapter); 3895 IXGBE_CORE_UNLOCK(adapter);
3896 3896
3897 return true; 3897 return true;
3898} /* ixgbe_resume */ 3898} /* ixgbe_resume */
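ixgbe_suspend() and ixgbe_resume() only run if they were handed to pmf(9) during attach; the registration looks roughly like the sketch below (a condensed rendering of the attach-time hookup, not a verbatim excerpt):

/* Hand the D0 <-> D3 handlers to pmf(9); otherwise note the failure. */
if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
	pmf_class_network_register(dev, adapter->ifp);
else
	aprint_error_dev(dev, "couldn't establish power handler\n");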
3899 3899
3900/* 3900/*
3901 * Set the various hardware offload abilities. 3901 * Set the various hardware offload abilities.
3902 * 3902 *
3903 * This takes the ifnet's if_capenable flags (e.g. set by the user using 3903 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3904 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what 3904 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3905 * mbuf offload flags the driver will understand. 3905 * mbuf offload flags the driver will understand.
3906 */ 3906 */
3907static void 3907static void
3908ixgbe_set_if_hwassist(struct adapter *adapter) 3908ixgbe_set_if_hwassist(struct adapter *adapter)
3909{ 3909{
3910 /* XXX */ 3910 /* XXX */
3911} 3911}
3912 3912
3913/************************************************************************ 3913/************************************************************************
3914 * ixgbe_init_locked - Init entry point 3914 * ixgbe_init_locked - Init entry point
3915 * 3915 *
3916 * Used in two ways: It is used by the stack as an init 3916 * Used in two ways: It is used by the stack as an init
3917 * entry point in network interface structure. It is also 3917 * entry point in network interface structure. It is also
3918 * used by the driver as a hw/sw initialization routine to 3918 * used by the driver as a hw/sw initialization routine to
3919 * get to a consistent state. 3919 * get to a consistent state.
3920 * 3920 *
 3921 * (void on NetBSD; on failure the adapter is left stopped) 3921 * (void on NetBSD; on failure the adapter is left stopped)
3922 ************************************************************************/ 3922 ************************************************************************/
3923static void 3923static void
3924ixgbe_init_locked(struct adapter *adapter) 3924ixgbe_init_locked(struct adapter *adapter)
3925{ 3925{
3926 struct ifnet *ifp = adapter->ifp; 3926 struct ifnet *ifp = adapter->ifp;
3927 device_t dev = adapter->dev; 3927 device_t dev = adapter->dev;
3928 struct ixgbe_hw *hw = &adapter->hw; 3928 struct ixgbe_hw *hw = &adapter->hw;
3929 struct ix_queue *que; 3929 struct ix_queue *que;
3930 struct tx_ring *txr; 3930 struct tx_ring *txr;
3931 struct rx_ring *rxr; 3931 struct rx_ring *rxr;
3932 u32 txdctl, mhadd; 3932 u32 txdctl, mhadd;
3933 u32 rxdctl, rxctrl; 3933 u32 rxdctl, rxctrl;
3934 u32 ctrl_ext; 3934 u32 ctrl_ext;
3935 bool unsupported_sfp = false; 3935 bool unsupported_sfp = false;
3936 int i, j, err; 3936 int i, j, err;
3937 3937
3938 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */ 3938 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3939 3939
3940 KASSERT(mutex_owned(&adapter->core_mtx)); 3940 KASSERT(mutex_owned(&adapter->core_mtx));
3941 INIT_DEBUGOUT("ixgbe_init_locked: begin"); 3941 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3942 3942
3943 hw->need_unsupported_sfp_recovery = false; 3943 hw->need_unsupported_sfp_recovery = false;
3944 hw->adapter_stopped = FALSE; 3944 hw->adapter_stopped = FALSE;
3945 ixgbe_stop_adapter(hw); 3945 ixgbe_stop_adapter(hw);
3946 callout_stop(&adapter->timer); 3946 callout_stop(&adapter->timer);
3947 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 3947 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3948 callout_stop(&adapter->recovery_mode_timer); 3948 callout_stop(&adapter->recovery_mode_timer);
3949 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 3949 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3950 que->disabled_count = 0; 3950 que->disabled_count = 0;
3951 3951
3952 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */ 3952 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3953 adapter->max_frame_size = 3953 adapter->max_frame_size =
3954 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 3954 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3955 3955
3956 /* Queue indices may change with IOV mode */ 3956 /* Queue indices may change with IOV mode */
3957 ixgbe_align_all_queue_indices(adapter); 3957 ixgbe_align_all_queue_indices(adapter);
3958 3958
3959 /* reprogram the RAR[0] in case user changed it. */ 3959 /* reprogram the RAR[0] in case user changed it. */
3960 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 3960 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3961 3961
3962 /* Get the latest mac address, User can use a LAA */ 3962 /* Get the latest mac address, User can use a LAA */
3963 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), 3963 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3964 IXGBE_ETH_LENGTH_OF_ADDRESS); 3964 IXGBE_ETH_LENGTH_OF_ADDRESS);
3965 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 3965 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3966 hw->addr_ctrl.rar_used_count = 1; 3966 hw->addr_ctrl.rar_used_count = 1;
3967 3967
3968 /* Set hardware offload abilities from ifnet flags */ 3968 /* Set hardware offload abilities from ifnet flags */
3969 ixgbe_set_if_hwassist(adapter); 3969 ixgbe_set_if_hwassist(adapter);
3970 3970
3971 /* Prepare transmit descriptors and buffers */ 3971 /* Prepare transmit descriptors and buffers */
3972 if (ixgbe_setup_transmit_structures(adapter)) { 3972 if (ixgbe_setup_transmit_structures(adapter)) {
3973 device_printf(dev, "Could not setup transmit structures\n"); 3973 device_printf(dev, "Could not setup transmit structures\n");
3974 ixgbe_stop(adapter); 3974 ixgbe_stop(adapter);
3975 return; 3975 return;
3976 } 3976 }
3977 3977
3978 ixgbe_init_hw(hw); 3978 ixgbe_init_hw(hw);
3979 3979
3980 ixgbe_initialize_iov(adapter); 3980 ixgbe_initialize_iov(adapter);
3981 3981
3982 ixgbe_initialize_transmit_units(adapter); 3982 ixgbe_initialize_transmit_units(adapter);
3983 3983
3984 /* Setup Multicast table */ 3984 /* Setup Multicast table */
3985 ixgbe_set_rxfilter(adapter); 3985 ixgbe_set_rxfilter(adapter);
3986 3986
3987 /* Determine the correct mbuf pool, based on frame size */ 3987 /* Determine the correct mbuf pool, based on frame size */
3988 if (adapter->max_frame_size <= MCLBYTES) 3988 if (adapter->max_frame_size <= MCLBYTES)
3989 adapter->rx_mbuf_sz = MCLBYTES; 3989 adapter->rx_mbuf_sz = MCLBYTES;
3990 else 3990 else
3991 adapter->rx_mbuf_sz = MJUMPAGESIZE; 3991 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3992 3992
3993 /* Prepare receive descriptors and buffers */ 3993 /* Prepare receive descriptors and buffers */
3994 if (ixgbe_setup_receive_structures(adapter)) { 3994 if (ixgbe_setup_receive_structures(adapter)) {
3995 device_printf(dev, "Could not setup receive structures\n"); 3995 device_printf(dev, "Could not setup receive structures\n");
3996 ixgbe_stop(adapter); 3996 ixgbe_stop(adapter);
3997 return; 3997 return;
3998 } 3998 }
3999 3999
4000 /* Configure RX settings */ 4000 /* Configure RX settings */
4001 ixgbe_initialize_receive_units(adapter); 4001 ixgbe_initialize_receive_units(adapter);
4002 4002
 4003 /* Clear the variable holding task enqueue requests from interrupts */ 4003 /* Clear the variable holding task enqueue requests from interrupts */
4004 adapter->task_requests = 0; 4004 adapter->task_requests = 0;
4005 4005
4006 /* Enable SDP & MSI-X interrupts based on adapter */ 4006 /* Enable SDP & MSI-X interrupts based on adapter */
4007 ixgbe_config_gpie(adapter); 4007 ixgbe_config_gpie(adapter);
4008 4008
4009 /* Set MTU size */ 4009 /* Set MTU size */
4010 if (ifp->if_mtu > ETHERMTU) { 4010 if (ifp->if_mtu > ETHERMTU) {
4011 /* aka IXGBE_MAXFRS on 82599 and newer */ 4011 /* aka IXGBE_MAXFRS on 82599 and newer */
4012 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 4012 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4013 mhadd &= ~IXGBE_MHADD_MFS_MASK; 4013 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4014 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 4014 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4015 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 4015 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4016 } 4016 }
4017 4017
4018 /* Now enable all the queues */ 4018 /* Now enable all the queues */
4019 for (i = 0; i < adapter->num_queues; i++) { 4019 for (i = 0; i < adapter->num_queues; i++) {
4020 txr = &adapter->tx_rings[i]; 4020 txr = &adapter->tx_rings[i];
4021 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 4021 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4022 txdctl |= IXGBE_TXDCTL_ENABLE; 4022 txdctl |= IXGBE_TXDCTL_ENABLE;
4023 /* Set WTHRESH to 8, burst writeback */ 4023 /* Set WTHRESH to 8, burst writeback */
4024 txdctl |= (8 << 16); 4024 txdctl |= (8 << 16);
4025 /* 4025 /*
4026 * When the internal queue falls below PTHRESH (32), 4026 * When the internal queue falls below PTHRESH (32),
4027 * start prefetching as long as there are at least 4027 * start prefetching as long as there are at least
4028 * HTHRESH (1) buffers ready. The values are taken 4028 * HTHRESH (1) buffers ready. The values are taken
 4029 * from the Intel Linux driver 3.8.21. 4029 * from the Intel Linux driver 3.8.21.
4030 * Prefetching enables tx line rate even with 1 queue. 4030 * Prefetching enables tx line rate even with 1 queue.
4031 */ 4031 */
4032 txdctl |= (32 << 0) | (1 << 8); 4032 txdctl |= (32 << 0) | (1 << 8);
4033 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 4033 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4034 } 4034 }
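	/*
	 * Worked example of the TXDCTL packing above: PTHRESH 32 fills
	 * bits 6:0 (0x20), HTHRESH 1 bits 14:8 (0x0100) and WTHRESH 8
	 * bits 22:16 (0x080000), so txdctl gains 0x00080120 in addition
	 * to IXGBE_TXDCTL_ENABLE.
	 */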
4035 4035
4036 for (i = 0; i < adapter->num_queues; i++) { 4036 for (i = 0; i < adapter->num_queues; i++) {
4037 rxr = &adapter->rx_rings[i]; 4037 rxr = &adapter->rx_rings[i];
4038 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 4038 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4039 if (hw->mac.type == ixgbe_mac_82598EB) { 4039 if (hw->mac.type == ixgbe_mac_82598EB) {
4040 /* 4040 /*
4041 * PTHRESH = 21 4041 * PTHRESH = 21
4042 * HTHRESH = 4 4042 * HTHRESH = 4
4043 * WTHRESH = 8 4043 * WTHRESH = 8
4044 */ 4044 */
4045 rxdctl &= ~0x3FFFFF; 4045 rxdctl &= ~0x3FFFFF;
4046 rxdctl |= 0x080420; 4046 rxdctl |= 0x080420;
4047 } 4047 }
4048 rxdctl |= IXGBE_RXDCTL_ENABLE; 4048 rxdctl |= IXGBE_RXDCTL_ENABLE;
4049 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 4049 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4050 for (j = 0; j < 10; j++) { 4050 for (j = 0; j < 10; j++) {
4051 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 4051 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4052 IXGBE_RXDCTL_ENABLE) 4052 IXGBE_RXDCTL_ENABLE)
4053 break; 4053 break;
4054 else 4054 else
4055 msec_delay(1); 4055 msec_delay(1);
4056 } 4056 }
4057 IXGBE_WRITE_BARRIER(hw); 4057 IXGBE_WRITE_BARRIER(hw);
4058 4058
4059 /* 4059 /*
4060 * In netmap mode, we must preserve the buffers made 4060 * In netmap mode, we must preserve the buffers made
4061 * available to userspace before the if_init() 4061 * available to userspace before the if_init()
4062 * (this is true by default on the TX side, because 4062 * (this is true by default on the TX side, because
4063 * init makes all buffers available to userspace). 4063 * init makes all buffers available to userspace).
4064 * 4064 *
4065 * netmap_reset() and the device specific routines 4065 * netmap_reset() and the device specific routines
4066 * (e.g. ixgbe_setup_receive_rings()) map these 4066 * (e.g. ixgbe_setup_receive_rings()) map these
4067 * buffers at the end of the NIC ring, so here we 4067 * buffers at the end of the NIC ring, so here we
4068 * must set the RDT (tail) register to make sure 4068 * must set the RDT (tail) register to make sure
4069 * they are not overwritten. 4069 * they are not overwritten.
4070 * 4070 *
4071 * In this driver the NIC ring starts at RDH = 0, 4071 * In this driver the NIC ring starts at RDH = 0,
4072 * RDT points to the last slot available for reception (?), 4072 * RDT points to the last slot available for reception (?),
4073 * so RDT = num_rx_desc - 1 means the whole ring is available. 4073 * so RDT = num_rx_desc - 1 means the whole ring is available.
4074 */ 4074 */
4075#ifdef DEV_NETMAP 4075#ifdef DEV_NETMAP
4076 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 4076 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4077 (ifp->if_capenable & IFCAP_NETMAP)) { 4077 (ifp->if_capenable & IFCAP_NETMAP)) {
4078 struct netmap_adapter *na = NA(adapter->ifp); 4078 struct netmap_adapter *na = NA(adapter->ifp);
4079 struct netmap_kring *kring = na->rx_rings[i]; 4079 struct netmap_kring *kring = na->rx_rings[i];
4080 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 4080 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4081 4081
4082 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t); 4082 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4083 } else 4083 } else
4084#endif /* DEV_NETMAP */ 4084#endif /* DEV_NETMAP */
4085 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), 4085 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4086 adapter->num_rx_desc - 1); 4086 adapter->num_rx_desc - 1);
4087 } 4087 }
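	/*
	 * Tail arithmetic example (hypothetical numbers): with
	 * num_rx_desc 512 and netmap holding 4 slots, RDT is written as
	 * 512 - 1 - 4 == 507; without netmap it is 511, leaving the
	 * whole ring available to the NIC.
	 */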
4088 4088
4089 /* Enable Receive engine */ 4089 /* Enable Receive engine */
4090 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4090 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4091 if (hw->mac.type == ixgbe_mac_82598EB) 4091 if (hw->mac.type == ixgbe_mac_82598EB)
4092 rxctrl |= IXGBE_RXCTRL_DMBYPS; 4092 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4093 rxctrl |= IXGBE_RXCTRL_RXEN; 4093 rxctrl |= IXGBE_RXCTRL_RXEN;
4094 ixgbe_enable_rx_dma(hw, rxctrl); 4094 ixgbe_enable_rx_dma(hw, rxctrl);
4095 4095
4096 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 4096 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4097 atomic_store_relaxed(&adapter->timer_pending, 0); 4097 atomic_store_relaxed(&adapter->timer_pending, 0);
4098 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 4098 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4099 callout_reset(&adapter->recovery_mode_timer, hz, 4099 callout_reset(&adapter->recovery_mode_timer, hz,
4100 ixgbe_recovery_mode_timer, adapter); 4100 ixgbe_recovery_mode_timer, adapter);
4101 4101
4102 /* Set up MSI/MSI-X routing */ 4102 /* Set up MSI/MSI-X routing */
4103 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 4103 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4104 ixgbe_configure_ivars(adapter); 4104 ixgbe_configure_ivars(adapter);
4105 /* Set up auto-mask */ 4105 /* Set up auto-mask */
4106 if (hw->mac.type == ixgbe_mac_82598EB) 4106 if (hw->mac.type == ixgbe_mac_82598EB)
4107 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4107 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4108 else { 4108 else {
4109 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 4109 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4110 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 4110 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4111 } 4111 }
4112 } else { /* Simple settings for Legacy/MSI */ 4112 } else { /* Simple settings for Legacy/MSI */
4113 ixgbe_set_ivar(adapter, 0, 0, 0); 4113 ixgbe_set_ivar(adapter, 0, 0, 0);
4114 ixgbe_set_ivar(adapter, 0, 0, 1); 4114 ixgbe_set_ivar(adapter, 0, 0, 1);
4115 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4115 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4116 } 4116 }
4117 4117
4118 ixgbe_init_fdir(adapter); 4118 ixgbe_init_fdir(adapter);
4119 4119
4120 /* 4120 /*
4121 * Check on any SFP devices that 4121 * Check on any SFP devices that
4122 * need to be kick-started 4122 * need to be kick-started
4123 */ 4123 */
4124 if (hw->phy.type == ixgbe_phy_none) { 4124 if (hw->phy.type == ixgbe_phy_none) {
4125 err = hw->phy.ops.identify(hw); 4125 err = hw->phy.ops.identify(hw);
4126 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) 4126 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
4127 unsupported_sfp = true; 4127 unsupported_sfp = true;
4128 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported) 4128 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4129 unsupported_sfp = true; 4129 unsupported_sfp = true;
4130 4130
4131 if (unsupported_sfp) 4131 if (unsupported_sfp)
4132 device_printf(dev, 4132 device_printf(dev,
4133 "Unsupported SFP+ module type was detected.\n"); 4133 "Unsupported SFP+ module type was detected.\n");
4134 4134
4135 /* Set moderation on the Link interrupt */ 4135 /* Set moderation on the Link interrupt */
4136 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); 4136 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4137 4137
4138 /* Enable EEE power saving */ 4138 /* Enable EEE power saving */
4139 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 4139 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4140 hw->mac.ops.setup_eee(hw, 4140 hw->mac.ops.setup_eee(hw,
4141 adapter->feat_en & IXGBE_FEATURE_EEE); 4141 adapter->feat_en & IXGBE_FEATURE_EEE);
4142 4142
4143 /* Enable power to the phy. */ 4143 /* Enable power to the phy. */
4144 if (!unsupported_sfp) { 4144 if (!unsupported_sfp) {
4145 ixgbe_set_phy_power(hw, TRUE); 4145 ixgbe_set_phy_power(hw, TRUE);
4146 4146
4147 /* Config/Enable Link */ 4147 /* Config/Enable Link */
4148 ixgbe_config_link(adapter); 4148 ixgbe_config_link(adapter);
4149 } 4149 }
4150 4150
4151 /* Hardware Packet Buffer & Flow Control setup */ 4151 /* Hardware Packet Buffer & Flow Control setup */
4152 ixgbe_config_delay_values(adapter); 4152 ixgbe_config_delay_values(adapter);
4153 4153
4154 /* Initialize the FC settings */ 4154 /* Initialize the FC settings */
4155 ixgbe_start_hw(hw); 4155 ixgbe_start_hw(hw);
4156 4156
4157 /* Set up VLAN support and filter */ 4157 /* Set up VLAN support and filter */
4158 ixgbe_setup_vlan_hw_support(adapter); 4158 ixgbe_setup_vlan_hw_support(adapter);
4159 4159
4160 /* Setup DMA Coalescing */ 4160 /* Setup DMA Coalescing */
4161 ixgbe_config_dmac(adapter); 4161 ixgbe_config_dmac(adapter);
4162 4162
4163 /* OK to schedule workqueues. */ 4163 /* OK to schedule workqueues. */
4164 adapter->schedule_wqs_ok = true; 4164 adapter->schedule_wqs_ok = true;
4165 4165
4166 /* And now turn on interrupts */ 4166 /* And now turn on interrupts */
4167 ixgbe_enable_intr(adapter); 4167 ixgbe_enable_intr(adapter);
4168 4168
4169	/* Enable the use of the MBX by the VFs */	4169	/* Enable the use of the MBX by the VFs */
4170 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 4170 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4171 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 4171 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4172 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 4172 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4173 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 4173 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4174 } 4174 }
4175 4175
4176 /* Update saved flags. See ixgbe_ifflags_cb() */ 4176 /* Update saved flags. See ixgbe_ifflags_cb() */
4177 adapter->if_flags = ifp->if_flags; 4177 adapter->if_flags = ifp->if_flags;
4178 adapter->ec_capenable = adapter->osdep.ec.ec_capenable; 4178 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4179 4179
4180 /* Now inform the stack we're ready */ 4180 /* Now inform the stack we're ready */
4181 ifp->if_flags |= IFF_RUNNING; 4181 ifp->if_flags |= IFF_RUNNING;
4182 4182
4183 return; 4183 return;
4184} /* ixgbe_init_locked */ 4184} /* ixgbe_init_locked */
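The RDT placement rule in the RX setup above is compact enough to model in isolation. A minimal stand-alone sketch (rdt_for_ring() and rxspace are illustrative stand-ins for the per-ring state and nm_kr_rxspace(kring), not driver symbols):

	#include <stdio.h>

	/*
	 * Minimal model of the RDT (tail) placement: with netmap, the
	 * slots reserved for userspace sit at the end of the NIC ring,
	 * so the tail is pulled back by that amount; otherwise the
	 * whole ring is offered and RDT points at the last descriptor.
	 */
	static int
	rdt_for_ring(int num_rx_desc, int netmap_on, int rxspace)
	{
		if (netmap_on)
			return num_rx_desc - 1 - rxspace;
		return num_rx_desc - 1;
	}

	int
	main(void)
	{
		printf("plain: %d, netmap: %d\n",
		    rdt_for_ring(1024, 0, 0),	/* 1023: whole ring usable */
		    rdt_for_ring(1024, 1, 16));	/* 1007: 16 slots preserved */
		return 0;
	}

Keeping the reserved netmap slots at the end of the ring means one tail write protects them, with no per-slot bookkeeping.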
4185 4185
4186/************************************************************************ 4186/************************************************************************
4187 * ixgbe_init 4187 * ixgbe_init
4188 ************************************************************************/ 4188 ************************************************************************/
4189static int 4189static int
4190ixgbe_init(struct ifnet *ifp) 4190ixgbe_init(struct ifnet *ifp)
4191{ 4191{
4192 struct adapter *adapter = ifp->if_softc; 4192 struct adapter *adapter = ifp->if_softc;
4193 4193
4194 IXGBE_CORE_LOCK(adapter); 4194 IXGBE_CORE_LOCK(adapter);
4195 ixgbe_init_locked(adapter); 4195 ixgbe_init_locked(adapter);
4196 IXGBE_CORE_UNLOCK(adapter); 4196 IXGBE_CORE_UNLOCK(adapter);
4197 4197
4198 return 0; /* XXX ixgbe_init_locked cannot fail? really? */ 4198 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4199} /* ixgbe_init */ 4199} /* ixgbe_init */
4200 4200
4201/************************************************************************ 4201/************************************************************************
4202 * ixgbe_set_ivar 4202 * ixgbe_set_ivar
4203 * 4203 *
4204 * Setup the correct IVAR register for a particular MSI-X interrupt 4204 * Setup the correct IVAR register for a particular MSI-X interrupt
4205 * (yes this is all very magic and confusing :) 4205 * (yes this is all very magic and confusing :)
4206 * - entry is the register array entry 4206 * - entry is the register array entry
4207 * - vector is the MSI-X vector for this queue 4207 * - vector is the MSI-X vector for this queue
4208 * - type is RX/TX/MISC 4208 * - type is RX/TX/MISC
4209 ************************************************************************/ 4209 ************************************************************************/
4210static void 4210static void
4211ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 4211ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4212{ 4212{
4213 struct ixgbe_hw *hw = &adapter->hw; 4213 struct ixgbe_hw *hw = &adapter->hw;
4214 u32 ivar, index; 4214 u32 ivar, index;
4215 4215
4216 vector |= IXGBE_IVAR_ALLOC_VAL; 4216 vector |= IXGBE_IVAR_ALLOC_VAL;
4217 4217
4218 switch (hw->mac.type) { 4218 switch (hw->mac.type) {
4219 case ixgbe_mac_82598EB: 4219 case ixgbe_mac_82598EB:
4220 if (type == -1) 4220 if (type == -1)
4221 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 4221 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4222 else 4222 else
4223 entry += (type * 64); 4223 entry += (type * 64);
4224 index = (entry >> 2) & 0x1F; 4224 index = (entry >> 2) & 0x1F;
4225 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4225 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4226 ivar &= ~(0xffUL << (8 * (entry & 0x3))); 4226 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4227 ivar |= ((u32)vector << (8 * (entry & 0x3))); 4227 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4228 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 4228 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4229 break; 4229 break;
4230 case ixgbe_mac_82599EB: 4230 case ixgbe_mac_82599EB:
4231 case ixgbe_mac_X540: 4231 case ixgbe_mac_X540:
4232 case ixgbe_mac_X550: 4232 case ixgbe_mac_X550:
4233 case ixgbe_mac_X550EM_x: 4233 case ixgbe_mac_X550EM_x:
4234 case ixgbe_mac_X550EM_a: 4234 case ixgbe_mac_X550EM_a:
4235 if (type == -1) { /* MISC IVAR */ 4235 if (type == -1) { /* MISC IVAR */
4236 index = (entry & 1) * 8; 4236 index = (entry & 1) * 8;
4237 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4237 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4238 ivar &= ~(0xffUL << index); 4238 ivar &= ~(0xffUL << index);
4239 ivar |= ((u32)vector << index); 4239 ivar |= ((u32)vector << index);
4240 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4240 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4241 } else { /* RX/TX IVARS */ 4241 } else { /* RX/TX IVARS */
4242 index = (16 * (entry & 1)) + (8 * type); 4242 index = (16 * (entry & 1)) + (8 * type);
4243 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 4243 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4244 ivar &= ~(0xffUL << index); 4244 ivar &= ~(0xffUL << index);
4245 ivar |= ((u32)vector << index); 4245 ivar |= ((u32)vector << index);
4246 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 4246 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4247 } 4247 }
4248 break; 4248 break;
4249 default: 4249 default:
4250 break; 4250 break;
4251 } 4251 }
4252} /* ixgbe_set_ivar */ 4252} /* ixgbe_set_ivar */
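The 82598 branch above is a read-modify-write of one 8-bit field inside a 32-bit register that packs four IVAR entries. A self-contained model of the same index/shift arithmetic (regs[] stands in for the hardware register file; 0x80 matches IXGBE_IVAR_ALLOC_VAL):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t regs[32];		/* model of IVAR(0..31) */

	static void
	set_ivar_82598(uint8_t entry, uint8_t vector)
	{
		uint32_t index = (entry >> 2) & 0x1F;	/* which register */
		uint32_t shift = 8 * (entry & 0x3);	/* which byte in it */
		uint32_t ivar = regs[index];

		ivar &= ~(0xffUL << shift);		/* clear old vector */
		ivar |= ((uint32_t)(vector | 0x80) << shift); /* 0x80: valid */
		regs[index] = ivar;
	}

	int
	main(void)
	{
		set_ivar_82598(5, 2);	/* entry 5 -> register 1, byte 1 */
		printf("IVAR(1) = 0x%08x\n", (unsigned)regs[1]); /* 0x00008200 */
		return 0;
	}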
4253 4253
4254/************************************************************************ 4254/************************************************************************
4255 * ixgbe_configure_ivars 4255 * ixgbe_configure_ivars
4256 ************************************************************************/ 4256 ************************************************************************/
4257static void 4257static void
4258ixgbe_configure_ivars(struct adapter *adapter) 4258ixgbe_configure_ivars(struct adapter *adapter)
4259{ 4259{
4260 struct ix_queue *que = adapter->queues; 4260 struct ix_queue *que = adapter->queues;
4261 u32 newitr; 4261 u32 newitr;
4262 4262
4263 if (ixgbe_max_interrupt_rate > 0) 4263 if (ixgbe_max_interrupt_rate > 0)
4264 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 4264 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4265 else { 4265 else {
4266 /* 4266 /*
4267 * Disable DMA coalescing if interrupt moderation is 4267 * Disable DMA coalescing if interrupt moderation is
4268 * disabled. 4268 * disabled.
4269 */ 4269 */
4270 adapter->dmac = 0; 4270 adapter->dmac = 0;
4271 newitr = 0; 4271 newitr = 0;
4272 } 4272 }
4273 4273
4274 for (int i = 0; i < adapter->num_queues; i++, que++) { 4274 for (int i = 0; i < adapter->num_queues; i++, que++) {
4275 struct rx_ring *rxr = &adapter->rx_rings[i]; 4275 struct rx_ring *rxr = &adapter->rx_rings[i];
4276 struct tx_ring *txr = &adapter->tx_rings[i]; 4276 struct tx_ring *txr = &adapter->tx_rings[i];
4277 /* First the RX queue entry */ 4277 /* First the RX queue entry */
4278 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0); 4278 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4279 /* ... and the TX */ 4279 /* ... and the TX */
4280 ixgbe_set_ivar(adapter, txr->me, que->msix, 1); 4280 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4281 /* Set an Initial EITR value */ 4281 /* Set an Initial EITR value */
4282 ixgbe_eitr_write(adapter, que->msix, newitr); 4282 ixgbe_eitr_write(adapter, que->msix, newitr);
4283 /* 4283 /*
4284		 * Reset the setting to eliminate the influence	4284		 * Reset the setting to eliminate the influence
4285		 * of the previous state. At this point, the	4285		 * of the previous state. At this point, the
4286		 * Tx/Rx interrupt handler (ixgbe_msix_que())	4286		 * Tx/Rx interrupt handler (ixgbe_msix_que())
4287		 * cannot be called, so neither IXGBE_TX_LOCK	4287		 * cannot be called, so neither IXGBE_TX_LOCK
4288		 * nor IXGBE_RX_LOCK is required.		4288		 * nor IXGBE_RX_LOCK is required.
4288 */ 4288 */
4289 que->eitr_setting = 0; 4289 que->eitr_setting = 0;
4290 } 4290 }
4291 4291
4292 /* For the Link interrupt */ 4292 /* For the Link interrupt */
4293 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 4293 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4294} /* ixgbe_configure_ivars */ 4294} /* ixgbe_configure_ivars */
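The newitr seeding above converts a maximum interrupt rate into the EITR interval encoding; the 0x0FF8 mask keeps bits 3..11, so the programmed value is always a multiple of 8 and a rate of 0 disables moderation. A small sketch of that conversion (eitr_from_rate() is an illustrative name):

	#include <stdio.h>

	static unsigned
	eitr_from_rate(unsigned max_interrupt_rate)
	{
		if (max_interrupt_rate == 0)
			return 0;	/* moderation disabled */
		/* Keep only the interval field, bits 3..11. */
		return (4000000 / max_interrupt_rate) & 0x0FF8;
	}

	int
	main(void)
	{
		/* 31250 irqs/s -> 4000000 / 31250 = 128 = 0x80, aligned. */
		printf("newitr = 0x%04x\n", eitr_from_rate(31250));
		return 0;
	}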
4295 4295
4296/************************************************************************ 4296/************************************************************************
4297 * ixgbe_config_gpie 4297 * ixgbe_config_gpie
4298 ************************************************************************/ 4298 ************************************************************************/
4299static void 4299static void
4300ixgbe_config_gpie(struct adapter *adapter) 4300ixgbe_config_gpie(struct adapter *adapter)
4301{ 4301{
4302 struct ixgbe_hw *hw = &adapter->hw; 4302 struct ixgbe_hw *hw = &adapter->hw;
4303 u32 gpie; 4303 u32 gpie;
4304 4304
4305 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4305 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4306 4306
4307 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 4307 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4308 /* Enable Enhanced MSI-X mode */ 4308 /* Enable Enhanced MSI-X mode */
4309 gpie |= IXGBE_GPIE_MSIX_MODE 4309 gpie |= IXGBE_GPIE_MSIX_MODE
4310 | IXGBE_GPIE_EIAME 4310 | IXGBE_GPIE_EIAME
4311 | IXGBE_GPIE_PBA_SUPPORT 4311 | IXGBE_GPIE_PBA_SUPPORT
4312 | IXGBE_GPIE_OCD; 4312 | IXGBE_GPIE_OCD;
4313 } 4313 }
4314 4314
4315 /* Fan Failure Interrupt */ 4315 /* Fan Failure Interrupt */
4316 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 4316 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4317 gpie |= IXGBE_SDP1_GPIEN; 4317 gpie |= IXGBE_SDP1_GPIEN;
4318 4318
4319 /* Thermal Sensor Interrupt */ 4319 /* Thermal Sensor Interrupt */
4320 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 4320 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4321 gpie |= IXGBE_SDP0_GPIEN_X540; 4321 gpie |= IXGBE_SDP0_GPIEN_X540;
4322 4322
4323 /* Link detection */ 4323 /* Link detection */
4324 switch (hw->mac.type) { 4324 switch (hw->mac.type) {
4325 case ixgbe_mac_82599EB: 4325 case ixgbe_mac_82599EB:
4326 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 4326 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4327 break; 4327 break;
4328 case ixgbe_mac_X550EM_x: 4328 case ixgbe_mac_X550EM_x:
4329 case ixgbe_mac_X550EM_a: 4329 case ixgbe_mac_X550EM_a:
4330 gpie |= IXGBE_SDP0_GPIEN_X540; 4330 gpie |= IXGBE_SDP0_GPIEN_X540;
4331 break; 4331 break;
4332 default: 4332 default:
4333 break; 4333 break;
4334 } 4334 }
4335 4335
4336 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4336 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4337 4337
4338} /* ixgbe_config_gpie */ 4338} /* ixgbe_config_gpie */
4339 4339
4340/************************************************************************ 4340/************************************************************************
4341 * ixgbe_config_delay_values 4341 * ixgbe_config_delay_values
4342 * 4342 *
4343 * Requires adapter->max_frame_size to be set. 4343 * Requires adapter->max_frame_size to be set.
4344 ************************************************************************/ 4344 ************************************************************************/
4345static void 4345static void
4346ixgbe_config_delay_values(struct adapter *adapter) 4346ixgbe_config_delay_values(struct adapter *adapter)
4347{ 4347{
4348 struct ixgbe_hw *hw = &adapter->hw; 4348 struct ixgbe_hw *hw = &adapter->hw;
4349 u32 rxpb, frame, size, tmp; 4349 u32 rxpb, frame, size, tmp;
4350 4350
4351 frame = adapter->max_frame_size; 4351 frame = adapter->max_frame_size;
4352 4352
4353 /* Calculate High Water */ 4353 /* Calculate High Water */
4354 switch (hw->mac.type) { 4354 switch (hw->mac.type) {
4355 case ixgbe_mac_X540: 4355 case ixgbe_mac_X540:
4356 case ixgbe_mac_X550: 4356 case ixgbe_mac_X550:
4357 case ixgbe_mac_X550EM_x: 4357 case ixgbe_mac_X550EM_x:
4358 case ixgbe_mac_X550EM_a: 4358 case ixgbe_mac_X550EM_a:
4359 tmp = IXGBE_DV_X540(frame, frame); 4359 tmp = IXGBE_DV_X540(frame, frame);
4360 break; 4360 break;
4361 default: 4361 default:
4362 tmp = IXGBE_DV(frame, frame); 4362 tmp = IXGBE_DV(frame, frame);
4363 break; 4363 break;
4364 } 4364 }
4365 size = IXGBE_BT2KB(tmp); 4365 size = IXGBE_BT2KB(tmp);
4366 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 4366 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4367 hw->fc.high_water[0] = rxpb - size; 4367 hw->fc.high_water[0] = rxpb - size;
4368 4368
4369 /* Now calculate Low Water */ 4369 /* Now calculate Low Water */
4370 switch (hw->mac.type) { 4370 switch (hw->mac.type) {
4371 case ixgbe_mac_X540: 4371 case ixgbe_mac_X540:
4372 case ixgbe_mac_X550: 4372 case ixgbe_mac_X550:
4373 case ixgbe_mac_X550EM_x: 4373 case ixgbe_mac_X550EM_x:
4374 case ixgbe_mac_X550EM_a: 4374 case ixgbe_mac_X550EM_a:
4375 tmp = IXGBE_LOW_DV_X540(frame); 4375 tmp = IXGBE_LOW_DV_X540(frame);
4376 break; 4376 break;
4377 default: 4377 default:
4378 tmp = IXGBE_LOW_DV(frame); 4378 tmp = IXGBE_LOW_DV(frame);
4379 break; 4379 break;
4380 } 4380 }
4381 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 4381 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4382 4382
4383 hw->fc.pause_time = IXGBE_FC_PAUSE; 4383 hw->fc.pause_time = IXGBE_FC_PAUSE;
4384 hw->fc.send_xon = TRUE; 4384 hw->fc.send_xon = TRUE;
4385} /* ixgbe_config_delay_values */ 4385} /* ixgbe_config_delay_values */
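The watermark math above reads more easily with the macro definitions in hand. Assuming the stock IXGBE_BT2KB (a delay value in bit times rounded up to whole kilobytes), the high watermark is simply the size of packet buffer 0 minus that headroom. A sketch (names and sample numbers are illustrative; rxpb_kb stands in for IXGBE_RXPBSIZE(0) >> 10):

	#include <stdio.h>

	/* Round bit times up to whole kilobytes (8 * 1024 bits each). */
	#define BT2KB(bt)	(((bt) + (8 * 1024 - 1)) / (8 * 1024))

	static unsigned
	high_water_kb(unsigned rxpb_kb, unsigned delay_bit_times)
	{
		return rxpb_kb - BT2KB(delay_bit_times);
	}

	int
	main(void)
	{
		/* 512 KB buffer; 100000 bit times round up to 13 KB. */
		printf("high water = %u KB\n", high_water_kb(512, 100000));
		return 0;
	}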
4386 4386
4387/************************************************************************ 4387/************************************************************************
4388 * ixgbe_set_rxfilter - Multicast Update 4388 * ixgbe_set_rxfilter - Multicast Update
4389 * 4389 *
4390 * Called whenever multicast address list is updated. 4390 * Called whenever multicast address list is updated.
4391 ************************************************************************/ 4391 ************************************************************************/
4392static void 4392static void
4393ixgbe_set_rxfilter(struct adapter *adapter) 4393ixgbe_set_rxfilter(struct adapter *adapter)
4394{ 4394{
4395 struct ixgbe_mc_addr *mta; 4395 struct ixgbe_mc_addr *mta;
4396 struct ifnet *ifp = adapter->ifp; 4396 struct ifnet *ifp = adapter->ifp;
4397 u8 *update_ptr; 4397 u8 *update_ptr;
4398 int mcnt = 0; 4398 int mcnt = 0;
4399 u32 fctrl; 4399 u32 fctrl;
4400 struct ethercom *ec = &adapter->osdep.ec; 4400 struct ethercom *ec = &adapter->osdep.ec;
4401 struct ether_multi *enm; 4401 struct ether_multi *enm;
4402 struct ether_multistep step; 4402 struct ether_multistep step;
4403 4403
4404 KASSERT(mutex_owned(&adapter->core_mtx)); 4404 KASSERT(mutex_owned(&adapter->core_mtx));
4405 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin"); 4405 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4406 4406
4407 mta = adapter->mta; 4407 mta = adapter->mta;
4408 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 4408 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4409 4409
4410 ETHER_LOCK(ec); 4410 ETHER_LOCK(ec);
4411 ec->ec_flags &= ~ETHER_F_ALLMULTI; 4411 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4412 ETHER_FIRST_MULTI(step, ec, enm); 4412 ETHER_FIRST_MULTI(step, ec, enm);
4413 while (enm != NULL) { 4413 while (enm != NULL) {
4414 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) || 4414 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4415 (memcmp(enm->enm_addrlo, enm->enm_addrhi, 4415 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4416 ETHER_ADDR_LEN) != 0)) { 4416 ETHER_ADDR_LEN) != 0)) {
4417 ec->ec_flags |= ETHER_F_ALLMULTI; 4417 ec->ec_flags |= ETHER_F_ALLMULTI;
4418 break; 4418 break;
4419 } 4419 }
4420 bcopy(enm->enm_addrlo, 4420 bcopy(enm->enm_addrlo,
4421 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 4421 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4422 mta[mcnt].vmdq = adapter->pool; 4422 mta[mcnt].vmdq = adapter->pool;
4423 mcnt++; 4423 mcnt++;
4424 ETHER_NEXT_MULTI(step, enm); 4424 ETHER_NEXT_MULTI(step, enm);
4425 } 4425 }
4426 4426
4427 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 4427 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4428 if (ifp->if_flags & IFF_PROMISC) 4428 if (ifp->if_flags & IFF_PROMISC)
4429 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4429 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4430 else if (ec->ec_flags & ETHER_F_ALLMULTI) { 4430 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4431 fctrl |= IXGBE_FCTRL_MPE; 4431 fctrl |= IXGBE_FCTRL_MPE;
4432 fctrl &= ~IXGBE_FCTRL_UPE; 4432 fctrl &= ~IXGBE_FCTRL_UPE;
4433 } else 4433 } else
4434 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4434 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4435 4435
4436 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 4436 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4437 4437
4438	/* Update multicast filter entries only when not in ALLMULTI mode */	4438	/* Update multicast filter entries only when not in ALLMULTI mode */
4439 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) { 4439 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4440 ETHER_UNLOCK(ec); 4440 ETHER_UNLOCK(ec);
4441 update_ptr = (u8 *)mta; 4441 update_ptr = (u8 *)mta;
4442 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 4442 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4443 ixgbe_mc_array_itr, TRUE); 4443 ixgbe_mc_array_itr, TRUE);
4444 } else 4444 } else
4445 ETHER_UNLOCK(ec); 4445 ETHER_UNLOCK(ec);
4446} /* ixgbe_set_rxfilter */ 4446} /* ixgbe_set_rxfilter */
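The FCTRL update above implements a three-way policy: promiscuous mode beats ALLMULTI, which beats the exact multicast filter table. A condensed model (the bit values here are illustrative, not the hardware's):

	#include <stdio.h>

	#define FCTRL_UPE	0x1	/* unicast promiscuous (illustrative) */
	#define FCTRL_MPE	0x2	/* multicast promiscuous (illustrative) */

	static unsigned
	fctrl_bits(unsigned fctrl, int promisc, int allmulti)
	{
		if (promisc)
			fctrl |= FCTRL_UPE | FCTRL_MPE;
		else if (allmulti) {
			fctrl |= FCTRL_MPE;
			fctrl &= ~FCTRL_UPE;
		} else
			fctrl &= ~(FCTRL_UPE | FCTRL_MPE);
		return fctrl;
	}

	int
	main(void)
	{
		printf("promisc: 0x%x, allmulti: 0x%x, filtered: 0x%x\n",
		    fctrl_bits(0, 1, 0),	/* 0x3 */
		    fctrl_bits(0, 0, 1),	/* 0x2 */
		    fctrl_bits(3, 0, 0));	/* 0x0 */
		return 0;
	}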
4447 4447
4448/************************************************************************ 4448/************************************************************************
4449 * ixgbe_mc_array_itr 4449 * ixgbe_mc_array_itr
4450 * 4450 *
4451 * An iterator function needed by the multicast shared code. 4451 * An iterator function needed by the multicast shared code.
4452 * It feeds the shared code routine the addresses in the 4452 * It feeds the shared code routine the addresses in the
4453 * array of ixgbe_set_rxfilter() one by one. 4453 * array of ixgbe_set_rxfilter() one by one.
4454 ************************************************************************/ 4454 ************************************************************************/
4455static u8 * 4455static u8 *
4456ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 4456ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4457{ 4457{
4458 struct ixgbe_mc_addr *mta; 4458 struct ixgbe_mc_addr *mta;
4459 4459
4460 mta = (struct ixgbe_mc_addr *)*update_ptr; 4460 mta = (struct ixgbe_mc_addr *)*update_ptr;
4461 *vmdq = mta->vmdq; 4461 *vmdq = mta->vmdq;
4462 4462
4463 *update_ptr = (u8*)(mta + 1); 4463 *update_ptr = (u8*)(mta + 1);
4464 4464
4465 return (mta->addr); 4465 return (mta->addr);
4466} /* ixgbe_mc_array_itr */ 4466} /* ixgbe_mc_array_itr */
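The iterator contract above is worth spelling out: the shared code holds only an opaque byte cursor, and each call returns the current address while stepping the cursor over one whole record, padding included. A stand-alone sketch (struct mc_addr approximates struct ixgbe_mc_addr):

	#include <stdint.h>
	#include <stdio.h>

	struct mc_addr {
		uint8_t  addr[6];
		uint32_t vmdq;
	};

	static uint8_t *
	mc_array_itr(uint8_t **update_ptr, uint32_t *vmdq)
	{
		struct mc_addr *mta = (struct mc_addr *)*update_ptr;

		*vmdq = mta->vmdq;
		*update_ptr = (uint8_t *)(mta + 1);	/* advance one record */
		return mta->addr;
	}

	int
	main(void)
	{
		struct mc_addr tab[2] = {
			{ { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, 0 },
			{ { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 }, 0 },
		};
		uint8_t *cur = (uint8_t *)tab;
		uint32_t vmdq;

		for (int i = 0; i < 2; i++)
			printf("entry %d ends in %02x\n", i,
			    (unsigned)mc_array_itr(&cur, &vmdq)[5]);
		return 0;
	}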
4467 4467
4468/************************************************************************ 4468/************************************************************************
4469 * ixgbe_local_timer - Timer routine 4469 * ixgbe_local_timer - Timer routine
4470 * 4470 *
4471 * Checks for link status, updates statistics, 4471 * Checks for link status, updates statistics,
4472 * and runs the watchdog check. 4472 * and runs the watchdog check.
4473 ************************************************************************/ 4473 ************************************************************************/
4474static void 4474static void
4475ixgbe_local_timer(void *arg) 4475ixgbe_local_timer(void *arg)
4476{ 4476{
4477 struct adapter *adapter = arg; 4477 struct adapter *adapter = arg;
4478 4478
4479 if (adapter->schedule_wqs_ok) { 4479 if (adapter->schedule_wqs_ok) {
4480 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0) 4480 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4481 workqueue_enqueue(adapter->timer_wq, 4481 workqueue_enqueue(adapter->timer_wq,
4482 &adapter->timer_wc, NULL); 4482 &adapter->timer_wc, NULL);
4483 } 4483 }
4484} 4484}
4485 4485
4486static void 4486static void
4487ixgbe_handle_timer(struct work *wk, void *context) 4487ixgbe_handle_timer(struct work *wk, void *context)
4488{ 4488{
4489 struct adapter *adapter = context; 4489 struct adapter *adapter = context;
4490 struct ixgbe_hw *hw = &adapter->hw; 4490 struct ixgbe_hw *hw = &adapter->hw;
4491 device_t dev = adapter->dev; 4491 device_t dev = adapter->dev;
4492 struct ix_queue *que = adapter->queues; 4492 struct ix_queue *que = adapter->queues;
4493 u64 queues = 0; 4493 u64 queues = 0;
4494 u64 v0, v1, v2, v3, v4, v5, v6, v7; 4494 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4495 int hung = 0; 4495 int hung = 0;
4496 int i; 4496 int i;
4497 4497
4498 IXGBE_CORE_LOCK(adapter); 4498 IXGBE_CORE_LOCK(adapter);
4499 4499
4500 /* Check for pluggable optics */ 4500 /* Check for pluggable optics */
4501 if (ixgbe_is_sfp(hw)) { 4501 if (ixgbe_is_sfp(hw)) {
4502 bool was_full = hw->phy.sfp_type != ixgbe_sfp_type_not_present; 4502 bool was_full = hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4503 bool is_full = ixgbe_sfp_cage_full(hw); 4503 bool is_full = ixgbe_sfp_cage_full(adapter);
4504 4504
4505 /* do probe if cage state changed */ 4505 /* do probe if cage state changed */
4506 if (was_full ^ is_full) { 4506 if (was_full ^ is_full) {
4507 atomic_or_32(&adapter->task_requests, 4507 atomic_or_32(&adapter->task_requests,
4508 IXGBE_REQUEST_TASK_MOD); 4508 IXGBE_REQUEST_TASK_MOD);
4509 ixgbe_schedule_admin_tasklet(adapter); 4509 ixgbe_schedule_admin_tasklet(adapter);
4510 } 4510 }
4511 } 4511 }
4512 4512
4513 ixgbe_update_link_status(adapter); 4513 ixgbe_update_link_status(adapter);
4514 ixgbe_update_stats_counters(adapter); 4514 ixgbe_update_stats_counters(adapter);
4515 4515
4516 /* Update some event counters */ 4516 /* Update some event counters */
4517 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0; 4517 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4518 que = adapter->queues; 4518 que = adapter->queues;
4519 for (i = 0; i < adapter->num_queues; i++, que++) { 4519 for (i = 0; i < adapter->num_queues; i++, que++) {
4520 struct tx_ring *txr = que->txr; 4520 struct tx_ring *txr = que->txr;
4521 4521
4522 v0 += txr->q_efbig_tx_dma_setup; 4522 v0 += txr->q_efbig_tx_dma_setup;
4523 v1 += txr->q_mbuf_defrag_failed; 4523 v1 += txr->q_mbuf_defrag_failed;
4524 v2 += txr->q_efbig2_tx_dma_setup; 4524 v2 += txr->q_efbig2_tx_dma_setup;
4525 v3 += txr->q_einval_tx_dma_setup; 4525 v3 += txr->q_einval_tx_dma_setup;
4526 v4 += txr->q_other_tx_dma_setup; 4526 v4 += txr->q_other_tx_dma_setup;
4527 v5 += txr->q_eagain_tx_dma_setup; 4527 v5 += txr->q_eagain_tx_dma_setup;
4528 v6 += txr->q_enomem_tx_dma_setup; 4528 v6 += txr->q_enomem_tx_dma_setup;
4529 v7 += txr->q_tso_err; 4529 v7 += txr->q_tso_err;
4530 } 4530 }
4531 adapter->efbig_tx_dma_setup.ev_count = v0; 4531 adapter->efbig_tx_dma_setup.ev_count = v0;
4532 adapter->mbuf_defrag_failed.ev_count = v1; 4532 adapter->mbuf_defrag_failed.ev_count = v1;
4533 adapter->efbig2_tx_dma_setup.ev_count = v2; 4533 adapter->efbig2_tx_dma_setup.ev_count = v2;
4534 adapter->einval_tx_dma_setup.ev_count = v3; 4534 adapter->einval_tx_dma_setup.ev_count = v3;
4535 adapter->other_tx_dma_setup.ev_count = v4; 4535 adapter->other_tx_dma_setup.ev_count = v4;
4536 adapter->eagain_tx_dma_setup.ev_count = v5; 4536 adapter->eagain_tx_dma_setup.ev_count = v5;
4537 adapter->enomem_tx_dma_setup.ev_count = v6; 4537 adapter->enomem_tx_dma_setup.ev_count = v6;
4538 adapter->tso_err.ev_count = v7; 4538 adapter->tso_err.ev_count = v7;
4539 4539
4540 /* 4540 /*
4541 * Check the TX queues status 4541 * Check the TX queues status
4542 * - mark hung queues so we don't schedule on them 4542 * - mark hung queues so we don't schedule on them
4543 * - watchdog only if all queues show hung 4543 * - watchdog only if all queues show hung
4544 */ 4544 */
4545 que = adapter->queues; 4545 que = adapter->queues;
4546 for (i = 0; i < adapter->num_queues; i++, que++) { 4546 for (i = 0; i < adapter->num_queues; i++, que++) {
4547 /* Keep track of queues with work for soft irq */ 4547 /* Keep track of queues with work for soft irq */
4548 if (que->txr->busy) 4548 if (que->txr->busy)
4549 queues |= 1ULL << que->me; 4549 queues |= 1ULL << que->me;
4550 /* 4550 /*
4551		 * Each time txeof runs without cleaning while there	4551		 * Each time txeof runs without cleaning while there
4552		 * are uncleaned descriptors, it increments busy. If	4552		 * are uncleaned descriptors, it increments busy. If
4553		 * we reach the MAX, we declare the queue hung.	4553		 * we reach the MAX, we declare the queue hung.
4554 */ 4554 */
4555 if (que->busy == IXGBE_QUEUE_HUNG) { 4555 if (que->busy == IXGBE_QUEUE_HUNG) {
4556 ++hung; 4556 ++hung;
4557 /* Mark the queue as inactive */ 4557 /* Mark the queue as inactive */
4558 adapter->active_queues &= ~(1ULL << que->me); 4558 adapter->active_queues &= ~(1ULL << que->me);
4559 continue; 4559 continue;
4560 } else { 4560 } else {
4561 /* Check if we've come back from hung */ 4561 /* Check if we've come back from hung */
4562 if ((adapter->active_queues & (1ULL << que->me)) == 0) 4562 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4563 adapter->active_queues |= 1ULL << que->me; 4563 adapter->active_queues |= 1ULL << que->me;
4564 } 4564 }
4565 if (que->busy >= IXGBE_MAX_TX_BUSY) { 4565 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4566 device_printf(dev, 4566 device_printf(dev,
4567 "Warning queue %d appears to be hung!\n", i); 4567 "Warning queue %d appears to be hung!\n", i);
4568 que->txr->busy = IXGBE_QUEUE_HUNG; 4568 que->txr->busy = IXGBE_QUEUE_HUNG;
4569 ++hung; 4569 ++hung;
4570 } 4570 }
4571 } 4571 }
4572 4572
4573 /* Only truly watchdog if all queues show hung */ 4573 /* Only truly watchdog if all queues show hung */
4574 if (hung == adapter->num_queues) 4574 if (hung == adapter->num_queues)
4575 goto watchdog; 4575 goto watchdog;
4576#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */ 4576#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4577 else if (queues != 0) { /* Force an IRQ on queues with work */ 4577 else if (queues != 0) { /* Force an IRQ on queues with work */
4578 que = adapter->queues; 4578 que = adapter->queues;
4579 for (i = 0; i < adapter->num_queues; i++, que++) { 4579 for (i = 0; i < adapter->num_queues; i++, que++) {
4580 mutex_enter(&que->dc_mtx); 4580 mutex_enter(&que->dc_mtx);
4581 if (que->disabled_count == 0) 4581 if (que->disabled_count == 0)
4582 ixgbe_rearm_queues(adapter, 4582 ixgbe_rearm_queues(adapter,
4583 queues & ((u64)1 << i)); 4583 queues & ((u64)1 << i));
4584 mutex_exit(&que->dc_mtx); 4584 mutex_exit(&que->dc_mtx);
4585 } 4585 }
4586 } 4586 }
4587#endif 4587#endif
4588 4588
4589 atomic_store_relaxed(&adapter->timer_pending, 0); 4589 atomic_store_relaxed(&adapter->timer_pending, 0);
4590 IXGBE_CORE_UNLOCK(adapter); 4590 IXGBE_CORE_UNLOCK(adapter);
4591 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 4591 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4592 return; 4592 return;
4593 4593
4594watchdog: 4594watchdog:
4595 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 4595 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4596 adapter->ifp->if_flags &= ~IFF_RUNNING; 4596 adapter->ifp->if_flags &= ~IFF_RUNNING;
4597 adapter->watchdog_events.ev_count++; 4597 adapter->watchdog_events.ev_count++;
4598 ixgbe_init_locked(adapter); 4598 ixgbe_init_locked(adapter);
4599 IXGBE_CORE_UNLOCK(adapter); 4599 IXGBE_CORE_UNLOCK(adapter);
4600} /* ixgbe_handle_timer */ 4600} /* ixgbe_handle_timer */
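The hang accounting above can be summarized: txeof bumps a per-queue busy counter whenever descriptors are pending but none were cleaned, the timer marks a queue hung once the counter reaches the limit, and the watchdog fires only when every queue is hung. A compact model (constants and the single busy field are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_TX_BUSY	8	/* illustrative limit */
	#define QUEUE_HUNG	(~0u)

	struct queue { unsigned busy; };

	static bool
	timer_check(struct queue *q, int nq)
	{
		int hung = 0;

		for (int i = 0; i < nq; i++) {
			if (q[i].busy >= MAX_TX_BUSY) {
				q[i].busy = QUEUE_HUNG;	/* stop scheduling */
				hung++;
			}
		}
		return hung == nq;	/* true -> run the watchdog */
	}

	int
	main(void)
	{
		struct queue q[2] = { { 8 }, { 3 } };
		/* Prints 0: one queue is still making progress. */
		printf("watchdog: %d\n", timer_check(q, 2));
		return 0;
	}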
4601 4601
4602/************************************************************************ 4602/************************************************************************
4603 * ixgbe_recovery_mode_timer - Recovery mode timer routine 4603 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4604 ************************************************************************/ 4604 ************************************************************************/
4605static void 4605static void
4606ixgbe_recovery_mode_timer(void *arg) 4606ixgbe_recovery_mode_timer(void *arg)
4607{ 4607{
4608 struct adapter *adapter = arg; 4608 struct adapter *adapter = arg;
4609 4609
4610 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending, 0, 1) == 0) 4610 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending, 0, 1) == 0)
4611 { 4611 {
4612 workqueue_enqueue(adapter->recovery_mode_timer_wq, 4612 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4613 &adapter->recovery_mode_timer_wc, NULL); 4613 &adapter->recovery_mode_timer_wc, NULL);
4614 } 4614 }
4615} 4615}
4616 4616
4617static void 4617static void
4618ixgbe_handle_recovery_mode_timer(struct work *wk, void *context) 4618ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4619{ 4619{
4620 struct adapter *adapter = context; 4620 struct adapter *adapter = context;
4621 struct ixgbe_hw *hw = &adapter->hw; 4621 struct ixgbe_hw *hw = &adapter->hw;
4622 4622
4623 IXGBE_CORE_LOCK(adapter); 4623 IXGBE_CORE_LOCK(adapter);
4624 if (ixgbe_fw_recovery_mode(hw)) { 4624 if (ixgbe_fw_recovery_mode(hw)) {
4625 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) { 4625 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4626 /* Firmware error detected, entering recovery mode */ 4626 /* Firmware error detected, entering recovery mode */
4627 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 4627 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4628 4628
4629 if (hw->adapter_stopped == FALSE) 4629 if (hw->adapter_stopped == FALSE)
4630 ixgbe_stop(adapter); 4630 ixgbe_stop(adapter);
4631 } 4631 }
4632 } else 4632 } else
4633 atomic_cas_uint(&adapter->recovery_mode, 1, 0); 4633 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4634 4634
4635 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0); 4635 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4636 callout_reset(&adapter->recovery_mode_timer, hz, 4636 callout_reset(&adapter->recovery_mode_timer, hz,
4637 ixgbe_recovery_mode_timer, adapter); 4637 ixgbe_recovery_mode_timer, adapter);
4638 IXGBE_CORE_UNLOCK(adapter); 4638 IXGBE_CORE_UNLOCK(adapter);
4639} /* ixgbe_handle_recovery_mode_timer */ 4639} /* ixgbe_handle_recovery_mode_timer */
4640 4640
4641/************************************************************************ 4641/************************************************************************
4642 * ixgbe_sfp_cage_full 4642 * ixgbe_sfp_cage_full
4643 * 4643 *
4644 * Determine whether a port has optics inserted.	4644 * Determine whether a port has optics inserted.
4645 ************************************************************************/ 4645 ************************************************************************/
4646static bool 4646static bool
4647ixgbe_sfp_cage_full(struct adapter *adapter) 4647ixgbe_sfp_cage_full(struct adapter *adapter)
4648{ 4648{
4649 struct ixgbe_hw *hw = &adapter->hw; 4649 struct ixgbe_hw *hw = &adapter->hw;
4650 uint32_t mask; 4650 uint32_t mask;
4651 int rv; 4651 int rv;
4652 4652
4653 if (hw->mac.type >= ixgbe_mac_X540) 4653 if (hw->mac.type >= ixgbe_mac_X540)
4654 mask = IXGBE_ESDP_SDP0; 4654 mask = IXGBE_ESDP_SDP0;
4655 else 4655 else
4656 mask = IXGBE_ESDP_SDP2; 4656 mask = IXGBE_ESDP_SDP2;
4657 4657
4658 rv = IXGBE_READ_REG(hw, IXGBE_ESDP) & mask; 4658 rv = IXGBE_READ_REG(hw, IXGBE_ESDP) & mask;
4659 if ((adapter->quirks & IXGBE_QUIRK_MOD_ABS_INVERT) != 0) 4659 if ((adapter->quirks & IXGBE_QUIRK_MOD_ABS_INVERT) != 0)
4660 rv = !rv; 4660 rv = !rv;
4661 4661
4662 if (hw->mac.type == ixgbe_mac_X550EM_a) { 4662 if (hw->mac.type == ixgbe_mac_X550EM_a) {
4663		/* X550EM_a's SDP0 sense is inverted compared to the others. */	4663		/* X550EM_a's SDP0 sense is inverted compared to the others. */
4664 return !rv; 4664 return !rv;
4665 } 4665 }
4666 4666
4667 return rv; 4667 return rv;
4668} /* ixgbe_sfp_cage_full */ 4668} /* ixgbe_sfp_cage_full */
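Since two of the conditions above invert the pin sense, it helps to see that they cancel when both apply, which is exactly what the XXX comment in ixgbe_handle_mod() below is wary of. A sketch of the same logic (parameter names are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * The SDP pin reads nonzero when the cage is full on most MACs;
	 * a board quirk may invert that, and the X550EM_a inverts it
	 * again, so a double inversion yields the original sense.
	 */
	static bool
	cage_full(unsigned esdp, unsigned mask, bool quirk_invert, bool x550em_a)
	{
		bool rv = (esdp & mask) != 0;

		if (quirk_invert)
			rv = !rv;
		if (x550em_a)
			rv = !rv;	/* inverted SDP0 sense */
		return rv;
	}

	int
	main(void)
	{
		/* X550EM_a with the pin low: the cage is reported full. */
		printf("full = %d\n", cage_full(0x0, 0x1, false, true));
		return 0;
	}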
4669 4669
4670/************************************************************************ 4670/************************************************************************
4671 * ixgbe_handle_mod - Tasklet for SFP module interrupts 4671 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4672 ************************************************************************/ 4672 ************************************************************************/
4673static void 4673static void
4674ixgbe_handle_mod(void *context) 4674ixgbe_handle_mod(void *context)
4675{ 4675{
4676 struct adapter *adapter = context; 4676 struct adapter *adapter = context;
4677 struct ixgbe_hw *hw = &adapter->hw; 4677 struct ixgbe_hw *hw = &adapter->hw;
4678 device_t dev = adapter->dev; 4678 device_t dev = adapter->dev;
4679 u32 err, cage_full = 0; 4679 u32 err, cage_full = 0;
4680 4680
4681 ++adapter->mod_workev.ev_count; 4681 ++adapter->mod_workev.ev_count;
4682 if (adapter->hw.need_crosstalk_fix) { 4682 if (adapter->hw.need_crosstalk_fix) {
4683 switch (hw->mac.type) { 4683 switch (hw->mac.type) {
4684 case ixgbe_mac_82599EB: 4684 case ixgbe_mac_82599EB:
4685 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 4685 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4686 IXGBE_ESDP_SDP2; 4686 IXGBE_ESDP_SDP2;
4687 break; 4687 break;
4688 case ixgbe_mac_X550EM_x: 4688 case ixgbe_mac_X550EM_x:
4689 case ixgbe_mac_X550EM_a: 4689 case ixgbe_mac_X550EM_a:
4690 /* 4690 /*
4691 * XXX See ixgbe_sfp_cage_full(). It seems the bit is 4691 * XXX See ixgbe_sfp_cage_full(). It seems the bit is
4692 * inverted on X550EM_a, so I think this is incorrect. 4692 * inverted on X550EM_a, so I think this is incorrect.
4693 */ 4693 */
4694 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 4694 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4695 IXGBE_ESDP_SDP0; 4695 IXGBE_ESDP_SDP0;
4696 break; 4696 break;
4697 default: 4697 default:
4698 break; 4698 break;
4699 } 4699 }
4700 4700
4701 if (!cage_full) 4701 if (!cage_full)
4702 goto out; 4702 goto out;
4703 } 4703 }
4704 4704
4705 err = hw->phy.ops.identify_sfp(hw); 4705 err = hw->phy.ops.identify_sfp(hw);
4706 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4706 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4707 device_printf(dev, 4707 device_printf(dev,
4708 "Unsupported SFP+ module type was detected.\n"); 4708 "Unsupported SFP+ module type was detected.\n");
4709 goto out; 4709 goto out;
4710 } 4710 }
4711 4711
4712 if (hw->need_unsupported_sfp_recovery) { 4712 if (hw->need_unsupported_sfp_recovery) {
4713 device_printf(dev, "Recovering from unsupported SFP\n"); 4713 device_printf(dev, "Recovering from unsupported SFP\n");
4714 /* 4714 /*
4715		 * We could recover the status by calling setup_sfp(),	4715		 * We could recover the status by calling setup_sfp(),
4716		 * setup_link() and some others, but that is complex	4716		 * setup_link() and some others, but that is complex
4717		 * and might not work correctly in some unknown cases.	4717		 * and might not work correctly in some unknown cases.
4718		 * To avoid that kind of problem, call	4718		 * To avoid that kind of problem, call
4719		 * ixgbe_init_locked(). It's a simple and safe approach.	4719		 * ixgbe_init_locked(). It's a simple and safe approach.
4720 */ 4720 */
4721 ixgbe_init_locked(adapter); 4721 ixgbe_init_locked(adapter);
4722 } else { 4722 } else {
4723 if (hw->mac.type == ixgbe_mac_82598EB) 4723 if (hw->mac.type == ixgbe_mac_82598EB)
4724 err = hw->phy.ops.reset(hw); 4724 err = hw->phy.ops.reset(hw);
4725 else { 4725 else {
4726 err = hw->mac.ops.setup_sfp(hw); 4726 err = hw->mac.ops.setup_sfp(hw);
4727 hw->phy.sfp_setup_needed = FALSE; 4727 hw->phy.sfp_setup_needed = FALSE;
4728 } 4728 }
4729 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4729 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4730 device_printf(dev, 4730 device_printf(dev,
4731 "Setup failure - unsupported SFP+ module type.\n"); 4731 "Setup failure - unsupported SFP+ module type.\n");
4732 goto out; 4732 goto out;
4733 } 4733 }
4734 } 4734 }
4735 4735
4736out: 4736out:
4737	/* ixgbe_get_supported_physical_layer will call hw->phy.ops.identify_sfp() */	4737	/* ixgbe_get_supported_physical_layer will call hw->phy.ops.identify_sfp() */
4738 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 4738 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4739 4739
4740 /* Adjust media types shown in ifconfig */ 4740 /* Adjust media types shown in ifconfig */
4741 IXGBE_CORE_UNLOCK(adapter); 4741 IXGBE_CORE_UNLOCK(adapter);
4742 ifmedia_removeall(&adapter->media); 4742 ifmedia_removeall(&adapter->media);
4743 ixgbe_add_media_types(adapter); 4743 ixgbe_add_media_types(adapter);
4744 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 4744 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4745 IXGBE_CORE_LOCK(adapter); 4745 IXGBE_CORE_LOCK(adapter);
4746 4746
4747 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF); 4747 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF);
4748 /* 4748 /*
4749 * Don't call ixgbe_schedule_admin_tasklet() because we are on 4749 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4750 * the workqueue now. 4750 * the workqueue now.
4751 */ 4751 */
4752} /* ixgbe_handle_mod */ 4752} /* ixgbe_handle_mod */
4753 4753
4754 4754
4755/************************************************************************ 4755/************************************************************************
4756 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 4756 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4757 ************************************************************************/ 4757 ************************************************************************/
4758static void 4758static void
4759ixgbe_handle_msf(void *context) 4759ixgbe_handle_msf(void *context)
4760{ 4760{
4761 struct adapter *adapter = context; 4761 struct adapter *adapter = context;
4762 struct ixgbe_hw *hw = &adapter->hw; 4762 struct ixgbe_hw *hw = &adapter->hw;
4763 u32 autoneg; 4763 u32 autoneg;
4764 bool negotiate; 4764 bool negotiate;
4765 4765
4766 ++adapter->msf_workev.ev_count; 4766 ++adapter->msf_workev.ev_count;
4767 4767
4768 autoneg = hw->phy.autoneg_advertised; 4768 autoneg = hw->phy.autoneg_advertised;
4769 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 4769 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4770 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 4770 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4771 if (hw->mac.ops.setup_link) 4771 if (hw->mac.ops.setup_link)
4772 hw->mac.ops.setup_link(hw, autoneg, TRUE); 4772 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4773} /* ixgbe_handle_msf */ 4773} /* ixgbe_handle_msf */
4774 4774
4775/************************************************************************ 4775/************************************************************************
4776 * ixgbe_handle_phy - Tasklet for external PHY interrupts 4776 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4777 ************************************************************************/ 4777 ************************************************************************/
4778static void 4778static void
4779ixgbe_handle_phy(void *context) 4779ixgbe_handle_phy(void *context)
4780{ 4780{
4781 struct adapter *adapter = context; 4781 struct adapter *adapter = context;
4782 struct ixgbe_hw *hw = &adapter->hw; 4782 struct ixgbe_hw *hw = &adapter->hw;
4783 int error; 4783 int error;
4784 4784
4785 ++adapter->phy_workev.ev_count; 4785 ++adapter->phy_workev.ev_count;
4786 error = hw->phy.ops.handle_lasi(hw); 4786 error = hw->phy.ops.handle_lasi(hw);
4787 if (error == IXGBE_ERR_OVERTEMP) 4787 if (error == IXGBE_ERR_OVERTEMP)
4788 device_printf(adapter->dev, 4788 device_printf(adapter->dev,
4789 "CRITICAL: EXTERNAL PHY OVER TEMP!! " 4789 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4790 " PHY will downshift to lower power state!\n"); 4790 " PHY will downshift to lower power state!\n");
4791 else if (error) 4791 else if (error)
4792 device_printf(adapter->dev, 4792 device_printf(adapter->dev,
4793 "Error handling LASI interrupt: %d\n", error); 4793 "Error handling LASI interrupt: %d\n", error);
4794} /* ixgbe_handle_phy */ 4794} /* ixgbe_handle_phy */
4795 4795
4796static void 4796static void
4797ixgbe_handle_admin(struct work *wk, void *context) 4797ixgbe_handle_admin(struct work *wk, void *context)
4798{ 4798{
4799 struct adapter *adapter = context; 4799 struct adapter *adapter = context;
4800 struct ifnet *ifp = adapter->ifp; 4800 struct ifnet *ifp = adapter->ifp;
4801 struct ixgbe_hw *hw = &adapter->hw; 4801 struct ixgbe_hw *hw = &adapter->hw;
4802 u32 req; 4802 u32 req;
4803 4803
4804 /* 4804 /*
4805 * Hold the IFNET_LOCK across this entire call. This will 4805 * Hold the IFNET_LOCK across this entire call. This will
4806 * prevent additional changes to adapter->phy_layer 4806 * prevent additional changes to adapter->phy_layer
4807 * and serialize calls to this tasklet. We cannot hold the 4807 * and serialize calls to this tasklet. We cannot hold the
4808 * CORE_LOCK while calling into the ifmedia functions as 4808 * CORE_LOCK while calling into the ifmedia functions as
4809 * they call ifmedia_lock() and the lock is CORE_LOCK. 4809 * they call ifmedia_lock() and the lock is CORE_LOCK.
4810 */ 4810 */
4811 IFNET_LOCK(ifp); 4811 IFNET_LOCK(ifp);
4812 IXGBE_CORE_LOCK(adapter); 4812 IXGBE_CORE_LOCK(adapter);
4813 while ((req = 4813 while ((req =
4814 (adapter->task_requests & ~IXGBE_REQUEST_TASK_NEED_ACKINTR)) 4814 (adapter->task_requests & ~IXGBE_REQUEST_TASK_NEED_ACKINTR))
4815 != 0) { 4815 != 0) {
4816 if ((req & IXGBE_REQUEST_TASK_LSC) != 0) { 4816 if ((req & IXGBE_REQUEST_TASK_LSC) != 0) {
4817 ixgbe_handle_link(adapter); 4817 ixgbe_handle_link(adapter);
4818 atomic_and_32(&adapter->task_requests, 4818 atomic_and_32(&adapter->task_requests,
4819 ~IXGBE_REQUEST_TASK_LSC); 4819 ~IXGBE_REQUEST_TASK_LSC);
4820 } 4820 }
4821 if ((req & IXGBE_REQUEST_TASK_MOD) != 0) { 4821 if ((req & IXGBE_REQUEST_TASK_MOD) != 0) {
4822 ixgbe_handle_mod(adapter); 4822 ixgbe_handle_mod(adapter);
4823 atomic_and_32(&adapter->task_requests, 4823 atomic_and_32(&adapter->task_requests,
4824 ~IXGBE_REQUEST_TASK_MOD); 4824 ~IXGBE_REQUEST_TASK_MOD);
4825 } 4825 }
4826 if ((req & IXGBE_REQUEST_TASK_MSF) != 0) { 4826 if ((req & IXGBE_REQUEST_TASK_MSF) != 0) {
4827 ixgbe_handle_msf(adapter); 4827 ixgbe_handle_msf(adapter);
4828 atomic_and_32(&adapter->task_requests, 4828 atomic_and_32(&adapter->task_requests,
4829 ~IXGBE_REQUEST_TASK_MSF); 4829 ~IXGBE_REQUEST_TASK_MSF);
4830 } 4830 }
4831 if ((req & IXGBE_REQUEST_TASK_PHY) != 0) { 4831 if ((req & IXGBE_REQUEST_TASK_PHY) != 0) {
4832 ixgbe_handle_phy(adapter); 4832 ixgbe_handle_phy(adapter);
4833 atomic_and_32(&adapter->task_requests, 4833 atomic_and_32(&adapter->task_requests,
4834 ~IXGBE_REQUEST_TASK_PHY); 4834 ~IXGBE_REQUEST_TASK_PHY);
4835 } 4835 }
4836 if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) { 4836 if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) {
4837 ixgbe_reinit_fdir(adapter); 4837 ixgbe_reinit_fdir(adapter);
4838 atomic_and_32(&adapter->task_requests, 4838 atomic_and_32(&adapter->task_requests,
4839 ~IXGBE_REQUEST_TASK_FDIR); 4839 ~IXGBE_REQUEST_TASK_FDIR);
4840 } 4840 }
4841#if 0 /* notyet */ 4841#if 0 /* notyet */
4842 if ((req & IXGBE_REQUEST_TASK_MBX) != 0) { 4842 if ((req & IXGBE_REQUEST_TASK_MBX) != 0) {
4843 ixgbe_handle_mbx(adapter); 4843 ixgbe_handle_mbx(adapter);
4844 atomic_and_32(&adapter->task_requests, 4844 atomic_and_32(&adapter->task_requests,
4845 ~IXGBE_REQUEST_TASK_MBX); 4845 ~IXGBE_REQUEST_TASK_MBX);
4846 } 4846 }
4847#endif 4847#endif
4848 } 4848 }
4849 atomic_store_relaxed(&adapter->admin_pending, 0); 4849 atomic_store_relaxed(&adapter->admin_pending, 0);
4850 if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) { 4850 if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
4851 atomic_and_32(&adapter->task_requests, 4851 atomic_and_32(&adapter->task_requests,
4852 ~IXGBE_REQUEST_TASK_NEED_ACKINTR); 4852 ~IXGBE_REQUEST_TASK_NEED_ACKINTR);
4853 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) { 4853 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
4854 /* Re-enable other interrupts */ 4854 /* Re-enable other interrupts */
4855 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 4855 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
4856 } else 4856 } else
4857 ixgbe_enable_intr(adapter); 4857 ixgbe_enable_intr(adapter);
4858 } 4858 }
4859 4859
4860 IXGBE_CORE_UNLOCK(adapter); 4860 IXGBE_CORE_UNLOCK(adapter);
4861 IFNET_UNLOCK(ifp); 4861 IFNET_UNLOCK(ifp);
4862} /* ixgbe_handle_admin */ 4862} /* ixgbe_handle_admin */
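The dispatch loop above follows a common request-flag pattern: interrupt handlers OR bits into task_requests, and the worker drains until none remain, clearing each bit only after its handler has run so a request posted mid-loop is never lost. A sketch using C11 atomics in place of NetBSD's atomic_or_32/atomic_and_32 (the NEED_ACKINTR bookkeeping is omitted; bit values are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	#define TASK_LSC	0x01	/* illustrative bit values */
	#define TASK_MOD	0x02

	static _Atomic unsigned task_requests;

	static void
	handle_admin(void)
	{
		unsigned req;

		while ((req = atomic_load(&task_requests)) != 0) {
			if (req & TASK_LSC) {
				printf("link state change handled\n");
				atomic_fetch_and(&task_requests, ~TASK_LSC);
			}
			if (req & TASK_MOD) {
				printf("module insertion handled\n");
				atomic_fetch_and(&task_requests, ~TASK_MOD);
			}
		}
	}

	int
	main(void)
	{
		/* What an interrupt handler would do before scheduling. */
		atomic_fetch_or(&task_requests, TASK_LSC | TASK_MOD);
		handle_admin();
		return 0;
	}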
4863 4863
4864static void 4864static void
4865ixgbe_ifstop(struct ifnet *ifp, int disable) 4865ixgbe_ifstop(struct ifnet *ifp, int disable)
4866{ 4866{
4867 struct adapter *adapter = ifp->if_softc; 4867 struct adapter *adapter = ifp->if_softc;
4868 4868
4869 IXGBE_CORE_LOCK(adapter); 4869 IXGBE_CORE_LOCK(adapter);
4870 ixgbe_stop(adapter); 4870 ixgbe_stop(adapter);
4871 IXGBE_CORE_UNLOCK(adapter); 4871 IXGBE_CORE_UNLOCK(adapter);
4872 4872
4873 workqueue_wait(adapter->timer_wq, &adapter->timer_wc); 4873 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4874 atomic_store_relaxed(&adapter->timer_pending, 0); 4874 atomic_store_relaxed(&adapter->timer_pending, 0);
4875} 4875}
4876 4876
4877/************************************************************************ 4877/************************************************************************
4878 * ixgbe_stop - Stop the hardware 4878 * ixgbe_stop - Stop the hardware
4879 * 4879 *
4880 * Disables all traffic on the adapter by issuing a 4880 * Disables all traffic on the adapter by issuing a
4881 * global reset on the MAC and deallocates TX/RX buffers. 4881 * global reset on the MAC and deallocates TX/RX buffers.
4882 ************************************************************************/ 4882 ************************************************************************/
4883static void 4883static void
4884ixgbe_stop(void *arg) 4884ixgbe_stop(void *arg)
4885{ 4885{
4886 struct ifnet *ifp; 4886 struct ifnet *ifp;
4887 struct adapter *adapter = arg; 4887 struct adapter *adapter = arg;
4888 struct ixgbe_hw *hw = &adapter->hw; 4888 struct ixgbe_hw *hw = &adapter->hw;
4889 4889
4890 ifp = adapter->ifp; 4890 ifp = adapter->ifp;
4891 4891
4892 KASSERT(mutex_owned(&adapter->core_mtx)); 4892 KASSERT(mutex_owned(&adapter->core_mtx));
4893 4893
4894 INIT_DEBUGOUT("ixgbe_stop: begin\n"); 4894 INIT_DEBUGOUT("ixgbe_stop: begin\n");
4895 ixgbe_disable_intr(adapter); 4895 ixgbe_disable_intr(adapter);
4896 callout_stop(&adapter->timer); 4896 callout_stop(&adapter->timer);
4897 4897
4898 /* Don't schedule workqueues. */ 4898 /* Don't schedule workqueues. */
4899 adapter->schedule_wqs_ok = false; 4899 adapter->schedule_wqs_ok = false;
4900 4900
4901 /* Let the stack know...*/ 4901 /* Let the stack know...*/
4902 ifp->if_flags &= ~IFF_RUNNING; 4902 ifp->if_flags &= ~IFF_RUNNING;
4903 4903
4904 ixgbe_reset_hw(hw); 4904 ixgbe_reset_hw(hw);
4905 hw->adapter_stopped = FALSE; 4905 hw->adapter_stopped = FALSE;
4906 ixgbe_stop_adapter(hw); 4906 ixgbe_stop_adapter(hw);
4907 if (hw->mac.type == ixgbe_mac_82599EB) 4907 if (hw->mac.type == ixgbe_mac_82599EB)
4908 ixgbe_stop_mac_link_on_d3_82599(hw); 4908 ixgbe_stop_mac_link_on_d3_82599(hw);
4909 /* Turn off the laser - noop with no optics */ 4909 /* Turn off the laser - noop with no optics */
4910 ixgbe_disable_tx_laser(hw); 4910 ixgbe_disable_tx_laser(hw);
4911 4911
4912 /* Update the stack */ 4912 /* Update the stack */
4913 adapter->link_up = FALSE; 4913 adapter->link_up = FALSE;
4914 ixgbe_update_link_status(adapter); 4914 ixgbe_update_link_status(adapter);
4915 4915
4916 /* reprogram the RAR[0] in case user changed it. */ 4916 /* reprogram the RAR[0] in case user changed it. */
4917 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 4917 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4918 4918
4919 return; 4919 return;
4920} /* ixgbe_stop */ 4920} /* ixgbe_stop */
4921 4921
4922/************************************************************************ 4922/************************************************************************
4923 * ixgbe_update_link_status - Update OS on link state 4923 * ixgbe_update_link_status - Update OS on link state
4924 * 4924 *
4925 * Note: Only updates the OS on the cached link state. 4925 * Note: Only updates the OS on the cached link state.
4926 * The real check of the hardware only happens with 4926 * The real check of the hardware only happens with
4927 * a link interrupt. 4927 * a link interrupt.
4928 ************************************************************************/ 4928 ************************************************************************/
4929static void 4929static void
4930ixgbe_update_link_status(struct adapter *adapter) 4930ixgbe_update_link_status(struct adapter *adapter)
4931{ 4931{
4932 struct ifnet *ifp = adapter->ifp; 4932 struct ifnet *ifp = adapter->ifp;
4933 device_t dev = adapter->dev; 4933 device_t dev = adapter->dev;
4934 struct ixgbe_hw *hw = &adapter->hw; 4934 struct ixgbe_hw *hw = &adapter->hw;
4935 4935
4936 KASSERT(mutex_owned(&adapter->core_mtx)); 4936 KASSERT(mutex_owned(&adapter->core_mtx));
4937 4937
4938 if (adapter->link_up) { 4938 if (adapter->link_up) {
4939 if (adapter->link_active != LINK_STATE_UP) { 4939 if (adapter->link_active != LINK_STATE_UP) {
4940 /* 4940 /*
4941			 * Reset to eliminate the influence of the previous	4941			 * Reset to eliminate the influence of the previous
4942			 * state, in the same way as ixgbe_init_locked().	4942			 * state, in the same way as ixgbe_init_locked().
4943 */ 4943 */
4944 struct ix_queue *que = adapter->queues; 4944 struct ix_queue *que = adapter->queues;
4945 for (int i = 0; i < adapter->num_queues; i++, que++) 4945 for (int i = 0; i < adapter->num_queues; i++, que++)
4946 que->eitr_setting = 0; 4946 que->eitr_setting = 0;
4947 4947
4948 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){ 4948 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
4949 /* 4949 /*
4950 * Discard count for both MAC Local Fault and 4950 * Discard count for both MAC Local Fault and
4951 * Remote Fault because those registers are 4951 * Remote Fault because those registers are
4952 * valid only when the link speed is up and 4952 * valid only when the link speed is up and
4953 * 10Gbps. 4953 * 10Gbps.
4954 */ 4954 */
4955 IXGBE_READ_REG(hw, IXGBE_MLFC); 4955 IXGBE_READ_REG(hw, IXGBE_MLFC);
4956 IXGBE_READ_REG(hw, IXGBE_MRFC); 4956 IXGBE_READ_REG(hw, IXGBE_MRFC);
4957 } 4957 }
4958 4958
4959 if (bootverbose) { 4959 if (bootverbose) {
4960 const char *bpsmsg; 4960 const char *bpsmsg;
4961 4961
4962 switch (adapter->link_speed) { 4962 switch (adapter->link_speed) {
4963 case IXGBE_LINK_SPEED_10GB_FULL: 4963 case IXGBE_LINK_SPEED_10GB_FULL:
4964 bpsmsg = "10 Gbps"; 4964 bpsmsg = "10 Gbps";
4965 break; 4965 break;
4966 case IXGBE_LINK_SPEED_5GB_FULL: 4966 case IXGBE_LINK_SPEED_5GB_FULL:
4967 bpsmsg = "5 Gbps"; 4967 bpsmsg = "5 Gbps";
4968 break; 4968 break;
4969 case IXGBE_LINK_SPEED_2_5GB_FULL: 4969 case IXGBE_LINK_SPEED_2_5GB_FULL:
4970 bpsmsg = "2.5 Gbps"; 4970 bpsmsg = "2.5 Gbps";
4971 break; 4971 break;
4972 case IXGBE_LINK_SPEED_1GB_FULL: 4972 case IXGBE_LINK_SPEED_1GB_FULL:
4973 bpsmsg = "1 Gbps"; 4973 bpsmsg = "1 Gbps";
4974 break; 4974 break;
4975 case IXGBE_LINK_SPEED_100_FULL: 4975 case IXGBE_LINK_SPEED_100_FULL:
4976 bpsmsg = "100 Mbps"; 4976 bpsmsg = "100 Mbps";
4977 break; 4977 break;
4978 case IXGBE_LINK_SPEED_10_FULL: 4978 case IXGBE_LINK_SPEED_10_FULL:
4979 bpsmsg = "10 Mbps"; 4979 bpsmsg = "10 Mbps";
4980 break; 4980 break;
4981 default: 4981 default:
4982 bpsmsg = "unknown speed"; 4982 bpsmsg = "unknown speed";
4983 break; 4983 break;
4984 } 4984 }
4985				device_printf(dev, "Link is up %s %s\n",	4985				device_printf(dev, "Link is up %s %s\n",
4986 bpsmsg, "Full Duplex"); 4986 bpsmsg, "Full Duplex");
4987 } 4987 }
4988 adapter->link_active = LINK_STATE_UP; 4988 adapter->link_active = LINK_STATE_UP;
4989 /* Update any Flow Control changes */ 4989 /* Update any Flow Control changes */
4990 ixgbe_fc_enable(&adapter->hw); 4990 ixgbe_fc_enable(&adapter->hw);
4991 /* Update DMA coalescing config */ 4991 /* Update DMA coalescing config */
4992 ixgbe_config_dmac(adapter); 4992 ixgbe_config_dmac(adapter);
4993 if_link_state_change(ifp, LINK_STATE_UP); 4993 if_link_state_change(ifp, LINK_STATE_UP);
4994 4994
4995 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 4995 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4996 ixgbe_ping_all_vfs(adapter); 4996 ixgbe_ping_all_vfs(adapter);
4997 } 4997 }
4998 } else { 4998 } else {
4999 /* 4999 /*
5000		 * Do this when the link active state changes to DOWN, i.e.	5000		 * Do this when the link active state changes to DOWN, i.e.
5001 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN 5001 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5002 * b) LINK_STATE_UP -> LINK_STATE_DOWN 5002 * b) LINK_STATE_UP -> LINK_STATE_DOWN
5003 */ 5003 */
5004 if (adapter->link_active != LINK_STATE_DOWN) { 5004 if (adapter->link_active != LINK_STATE_DOWN) {
5005 if (bootverbose) 5005 if (bootverbose)
5006 device_printf(dev, "Link is Down\n"); 5006 device_printf(dev, "Link is Down\n");
5007 if_link_state_change(ifp, LINK_STATE_DOWN); 5007 if_link_state_change(ifp, LINK_STATE_DOWN);
5008 adapter->link_active = LINK_STATE_DOWN; 5008 adapter->link_active = LINK_STATE_DOWN;
5009 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 5009 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5010 ixgbe_ping_all_vfs(adapter); 5010 ixgbe_ping_all_vfs(adapter);
5011 ixgbe_drain_all(adapter); 5011 ixgbe_drain_all(adapter);
5012 } 5012 }
5013 } 5013 }
5014} /* ixgbe_update_link_status */ 5014} /* ixgbe_update_link_status */

/************************************************************************
 * ixgbe_config_dmac - Configure DMA Coalescing
 ************************************************************************/
static void
ixgbe_config_dmac(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;

	if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
		return;

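	/*
	 * Background (not from the code itself): DMA coalescing batches
	 * the device's DMA activity into bursts so that the PCIe link
	 * and memory subsystem can stay in low-power states longer; it
	 * is an X550-generation feature, hence the MAC type check above.
	 */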
	if ((dcfg->watchdog_timer != adapter->dmac) ||
	    (dcfg->link_speed != adapter->link_speed)) {
		dcfg->watchdog_timer = adapter->dmac;
		dcfg->fcoe_en = false;
		dcfg->link_speed = adapter->link_speed;
		dcfg->num_tcs = 1;

		INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
		    dcfg->watchdog_timer, dcfg->link_speed);

		hw->mac.ops.dmac_config(hw);
	}
} /* ixgbe_config_dmac */

/************************************************************************
 * ixgbe_enable_intr
 ************************************************************************/
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask, fwsm;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

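	/*
	 * Start from all "other" causes with the RX/TX queue bits masked
	 * out; the queue vectors are enabled one by one at the end.
	 */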
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
		mask |= IXGBE_EIMS_ECC;
		/* Temperature sensor on some adapters */
		mask |= IXGBE_EIMS_GPI_SDP0;
		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
		break;
	case ixgbe_mac_X540:
		/* Detect if Thermal Sensor is enabled */
		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
		if (fwsm & IXGBE_FWSM_TS_ENABLED)
			mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550:
		/* MAC thermal sensor is automatically enabled */
		mask |= IXGBE_EIMS_TS;
		mask |= IXGBE_EIMS_ECC;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Some devices use SDP0 for important information */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
			mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			mask |= IXGBE_EICR_GPI_SDP0_X540;
		mask |= IXGBE_EIMS_ECC;
		break;
	default:
		break;
	}

	/* Enable Fan Failure detection */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		mask |= IXGBE_EIMS_GPI_SDP1;
	/* Enable SR-IOV */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
		mask |= IXGBE_EIMS_MAILBOX;
	/* Enable Flow Director */
	if (adapter->feat_en & IXGBE_FEATURE_FDIR)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

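	/*
	 * EIAC marks causes whose EICR bit the hardware clears
	 * automatically when the matching MSI-X vector fires, saving an
	 * explicit acknowledgment write in the queue handlers.  Link and
	 * other admin causes are excluded below so that the admin task
	 * can inspect and acknowledge them itself.
	 */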
	/* With MSI-X we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
			mask &= ~IXGBE_EIMS_MAILBOX;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	 * Now enable all queues; this is done separately to allow
	 * handling of the extended (beyond 32) MSI-X vectors that can
	 * be used by the 82599.
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

} /* ixgbe_enable_intr */

/************************************************************************
 * ixgbe_disable_intr_internal
 ************************************************************************/
static void
ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
{
	struct ix_queue *que = adapter->queues;

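	/*
	 * EIMC is write-one-to-mask: setting every bit except the queue
	 * bits masks all "other" causes in a single write; the queue
	 * vectors are then masked individually, honoring the per-queue
	 * disable nesting when nestok is true.
	 */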
	/* Disable interrupts other than queues */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);

	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_disable_queue_internal(adapter, que->msix, nestok);

	IXGBE_WRITE_FLUSH(&adapter->hw);

} /* ixgbe_disable_intr_internal */

/************************************************************************
 * ixgbe_disable_intr
 ************************************************************************/
static void
ixgbe_disable_intr(struct adapter *adapter)
{

	ixgbe_disable_intr_internal(adapter, true);
} /* ixgbe_disable_intr */

/************************************************************************
 * ixgbe_ensure_disabled_intr
 ************************************************************************/
void
ixgbe_ensure_disabled_intr(struct adapter *adapter)
{

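	/*
	 * Unlike ixgbe_disable_intr(), pass nestok = false: force the
	 * queue interrupts into the disabled state regardless of any
	 * nested disable count, e.g. ahead of detach.
	 */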
	ixgbe_disable_intr_internal(adapter, false);
} /* ixgbe_ensure_disabled_intr */

/************************************************************************
 * ixgbe_legacy_irq - Legacy Interrupt Service routine
 ************************************************************************/
static int
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = adapter->tx_rings;
	bool more = false;
	bool reenable_intr = true;
	u32 eicr, eicr_mask;
	u32 task_requests = 0;

	/* Silicon errata #26 on 82598 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

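	/*
	 * In legacy/MSI mode EICR is read-to-clear, so the read above
	 * both fetches and acknowledges the pending interrupt causes.
	 */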
	adapter->stats.pf.legint.ev_count++;
	++que->irqs.ev_count;
	if (eicr == 0) {
		adapter->stats.pf.intzero.ev_count++;
		if ((ifp->if_flags & IFF_UP) != 0)
			ixgbe_enable_intr(adapter);
		return 0;
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/*
		 * See ixgbe_msix_que() for the rationale behind
		 * "que->txrx_use_workqueue".
		 */
		que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
		/* Don't run ixgbe_rxeof in interrupt context */
		more = true;
#else
		more = ixgbe_rxeof(que);
#endif

		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#ifdef notyet
		if (!ixgbe_ring_empty(ifp, txr->br))
			ixgbe_start_locked(ifp, txr);
#endif
		IXGBE_TX_UNLOCK(txr);
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, true);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* Link status change */
	if (eicr & IXGBE_EICR_LSC)
		task_requests |= IXGBE_REQUEST_TASK_LSC;

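	/*
	 * SFP+ cages route module-presence and RX-LOS signals through
	 * GPIO (SDP) pins, and the pin assignment differs by MAC
	 * generation, hence the eicr_mask selection below.  A module
	 * event schedules the MOD task; an SDP1 event on 82599
	 * schedules the MSF (multispeed fiber) task.
	 */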
	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			task_requests |= IXGBE_REQUEST_TASK_MOD;
		}

		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			task_requests |= IXGBE_REQUEST_TASK_MSF;
		}
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540))
		task_requests |= IXGBE_REQUEST_TASK_PHY;

	if (more) {
		que->req.ev_count++;
		ixgbe_sched_handle_que(adapter, que);
		reenable_intr = false;
	}
	if (task_requests != 0) {
		/* Re-enabling other interrupts is done in the admin task */
		task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
		atomic_or_32(&adapter->task_requests, task_requests);
		ixgbe_schedule_admin_tasklet(adapter);
		reenable_intr = false;
	}
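	/*
	 * Re-enable here only when nothing was deferred: the queue
	 * softint/workqueue and the admin task each re-enable the
	 * interrupts they masked once they finish.
	 */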

	if (reenable_intr)
		ixgbe_enable_intr(adapter);

	return 1;
} /* ixgbe_legacy_irq */

/************************************************************************
 * ixgbe_free_pciintr_resources
 ************************************************************************/
static void
ixgbe_free_pciintr_resources(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	int rid;

	/*
	 * Release all MSI-X queue resources.
	 */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->res != NULL) {
			pci_intr_disestablish(adapter->osdep.pc,
			    adapter->osdep.ihs[i]);
			adapter->osdep.ihs[i] = NULL;
		}
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSI-X */
		rid = adapter->vector;
	else
		rid = 0;
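	/*
	 * With MSI-X the link/admin interrupt occupies its own vector,
	 * recorded in adapter->vector; with INTx or MSI everything
	 * shares vector 0.
	 */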

	if (adapter->osdep.ihs[rid] != NULL) {
		pci_intr_disestablish(adapter->osdep.pc,
		    adapter->osdep.ihs[rid]);
		adapter->osdep.ihs[rid] = NULL;
	}

	if (adapter->osdep.intrs != NULL) {
		pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
		    adapter->osdep.nintrs);
		adapter->osdep.intrs = NULL;
	}
} /* ixgbe_free_pciintr_resources */

/************************************************************************
 * ixgbe_free_pci_resources
 ************************************************************************/
static void
ixgbe_free_pci_resources(struct adapter *adapter)
{

	ixgbe_free_pciintr_resources(adapter);

	if (adapter->osdep.mem_size != 0) {
		bus_space_unmap(adapter->osdep.mem_bus_space_tag,
		    adapter->osdep.mem_bus_space_handle,
		    adapter->osdep.mem_size);
	}

} /* ixgbe_free_pci_resources */

/************************************************************************
 * ixgbe_set_sysctl_value
 ************************************************************************/
static void
ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	device_t dev = adapter->dev;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	/*
	 * No need to check recovery mode here because this function
	 * never touches the hardware.
	 */

	log = &adapter->sysctllog;
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    name, SYSCTL_DESCR(description),
	    NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
	*limit = value;
} /* ixgbe_set_sysctl_value */
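/*
 * Illustrative use (hypothetical node and variable names): calling
 *	ixgbe_set_sysctl_value(adapter, "example_limit",
 *	    "example tunable", &adapter->example_limit, 16);
 * would create a read-write integer sysctl under the adapter's
 * instance node (typically hw.ixgN) and initialize the backing
 * variable to 16.
 */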

/************************************************************************
 * ixgbe_sysctl_flowcntl
 *
 * SYSCTL wrapper around setting Flow Control
 ************************************************************************/
static int
ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error, fc;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	fc = adapter->hw.fc.current_mode;
	node.sysctl_data = &fc;
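	/*
	 * sysctl_lookup() copies the current value out to userland and,
	 * on a write, copies the proposed new value into "fc";
	 * newp == NULL indicates a read-only query.
	 */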
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	/* Don't bother if it hasn't changed */
	if (fc == adapter->hw.fc.current_mode)
		return (0);

	return ixgbe_set_flowcntl(adapter, fc);
} /* ixgbe_sysctl_flowcntl */

/************************************************************************
 * ixgbe_set_flowcntl - Set flow control
 *
 * Flow control values:
 *   0 - off
 *   1 - rx pause
 *   2 - tx pause
 *   3 - full
 ************************************************************************/
static int
ixgbe_set_flowcntl(struct adapter *adapter, int fc)
{
	switch (fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = fc;
		if (adapter->num_queues > 1)
			ixgbe_disable_rx_drop(adapter);
		break;
	case ixgbe_fc_none:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
		if (adapter->num_queues > 1)
			ixgbe_enable_rx_drop(adapter);
		break;
	default:
		return (EINVAL);
	}

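	/*
	 * Per-queue RX drop and flow control are mutually exclusive
	 * above: with PAUSE frames in use, a full ring must exert
	 * back-pressure rather than drop, while with flow control off,
	 * dropping keeps one full ring from stalling the entire RX
	 * engine (see ixgbe_enable_rx_drop()).
	 */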
#if 0 /* XXX NetBSD */
	/* Don't autoneg if forcing a value */
	adapter->hw.fc.disable_fc_autoneg = TRUE;
#endif
	ixgbe_fc_enable(&adapter->hw);

	return (0);
} /* ixgbe_set_flowcntl */

/************************************************************************
 * ixgbe_enable_rx_drop
 *
 * Enable the hardware to drop packets when the buffer is
 * full. This is useful with multiqueue, so that no single
 * queue being full stalls the entire RX engine. We only
 * enable this when Multiqueue is enabled AND Flow Control
 * is disabled.
 ************************************************************************/
static void
ixgbe_enable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl |= IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

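	/*
	 * QDE is an indirect register: each write carries the VF pool
	 * index in the IDX field together with the WRITE strobe, and
	 * ENABLE turns drop on for that pool's queues.
	 */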
	/* Enable drop for each VF */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
		    IXGBE_QDE_ENABLE));
	}
} /* ixgbe_enable_rx_drop */

/************************************************************************
 * ixgbe_disable_rx_drop
 ************************************************************************/
static void
ixgbe_disable_rx_drop(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 srrctl;

	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
		srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
	}

	/* Disable drop for each VF */
	for (int i = 0; i < adapter->num_vfs; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
	}
} /* ixgbe_disable_rx_drop */

/************************************************************************
 * ixgbe_sysctl_advertise
 *
 * SYSCTL wrapper around setting advertised speed
 ************************************************************************/
static int
ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct adapter *adapter = (struct adapter *)node.sysctl_data;
	int error = 0, advertise;

	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	advertise = adapter->advertise;
	node.sysctl_data = &advertise;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	return ixgbe_set_advertise(adapter, advertise);
} /* ixgbe_sysctl_advertise */
