Mon Aug 31 06:23:19 2020 UTC (msaitoh)

Fix typo in comment.

cvs diff -r1.249 -r1.250 src/sys/dev/pci/ixgbe/ixgbe.c

--- src/sys/dev/pci/ixgbe/ixgbe.c 2020/08/31 06:20:06 1.249
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2020/08/31 06:23:19 1.250
@@ -1,1000 +1,1000 @@
-/* $NetBSD: ixgbe.c,v 1.249 2020/08/31 06:20:06 msaitoh Exp $ */
+/* $NetBSD: ixgbe.c,v 1.250 2020/08/31 06:23:19 msaitoh Exp $ */

/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_net_mpsafe.h"
#endif

#include "ixgbe.h"
#include "ixgbe_sriov.h"
#include "vlan.h"

#include <sys/cprng.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixgbe_driver_version[] = "4.0.1-k";
/* XXX NetBSD: + 3.3.10 */

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixgbe_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
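
/*
 * Illustration only (not part of the driver): a minimal sketch of the
 * sentinel-terminated scan that a probe-time matcher such as
 * ixgbe_lookup() below presumably performs over this table.  The names
 * "vendor" and "product" stand in for the IDs taken from the PCI
 * attach arguments.
 *
 *	const ixgbe_vendor_info_t *ent;
 *
 *	for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++)
 *		if (ent->vendor_id == vendor && ent->device_id == product)
 *			return ent;
 *	return NULL;
 */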

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static int	ixgbe_probe(device_t, cfdata_t, void *);
static void	ixgbe_quirks(struct adapter *);
static void	ixgbe_attach(device_t, device_t, void *);
static int	ixgbe_detach(device_t, int);
#if 0
static int	ixgbe_shutdown(device_t);
#endif
static bool	ixgbe_suspend(device_t, const pmf_qual_t *);
static bool	ixgbe_resume(device_t, const pmf_qual_t *);
static int	ixgbe_ifflags_cb(struct ethercom *);
static int	ixgbe_ioctl(struct ifnet *, u_long, void *);
static int	ixgbe_init(struct ifnet *);
static void	ixgbe_init_locked(struct adapter *);
static void	ixgbe_ifstop(struct ifnet *, int);
static void	ixgbe_stop(void *);
static void	ixgbe_init_device_features(struct adapter *);
static void	ixgbe_check_fan_failure(struct adapter *, u32, bool);
static void	ixgbe_add_media_types(struct adapter *);
static void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int	ixgbe_media_change(struct ifnet *);
static int	ixgbe_allocate_pci_resources(struct adapter *,
		    const struct pci_attach_args *);
static void	ixgbe_free_workqueue(struct adapter *);
static void	ixgbe_get_slot_info(struct adapter *);
static int	ixgbe_allocate_msix(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_allocate_legacy(struct adapter *,
		    const struct pci_attach_args *);
static int	ixgbe_configure_interrupts(struct adapter *);
static void	ixgbe_free_pciintr_resources(struct adapter *);
static void	ixgbe_free_pci_resources(struct adapter *);
static void	ixgbe_local_timer(void *);
static void	ixgbe_handle_timer(struct work *, void *);
static void	ixgbe_recovery_mode_timer(void *);
static void	ixgbe_handle_recovery_mode_timer(struct work *, void *);
static int	ixgbe_setup_interface(device_t, struct adapter *);
static void	ixgbe_config_gpie(struct adapter *);
static void	ixgbe_config_dmac(struct adapter *);
static void	ixgbe_config_delay_values(struct adapter *);
static void	ixgbe_schedule_admin_tasklet(struct adapter *);
static void	ixgbe_config_link(struct adapter *);
static void	ixgbe_check_wol_support(struct adapter *);
static int	ixgbe_setup_low_power_mode(struct adapter *);
#if 0
static void	ixgbe_rearm_queues(struct adapter *, u64);
#endif

static void	ixgbe_initialize_transmit_units(struct adapter *);
static void	ixgbe_initialize_receive_units(struct adapter *);
static void	ixgbe_enable_rx_drop(struct adapter *);
static void	ixgbe_disable_rx_drop(struct adapter *);
static void	ixgbe_initialize_rss_mapping(struct adapter *);

static void	ixgbe_enable_intr(struct adapter *);
static void	ixgbe_disable_intr(struct adapter *);
static void	ixgbe_update_stats_counters(struct adapter *);
static void	ixgbe_set_rxfilter(struct adapter *);
static void	ixgbe_update_link_status(struct adapter *);
static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void	ixgbe_configure_ivars(struct adapter *);
static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

static void	ixgbe_setup_vlan_hw_tagging(struct adapter *);
static void	ixgbe_setup_vlan_hw_support(struct adapter *);
static int	ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixgbe_register_vlan(struct adapter *, u16);
static int	ixgbe_unregister_vlan(struct adapter *, u16);

static void	ixgbe_add_device_sysctls(struct adapter *);
static void	ixgbe_add_hw_stats(struct adapter *);
static void	ixgbe_clear_evcnt(struct adapter *);
static int	ixgbe_set_flowcntl(struct adapter *, int);
static int	ixgbe_set_advertise(struct adapter *, int);
static int	ixgbe_get_advertise(struct adapter *);

/* Sysctl handlers */
static void	ixgbe_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);
static int	ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
#ifdef IXGBE_DEBUG
static int	ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
#endif
static int	ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_debug(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
static int	ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

/* Support for pluggable optic modules */
static bool	ixgbe_sfp_cage_full(struct adapter *);

/* Legacy (single vector) interrupt handler */
static int	ixgbe_legacy_irq(void *);

/* The MSI/MSI-X Interrupt handlers */
static int	ixgbe_msix_que(void *);
static int	ixgbe_msix_admin(void *);

/* Event handlers running on workqueue */
static void	ixgbe_handle_que(void *);
static void	ixgbe_handle_link(void *);
static void	ixgbe_handle_msf(void *);
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Deferred workqueue handlers */
static void	ixgbe_handle_admin(struct work *, void *);
static void	ixgbe_handle_que_work(struct work *, void *);

static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 *  NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
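
/*
 * Worked example (assuming IXGBE_LOW_LATENCY is 128, as defined in
 * ixgbe.h): 4000000 / 128 = 31250 interrupts per second per vector,
 * which is the ceiling the per-queue EITR programming is later
 * derived from.
 */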

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0,
    "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Whether packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on.
 * This only works as a compile option right now as it's set during
 * attach; set this to 'ixgbe_smart_speed_off' to disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#define IXGBE_TASKLET_WQ_FLAGS	WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#define IXGBE_TASKLET_WQ_FLAGS	0
#endif
#define IXGBE_WORKQUEUE_PRI	PRI_SOFTNET
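
/*
 * Illustration only (not part of the driver): the flag and priority
 * macros above are consumed by workqueue_create(9).  A hedged sketch
 * of a typical call, where "sc" and the queue name are hypothetical:
 *
 *	struct workqueue *wq;
 *	int error;
 *
 *	error = workqueue_create(&wq, "ixgque", ixgbe_handle_que_work,
 *	    sc, IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_WORKQUEUE_FLAGS);
 *
 * With NET_MPSAFE, WQ_PERCPU | WQ_MPSAFE requests one worker per CPU
 * running without the kernel lock; the non-MPSAFE build keeps
 * WQ_PERCPU but drops WQ_MPSAFE.
 */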

/************************************************************************
 * ixgbe_initialize_rss_mapping
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Set multiplier for RETA setup and table size based on MAC */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}
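
	/*
	 * Worked example of the packing above: four consecutive 8-bit
	 * queue ids q0..q3 end up in a single 32-bit RETA entry as
	 *
	 *	reg = q0 | (q1 << 8) | (q2 << 16) | (q3 << 24);
	 *
	 * which the shift-right-by-8 / OR-into-bits-31:24 sequence builds
	 * incrementally, flushing to RETA (or, for indices 128 and above,
	 * ERETA) every fourth iteration.
	 */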

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6
				| RSS_HASHTYPE_RSS_IPV6_EX
				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */

/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
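
/*
 * Worked example (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the
 * SRRCTL packet buffer size field counts 1 KB units): a 2048-byte
 * rx_mbuf_sz yields bufsz = (2048 + 1023) >> 10 = 2, programming a
 * 2 KB receive buffer per descriptor.
 */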

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
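
		/*
		 * Worked example of the mapping above: queue 5 has
		 * regnum = 5 / 4 = 1 and regshift = 5 % 4 = 1, so its
		 * statistic index is written into bits 15:8 of RQSMR(1).
		 */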
621 621
622 /* 622 /*
623 * Set DROP_EN iff we have no flow control and >1 queue. 623 * Set DROP_EN iff we have no flow control and >1 queue.
624 * Note that srrctl was cleared shortly before during reset, 624 * Note that srrctl was cleared shortly before during reset,
625 * so we do not need to clear the bit, but do it just in case 625 * so we do not need to clear the bit, but do it just in case
626 * this code is moved elsewhere. 626 * this code is moved elsewhere.
627 */ 627 */
628 if (adapter->num_queues > 1 && 628 if (adapter->num_queues > 1 &&
629 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 629 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
630 srrctl |= IXGBE_SRRCTL_DROP_EN; 630 srrctl |= IXGBE_SRRCTL_DROP_EN;
631 } else { 631 } else {
632 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 632 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
633 } 633 }
634 634
635 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 635 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
636 636
637 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 637 /* Setup the HW Rx Head and Tail Descriptor Pointers */
638 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 638 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
639 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 639 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
640 640
641 /* Set the driver rx tail address */ 641 /* Set the driver rx tail address */
642 rxr->tail = IXGBE_RDT(rxr->me); 642 rxr->tail = IXGBE_RDT(rxr->me);
643 } 643 }
644 644
645 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 645 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
646 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 646 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
647 | IXGBE_PSRTYPE_UDPHDR 647 | IXGBE_PSRTYPE_UDPHDR
648 | IXGBE_PSRTYPE_IPV4HDR 648 | IXGBE_PSRTYPE_IPV4HDR
649 | IXGBE_PSRTYPE_IPV6HDR; 649 | IXGBE_PSRTYPE_IPV6HDR;
650 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 650 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
651 } 651 }
652 652
653 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 653 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
654 654
655 ixgbe_initialize_rss_mapping(adapter); 655 ixgbe_initialize_rss_mapping(adapter);
656 656
657 if (adapter->num_queues > 1) { 657 if (adapter->num_queues > 1) {
658 /* RSS and RX IPP Checksum are mutually exclusive */ 658 /* RSS and RX IPP Checksum are mutually exclusive */
659 rxcsum |= IXGBE_RXCSUM_PCSD; 659 rxcsum |= IXGBE_RXCSUM_PCSD;
660 } 660 }
661 661
662 if (ifp->if_capenable & IFCAP_RXCSUM) 662 if (ifp->if_capenable & IFCAP_RXCSUM)
663 rxcsum |= IXGBE_RXCSUM_PCSD; 663 rxcsum |= IXGBE_RXCSUM_PCSD;
664 664
665 /* This is useful for calculating UDP/IP fragment checksums */ 665 /* This is useful for calculating UDP/IP fragment checksums */
666 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 666 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
667 rxcsum |= IXGBE_RXCSUM_IPPCSE; 667 rxcsum |= IXGBE_RXCSUM_IPPCSE;
668 668
669 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 669 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
670 670
671} /* ixgbe_initialize_receive_units */ 671} /* ixgbe_initialize_receive_units */
672 672
673/************************************************************************ 673/************************************************************************
674 * ixgbe_initialize_transmit_units - Enable transmit units. 674 * ixgbe_initialize_transmit_units - Enable transmit units.
675 ************************************************************************/ 675 ************************************************************************/
676static void 676static void
677ixgbe_initialize_transmit_units(struct adapter *adapter) 677ixgbe_initialize_transmit_units(struct adapter *adapter)
678{ 678{
679 struct tx_ring *txr = adapter->tx_rings; 679 struct tx_ring *txr = adapter->tx_rings;
680 struct ixgbe_hw *hw = &adapter->hw; 680 struct ixgbe_hw *hw = &adapter->hw;
681 int i; 681 int i;
682 682
683 INIT_DEBUGOUT("ixgbe_initialize_transmit_units"); 683 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
684 684
685 /* Setup the Base and Length of the Tx Descriptor Ring */ 685 /* Setup the Base and Length of the Tx Descriptor Ring */
686 for (i = 0; i < adapter->num_queues; i++, txr++) { 686 for (i = 0; i < adapter->num_queues; i++, txr++) {
687 u64 tdba = txr->txdma.dma_paddr; 687 u64 tdba = txr->txdma.dma_paddr;
688 u32 txctrl = 0; 688 u32 txctrl = 0;
689 u32 tqsmreg, reg; 689 u32 tqsmreg, reg;
690 int regnum = i / 4; /* 1 register per 4 queues */ 690 int regnum = i / 4; /* 1 register per 4 queues */
691 int regshift = i % 4; /* 4 bits per 1 queue */ 691 int regshift = i % 4; /* 4 bits per 1 queue */
692 int j = txr->me; 692 int j = txr->me;
693 693
694 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 694 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
695 (tdba & 0x00000000ffffffffULL)); 695 (tdba & 0x00000000ffffffffULL));
696 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 696 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
697 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 697 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
698 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 698 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
699 699
700 /* 700 /*
701 * Set TQSMR (Transmit Queue Statistic Mapping) register. 701 * Set TQSMR (Transmit Queue Statistic Mapping) register.
702 * Register location is different between 82598 and others. 702 * Register location is different between 82598 and others.
703 */ 703 */
704 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 704 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
705 tqsmreg = IXGBE_TQSMR(regnum); 705 tqsmreg = IXGBE_TQSMR(regnum);
706 else 706 else
707 tqsmreg = IXGBE_TQSM(regnum); 707 tqsmreg = IXGBE_TQSM(regnum);
708 reg = IXGBE_READ_REG(hw, tqsmreg); 708 reg = IXGBE_READ_REG(hw, tqsmreg);
709 reg &= ~(0x000000ffUL << (regshift * 8)); 709 reg &= ~(0x000000ffUL << (regshift * 8));
710 reg |= i << (regshift * 8); 710 reg |= i << (regshift * 8);
711 IXGBE_WRITE_REG(hw, tqsmreg, reg); 711 IXGBE_WRITE_REG(hw, tqsmreg, reg);
712 712
713 /* Setup the HW Tx Head and Tail descriptor pointers */ 713 /* Setup the HW Tx Head and Tail descriptor pointers */
714 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 714 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
715 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 715 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
716 716
717 /* Cache the tail address */ 717 /* Cache the tail address */
718 txr->tail = IXGBE_TDT(j); 718 txr->tail = IXGBE_TDT(j);
719 719
720 txr->txr_no_space = false; 720 txr->txr_no_space = false;
721 721
722 /* Disable Head Writeback */ 722 /* Disable Head Writeback */
723 /* 723 /*
724 * Note: for X550 series devices, these registers are actually 724 * Note: for X550 series devices, these registers are actually
 725 		 * prefixed with TPH_ instead of DCA_, but the addresses and 725 		 * prefixed with TPH_ instead of DCA_, but the addresses and
726 * fields remain the same. 726 * fields remain the same.
727 */ 727 */
728 switch (hw->mac.type) { 728 switch (hw->mac.type) {
729 case ixgbe_mac_82598EB: 729 case ixgbe_mac_82598EB:
730 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 730 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
731 break; 731 break;
732 default: 732 default:
733 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 733 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
734 break; 734 break;
735 } 735 }
736 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 736 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
737 switch (hw->mac.type) { 737 switch (hw->mac.type) {
738 case ixgbe_mac_82598EB: 738 case ixgbe_mac_82598EB:
739 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 739 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
740 break; 740 break;
741 default: 741 default:
742 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 742 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
743 break; 743 break;
744 } 744 }
745 745
746 } 746 }
747 747
748 if (hw->mac.type != ixgbe_mac_82598EB) { 748 if (hw->mac.type != ixgbe_mac_82598EB) {
749 u32 dmatxctl, rttdcs; 749 u32 dmatxctl, rttdcs;
750 750
751 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 751 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
752 dmatxctl |= IXGBE_DMATXCTL_TE; 752 dmatxctl |= IXGBE_DMATXCTL_TE;
753 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 753 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
754 /* Disable arbiter to set MTQC */ 754 /* Disable arbiter to set MTQC */
755 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 755 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
756 rttdcs |= IXGBE_RTTDCS_ARBDIS; 756 rttdcs |= IXGBE_RTTDCS_ARBDIS;
757 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 757 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
758 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 758 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
759 ixgbe_get_mtqc(adapter->iov_mode)); 759 ixgbe_get_mtqc(adapter->iov_mode));
760 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 760 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
761 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 761 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
762 } 762 }
763 763
764 return; 764 return;
765} /* ixgbe_initialize_transmit_units */ 765} /* ixgbe_initialize_transmit_units */
766 766
767static void 767static void
768ixgbe_quirks(struct adapter *adapter) 768ixgbe_quirks(struct adapter *adapter)
769{ 769{
770 device_t dev = adapter->dev; 770 device_t dev = adapter->dev;
771 struct ixgbe_hw *hw = &adapter->hw; 771 struct ixgbe_hw *hw = &adapter->hw;
772 const char *vendor, *product; 772 const char *vendor, *product;
773 773
774 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { 774 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
775 /* 775 /*
776 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE 776 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
777 * MA10-ST0. 777 * MA10-ST0.
778 */ 778 */
779 vendor = pmf_get_platform("system-vendor"); 779 vendor = pmf_get_platform("system-vendor");
780 product = pmf_get_platform("system-product"); 780 product = pmf_get_platform("system-product");
781 781
782 if ((vendor == NULL) || (product == NULL)) 782 if ((vendor == NULL) || (product == NULL))
783 return; 783 return;
784 784
785 if ((strcmp(vendor, "GIGABYTE") == 0) && 785 if ((strcmp(vendor, "GIGABYTE") == 0) &&
786 (strcmp(product, "MA10-ST0") == 0)) { 786 (strcmp(product, "MA10-ST0") == 0)) {
787 aprint_verbose_dev(dev, 787 aprint_verbose_dev(dev,
788 "Enable SFP+ MOD_ABS inverse quirk\n"); 788 "Enable SFP+ MOD_ABS inverse quirk\n");
789 adapter->quirks |= IXGBE_QUIRK_MOD_ABS_INVERT; 789 adapter->quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
790 } 790 }
791 } 791 }
792} 792}
793 793
794/************************************************************************ 794/************************************************************************
795 * ixgbe_attach - Device initialization routine 795 * ixgbe_attach - Device initialization routine
796 * 796 *
797 * Called when the driver is being loaded. 797 * Called when the driver is being loaded.
798 * Identifies the type of hardware, allocates all resources 798 * Identifies the type of hardware, allocates all resources
799 * and initializes the hardware. 799 * and initializes the hardware.
800 * 800 *
 801 *   (returns void; attach failures are reported via aprint_error_dev) 801 *   (returns void; attach failures are reported via aprint_error_dev)
802 ************************************************************************/ 802 ************************************************************************/
803static void 803static void
804ixgbe_attach(device_t parent, device_t dev, void *aux) 804ixgbe_attach(device_t parent, device_t dev, void *aux)
805{ 805{
806 struct adapter *adapter; 806 struct adapter *adapter;
807 struct ixgbe_hw *hw; 807 struct ixgbe_hw *hw;
808 int error = -1; 808 int error = -1;
809 u32 ctrl_ext; 809 u32 ctrl_ext;
810 u16 high, low, nvmreg; 810 u16 high, low, nvmreg;
811 pcireg_t id, subid; 811 pcireg_t id, subid;
812 const ixgbe_vendor_info_t *ent; 812 const ixgbe_vendor_info_t *ent;
813 struct pci_attach_args *pa = aux; 813 struct pci_attach_args *pa = aux;
814 bool unsupported_sfp = false; 814 bool unsupported_sfp = false;
815 const char *str; 815 const char *str;
816 char wqname[MAXCOMLEN]; 816 char wqname[MAXCOMLEN];
817 char buf[256]; 817 char buf[256];
818 818
819 INIT_DEBUGOUT("ixgbe_attach: begin"); 819 INIT_DEBUGOUT("ixgbe_attach: begin");
820 820
821 /* Allocate, clear, and link in our adapter structure */ 821 /* Allocate, clear, and link in our adapter structure */
822 adapter = device_private(dev); 822 adapter = device_private(dev);
823 adapter->hw.back = adapter; 823 adapter->hw.back = adapter;
824 adapter->dev = dev; 824 adapter->dev = dev;
825 hw = &adapter->hw; 825 hw = &adapter->hw;
826 adapter->osdep.pc = pa->pa_pc; 826 adapter->osdep.pc = pa->pa_pc;
827 adapter->osdep.tag = pa->pa_tag; 827 adapter->osdep.tag = pa->pa_tag;
828 if (pci_dma64_available(pa)) 828 if (pci_dma64_available(pa))
829 adapter->osdep.dmat = pa->pa_dmat64; 829 adapter->osdep.dmat = pa->pa_dmat64;
830 else 830 else
831 adapter->osdep.dmat = pa->pa_dmat; 831 adapter->osdep.dmat = pa->pa_dmat;
832 adapter->osdep.attached = false; 832 adapter->osdep.attached = false;
833 833
834 ent = ixgbe_lookup(pa); 834 ent = ixgbe_lookup(pa);
835 835
836 KASSERT(ent != NULL); 836 KASSERT(ent != NULL);
837 837
838 aprint_normal(": %s, Version - %s\n", 838 aprint_normal(": %s, Version - %s\n",
839 ixgbe_strings[ent->index], ixgbe_driver_version); 839 ixgbe_strings[ent->index], ixgbe_driver_version);
840 840
841 /* Core Lock Init */ 841 /* Core Lock Init */
842 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 842 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
843 843
844 /* Set up the timer callout and workqueue */ 844 /* Set up the timer callout and workqueue */
845 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 845 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
846 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev)); 846 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
847 error = workqueue_create(&adapter->timer_wq, wqname, 847 error = workqueue_create(&adapter->timer_wq, wqname,
848 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 848 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
849 IXGBE_TASKLET_WQ_FLAGS); 849 IXGBE_TASKLET_WQ_FLAGS);
850 if (error) { 850 if (error) {
851 aprint_error_dev(dev, 851 aprint_error_dev(dev,
852 "could not create timer workqueue (%d)\n", error); 852 "could not create timer workqueue (%d)\n", error);
853 goto err_out; 853 goto err_out;
854 } 854 }
855 855
856 /* Determine hardware revision */ 856 /* Determine hardware revision */
857 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 857 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
858 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 858 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
859 859
860 hw->vendor_id = PCI_VENDOR(id); 860 hw->vendor_id = PCI_VENDOR(id);
861 hw->device_id = PCI_PRODUCT(id); 861 hw->device_id = PCI_PRODUCT(id);
862 hw->revision_id = 862 hw->revision_id =
863 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 863 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
864 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 864 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
865 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 865 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
866 866
867 /* Set quirk flags */ 867 /* Set quirk flags */
868 ixgbe_quirks(adapter); 868 ixgbe_quirks(adapter);
869 869
870 /* 870 /*
871 * Make sure BUSMASTER is set 871 * Make sure BUSMASTER is set
872 */ 872 */
873 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 873 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
874 874
875 /* Do base PCI setup - map BAR0 */ 875 /* Do base PCI setup - map BAR0 */
876 if (ixgbe_allocate_pci_resources(adapter, pa)) { 876 if (ixgbe_allocate_pci_resources(adapter, pa)) {
877 aprint_error_dev(dev, "Allocation of PCI resources failed\n"); 877 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
878 error = ENXIO; 878 error = ENXIO;
879 goto err_out; 879 goto err_out;
880 } 880 }
881 881
882 /* let hardware know driver is loaded */ 882 /* let hardware know driver is loaded */
883 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 883 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
884 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 884 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
885 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 885 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
886 886
887 /* 887 /*
888 * Initialize the shared code 888 * Initialize the shared code
889 */ 889 */
890 if (ixgbe_init_shared_code(hw) != 0) { 890 if (ixgbe_init_shared_code(hw) != 0) {
891 aprint_error_dev(dev, "Unable to initialize the shared code\n"); 891 aprint_error_dev(dev, "Unable to initialize the shared code\n");
892 error = ENXIO; 892 error = ENXIO;
893 goto err_out; 893 goto err_out;
894 } 894 }
895 895
896 switch (hw->mac.type) { 896 switch (hw->mac.type) {
897 case ixgbe_mac_82598EB: 897 case ixgbe_mac_82598EB:
898 str = "82598EB"; 898 str = "82598EB";
899 break; 899 break;
900 case ixgbe_mac_82599EB: 900 case ixgbe_mac_82599EB:
901 str = "82599EB"; 901 str = "82599EB";
902 break; 902 break;
903 case ixgbe_mac_X540: 903 case ixgbe_mac_X540:
904 str = "X540"; 904 str = "X540";
905 break; 905 break;
906 case ixgbe_mac_X550: 906 case ixgbe_mac_X550:
907 str = "X550"; 907 str = "X550";
908 break; 908 break;
909 case ixgbe_mac_X550EM_x: 909 case ixgbe_mac_X550EM_x:
910 str = "X550EM X"; 910 str = "X550EM X";
911 break; 911 break;
912 case ixgbe_mac_X550EM_a: 912 case ixgbe_mac_X550EM_a:
913 str = "X550EM A"; 913 str = "X550EM A";
914 break; 914 break;
915 default: 915 default:
916 str = "Unknown"; 916 str = "Unknown";
917 break; 917 break;
918 } 918 }
919 aprint_normal_dev(dev, "device %s\n", str); 919 aprint_normal_dev(dev, "device %s\n", str);
920 920
921 if (hw->mbx.ops.init_params) 921 if (hw->mbx.ops.init_params)
922 hw->mbx.ops.init_params(hw); 922 hw->mbx.ops.init_params(hw);
923 923
924 hw->allow_unsupported_sfp = allow_unsupported_sfp; 924 hw->allow_unsupported_sfp = allow_unsupported_sfp;
925 925
926 /* Pick up the 82599 settings */ 926 /* Pick up the 82599 settings */
927 if (hw->mac.type != ixgbe_mac_82598EB) { 927 if (hw->mac.type != ixgbe_mac_82598EB) {
928 hw->phy.smart_speed = ixgbe_smart_speed; 928 hw->phy.smart_speed = ixgbe_smart_speed;
929 adapter->num_segs = IXGBE_82599_SCATTER; 929 adapter->num_segs = IXGBE_82599_SCATTER;
930 } else 930 } else
931 adapter->num_segs = IXGBE_82598_SCATTER; 931 adapter->num_segs = IXGBE_82598_SCATTER;
932 932
933 /* Ensure SW/FW semaphore is free */ 933 /* Ensure SW/FW semaphore is free */
934 ixgbe_init_swfw_semaphore(hw); 934 ixgbe_init_swfw_semaphore(hw);
935 935
936 hw->mac.ops.set_lan_id(hw); 936 hw->mac.ops.set_lan_id(hw);
937 ixgbe_init_device_features(adapter); 937 ixgbe_init_device_features(adapter);
938 938
939 if (ixgbe_configure_interrupts(adapter)) { 939 if (ixgbe_configure_interrupts(adapter)) {
940 error = ENXIO; 940 error = ENXIO;
941 goto err_out; 941 goto err_out;
942 } 942 }
943 943
944 /* Allocate multicast array memory. */ 944 /* Allocate multicast array memory. */
945 adapter->mta = malloc(sizeof(*adapter->mta) * 945 adapter->mta = malloc(sizeof(*adapter->mta) *
946 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK); 946 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
947 947
948 /* Enable WoL (if supported) */ 948 /* Enable WoL (if supported) */
949 ixgbe_check_wol_support(adapter); 949 ixgbe_check_wol_support(adapter);
950 950
951 /* Register for VLAN events */ 951 /* Register for VLAN events */
952 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); 952 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
953 953
954 /* Verify adapter fan is still functional (if applicable) */ 954 /* Verify adapter fan is still functional (if applicable) */
955 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 955 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
956 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 956 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
957 ixgbe_check_fan_failure(adapter, esdp, FALSE); 957 ixgbe_check_fan_failure(adapter, esdp, FALSE);
958 } 958 }
959 959
960 /* Set an initial default flow control value */ 960 /* Set an initial default flow control value */
961 hw->fc.requested_mode = ixgbe_flow_control; 961 hw->fc.requested_mode = ixgbe_flow_control;
962 962
963 /* Sysctls for limiting the amount of work done in the taskqueues */ 963 /* Sysctls for limiting the amount of work done in the taskqueues */
964 ixgbe_set_sysctl_value(adapter, "rx_processing_limit", 964 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
965 "max number of rx packets to process", 965 "max number of rx packets to process",
966 &adapter->rx_process_limit, ixgbe_rx_process_limit); 966 &adapter->rx_process_limit, ixgbe_rx_process_limit);
967 967
968 ixgbe_set_sysctl_value(adapter, "tx_processing_limit", 968 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
969 "max number of tx packets to process", 969 "max number of tx packets to process",
970 &adapter->tx_process_limit, ixgbe_tx_process_limit); 970 &adapter->tx_process_limit, ixgbe_tx_process_limit);
971 971
972 /* Do descriptor calc and sanity checks */ 972 /* Do descriptor calc and sanity checks */
973 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 973 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
974 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { 974 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
975 aprint_error_dev(dev, "TXD config issue, using default!\n"); 975 aprint_error_dev(dev, "TXD config issue, using default!\n");
976 adapter->num_tx_desc = DEFAULT_TXD; 976 adapter->num_tx_desc = DEFAULT_TXD;
977 } else 977 } else
978 adapter->num_tx_desc = ixgbe_txd; 978 adapter->num_tx_desc = ixgbe_txd;
979 979
980 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 980 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
981 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { 981 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
982 aprint_error_dev(dev, "RXD config issue, using default!\n"); 982 aprint_error_dev(dev, "RXD config issue, using default!\n");
983 adapter->num_rx_desc = DEFAULT_RXD; 983 adapter->num_rx_desc = DEFAULT_RXD;
984 } else 984 } else
985 adapter->num_rx_desc = ixgbe_rxd; 985 adapter->num_rx_desc = ixgbe_rxd;
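	/*
	 * Illustrative arithmetic for the two checks above, assuming the
	 * usual DBA_ALIGN of 128: descriptors are 16 bytes, so the ring
	 * byte length is 128-byte aligned exactly when the descriptor
	 * count is a multiple of 8; e.g. 4096 * 16 = 65536 bytes passes,
	 * while 4100 * 16 = 65600 bytes would fall back to the default.
	 */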
986 986
987 /* Allocate our TX/RX Queues */ 987 /* Allocate our TX/RX Queues */
988 if (ixgbe_allocate_queues(adapter)) { 988 if (ixgbe_allocate_queues(adapter)) {
989 error = ENOMEM; 989 error = ENOMEM;
990 goto err_out; 990 goto err_out;
991 } 991 }
992 992
993 hw->phy.reset_if_overtemp = TRUE; 993 hw->phy.reset_if_overtemp = TRUE;
994 error = ixgbe_reset_hw(hw); 994 error = ixgbe_reset_hw(hw);
995 hw->phy.reset_if_overtemp = FALSE; 995 hw->phy.reset_if_overtemp = FALSE;
996 if (error == IXGBE_ERR_SFP_NOT_PRESENT) 996 if (error == IXGBE_ERR_SFP_NOT_PRESENT)
997 error = IXGBE_SUCCESS; 997 error = IXGBE_SUCCESS;
998 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 998 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
999 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); 999 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
1000 unsupported_sfp = true; 1000 unsupported_sfp = true;
@@ -3515,2265 +3515,2265 @@ map_err: @@ -3515,2265 +3515,2265 @@ map_err:
3515 3515
3516 return (0); 3516 return (0);
3517} /* ixgbe_allocate_pci_resources */ 3517} /* ixgbe_allocate_pci_resources */
3518 3518
3519static void 3519static void
3520ixgbe_free_workqueue(struct adapter *adapter) 3520ixgbe_free_workqueue(struct adapter *adapter)
3521{ 3521{
3522 struct ix_queue *que = adapter->queues; 3522 struct ix_queue *que = adapter->queues;
3523 struct tx_ring *txr = adapter->tx_rings; 3523 struct tx_ring *txr = adapter->tx_rings;
3524 int i; 3524 int i;
3525 3525
3526 for (i = 0; i < adapter->num_queues; i++, que++, txr++) { 3526 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3527 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 3527 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3528 if (txr->txr_si != NULL) 3528 if (txr->txr_si != NULL)
3529 softint_disestablish(txr->txr_si); 3529 softint_disestablish(txr->txr_si);
3530 } 3530 }
3531 if (que->que_si != NULL) 3531 if (que->que_si != NULL)
3532 softint_disestablish(que->que_si); 3532 softint_disestablish(que->que_si);
3533 } 3533 }
3534 if (adapter->txr_wq != NULL) 3534 if (adapter->txr_wq != NULL)
3535 workqueue_destroy(adapter->txr_wq); 3535 workqueue_destroy(adapter->txr_wq);
3536 if (adapter->txr_wq_enqueued != NULL) 3536 if (adapter->txr_wq_enqueued != NULL)
3537 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); 3537 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3538 if (adapter->que_wq != NULL) 3538 if (adapter->que_wq != NULL)
3539 workqueue_destroy(adapter->que_wq); 3539 workqueue_destroy(adapter->que_wq);
3540 3540
3541 if (adapter->admin_wq != NULL) { 3541 if (adapter->admin_wq != NULL) {
3542 workqueue_destroy(adapter->admin_wq); 3542 workqueue_destroy(adapter->admin_wq);
3543 adapter->admin_wq = NULL; 3543 adapter->admin_wq = NULL;
3544 } 3544 }
3545 if (adapter->timer_wq != NULL) { 3545 if (adapter->timer_wq != NULL) {
3546 workqueue_destroy(adapter->timer_wq); 3546 workqueue_destroy(adapter->timer_wq);
3547 adapter->timer_wq = NULL; 3547 adapter->timer_wq = NULL;
3548 } 3548 }
3549 if (adapter->recovery_mode_timer_wq != NULL) { 3549 if (adapter->recovery_mode_timer_wq != NULL) {
3550 /* 3550 /*
3551 * ixgbe_ifstop() doesn't call the workqueue_wait() for 3551 * ixgbe_ifstop() doesn't call the workqueue_wait() for
3552 * the recovery_mode_timer workqueue, so call it here. 3552 * the recovery_mode_timer workqueue, so call it here.
3553 */ 3553 */
3554 workqueue_wait(adapter->recovery_mode_timer_wq, 3554 workqueue_wait(adapter->recovery_mode_timer_wq,
3555 &adapter->recovery_mode_timer_wc); 3555 &adapter->recovery_mode_timer_wc);
3556 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0); 3556 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
3557 workqueue_destroy(adapter->recovery_mode_timer_wq); 3557 workqueue_destroy(adapter->recovery_mode_timer_wq);
3558 adapter->recovery_mode_timer_wq = NULL; 3558 adapter->recovery_mode_timer_wq = NULL;
3559 } 3559 }
3560} /* ixgbe_free_workqueue */ 3560} /* ixgbe_free_workqueue */
3561 3561
3562/************************************************************************ 3562/************************************************************************
3563 * ixgbe_detach - Device removal routine 3563 * ixgbe_detach - Device removal routine
3564 * 3564 *
3565 * Called when the driver is being removed. 3565 * Called when the driver is being removed.
3566 * Stops the adapter and deallocates all the resources 3566 * Stops the adapter and deallocates all the resources
3567 * that were allocated for driver operation. 3567 * that were allocated for driver operation.
3568 * 3568 *
3569 * return 0 on success, positive on failure 3569 * return 0 on success, positive on failure
3570 ************************************************************************/ 3570 ************************************************************************/
3571static int 3571static int
3572ixgbe_detach(device_t dev, int flags) 3572ixgbe_detach(device_t dev, int flags)
3573{ 3573{
3574 struct adapter *adapter = device_private(dev); 3574 struct adapter *adapter = device_private(dev);
3575 struct rx_ring *rxr = adapter->rx_rings; 3575 struct rx_ring *rxr = adapter->rx_rings;
3576 struct tx_ring *txr = adapter->tx_rings; 3576 struct tx_ring *txr = adapter->tx_rings;
3577 struct ixgbe_hw *hw = &adapter->hw; 3577 struct ixgbe_hw *hw = &adapter->hw;
3578 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 3578 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3579 u32 ctrl_ext; 3579 u32 ctrl_ext;
3580 int i; 3580 int i;
3581 3581
3582 INIT_DEBUGOUT("ixgbe_detach: begin"); 3582 INIT_DEBUGOUT("ixgbe_detach: begin");
3583 if (adapter->osdep.attached == false) 3583 if (adapter->osdep.attached == false)
3584 return 0; 3584 return 0;
3585 3585
3586 if (ixgbe_pci_iov_detach(dev) != 0) { 3586 if (ixgbe_pci_iov_detach(dev) != 0) {
3587 device_printf(dev, "SR-IOV in use; detach first.\n"); 3587 device_printf(dev, "SR-IOV in use; detach first.\n");
3588 return (EBUSY); 3588 return (EBUSY);
3589 } 3589 }
3590 3590
3591#if NVLAN > 0 3591#if NVLAN > 0
3592 /* Make sure VLANs are not using driver */ 3592 /* Make sure VLANs are not using driver */
3593 if (!VLAN_ATTACHED(&adapter->osdep.ec)) 3593 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3594 ; /* nothing to do: no VLANs */ 3594 ; /* nothing to do: no VLANs */
3595 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) 3595 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3596 vlan_ifdetach(adapter->ifp); 3596 vlan_ifdetach(adapter->ifp);
3597 else { 3597 else {
3598 aprint_error_dev(dev, "VLANs in use, detach first\n"); 3598 aprint_error_dev(dev, "VLANs in use, detach first\n");
3599 return (EBUSY); 3599 return (EBUSY);
3600 } 3600 }
3601#endif 3601#endif
3602 3602
3603 /* 3603 /*
3604 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(), 3604 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3605 * so it's not required to call ixgbe_stop() directly. 3605 * so it's not required to call ixgbe_stop() directly.
3606 */ 3606 */
3607 IXGBE_CORE_LOCK(adapter); 3607 IXGBE_CORE_LOCK(adapter);
3608 ixgbe_setup_low_power_mode(adapter); 3608 ixgbe_setup_low_power_mode(adapter);
3609 IXGBE_CORE_UNLOCK(adapter); 3609 IXGBE_CORE_UNLOCK(adapter);
3610 3610
3611 callout_halt(&adapter->timer, NULL); 3611 callout_halt(&adapter->timer, NULL);
3612 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { 3612 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
3613 callout_stop(&adapter->recovery_mode_timer); 3613 callout_stop(&adapter->recovery_mode_timer);
3614 callout_halt(&adapter->recovery_mode_timer, NULL); 3614 callout_halt(&adapter->recovery_mode_timer, NULL);
3615 } 3615 }
3616 3616
3617 workqueue_wait(adapter->admin_wq, &adapter->admin_wc); 3617 workqueue_wait(adapter->admin_wq, &adapter->admin_wc);
3618 atomic_store_relaxed(&adapter->admin_pending, 0); 3618 atomic_store_relaxed(&adapter->admin_pending, 0);
3619 workqueue_wait(adapter->timer_wq, &adapter->timer_wc); 3619 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
3620 atomic_store_relaxed(&adapter->timer_pending, 0); 3620 atomic_store_relaxed(&adapter->timer_pending, 0);
3621 3621
3622 pmf_device_deregister(dev); 3622 pmf_device_deregister(dev);
3623 3623
3624 ether_ifdetach(adapter->ifp); 3624 ether_ifdetach(adapter->ifp);
3625 3625
3626 ixgbe_free_workqueue(adapter); 3626 ixgbe_free_workqueue(adapter);
3627 3627
3628 /* let hardware know driver is unloading */ 3628 /* let hardware know driver is unloading */
3629 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 3629 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3630 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3630 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3631 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 3631 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3632 3632
3633 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 3633 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3634 netmap_detach(adapter->ifp); 3634 netmap_detach(adapter->ifp);
3635 3635
3636 ixgbe_free_pci_resources(adapter); 3636 ixgbe_free_pci_resources(adapter);
3637#if 0 /* XXX the NetBSD port is probably missing something here */ 3637#if 0 /* XXX the NetBSD port is probably missing something here */
3638 bus_generic_detach(dev); 3638 bus_generic_detach(dev);
3639#endif 3639#endif
3640 if_detach(adapter->ifp); 3640 if_detach(adapter->ifp);
3641 ifmedia_fini(&adapter->media); 3641 ifmedia_fini(&adapter->media);
3642 if_percpuq_destroy(adapter->ipq); 3642 if_percpuq_destroy(adapter->ipq);
3643 3643
3644 sysctl_teardown(&adapter->sysctllog); 3644 sysctl_teardown(&adapter->sysctllog);
3645 evcnt_detach(&adapter->efbig_tx_dma_setup); 3645 evcnt_detach(&adapter->efbig_tx_dma_setup);
3646 evcnt_detach(&adapter->mbuf_defrag_failed); 3646 evcnt_detach(&adapter->mbuf_defrag_failed);
3647 evcnt_detach(&adapter->efbig2_tx_dma_setup); 3647 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3648 evcnt_detach(&adapter->einval_tx_dma_setup); 3648 evcnt_detach(&adapter->einval_tx_dma_setup);
3649 evcnt_detach(&adapter->other_tx_dma_setup); 3649 evcnt_detach(&adapter->other_tx_dma_setup);
3650 evcnt_detach(&adapter->eagain_tx_dma_setup); 3650 evcnt_detach(&adapter->eagain_tx_dma_setup);
3651 evcnt_detach(&adapter->enomem_tx_dma_setup); 3651 evcnt_detach(&adapter->enomem_tx_dma_setup);
3652 evcnt_detach(&adapter->watchdog_events); 3652 evcnt_detach(&adapter->watchdog_events);
3653 evcnt_detach(&adapter->tso_err); 3653 evcnt_detach(&adapter->tso_err);
3654 evcnt_detach(&adapter->admin_irqev); 3654 evcnt_detach(&adapter->admin_irqev);
3655 evcnt_detach(&adapter->link_workev); 3655 evcnt_detach(&adapter->link_workev);
3656 evcnt_detach(&adapter->mod_workev); 3656 evcnt_detach(&adapter->mod_workev);
3657 evcnt_detach(&adapter->msf_workev); 3657 evcnt_detach(&adapter->msf_workev);
3658 evcnt_detach(&adapter->phy_workev); 3658 evcnt_detach(&adapter->phy_workev);
3659 3659
3660 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 3660 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3661 if (i < __arraycount(stats->mpc)) { 3661 if (i < __arraycount(stats->mpc)) {
3662 evcnt_detach(&stats->mpc[i]); 3662 evcnt_detach(&stats->mpc[i]);
3663 if (hw->mac.type == ixgbe_mac_82598EB) 3663 if (hw->mac.type == ixgbe_mac_82598EB)
3664 evcnt_detach(&stats->rnbc[i]); 3664 evcnt_detach(&stats->rnbc[i]);
3665 } 3665 }
3666 if (i < __arraycount(stats->pxontxc)) { 3666 if (i < __arraycount(stats->pxontxc)) {
3667 evcnt_detach(&stats->pxontxc[i]); 3667 evcnt_detach(&stats->pxontxc[i]);
3668 evcnt_detach(&stats->pxonrxc[i]); 3668 evcnt_detach(&stats->pxonrxc[i]);
3669 evcnt_detach(&stats->pxofftxc[i]); 3669 evcnt_detach(&stats->pxofftxc[i]);
3670 evcnt_detach(&stats->pxoffrxc[i]); 3670 evcnt_detach(&stats->pxoffrxc[i]);
3671 if (hw->mac.type >= ixgbe_mac_82599EB) 3671 if (hw->mac.type >= ixgbe_mac_82599EB)
3672 evcnt_detach(&stats->pxon2offc[i]); 3672 evcnt_detach(&stats->pxon2offc[i]);
3673 } 3673 }
3674 } 3674 }
3675 3675
3676 txr = adapter->tx_rings; 3676 txr = adapter->tx_rings;
3677 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 3677 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3678 evcnt_detach(&adapter->queues[i].irqs); 3678 evcnt_detach(&adapter->queues[i].irqs);
3679 evcnt_detach(&adapter->queues[i].handleq); 3679 evcnt_detach(&adapter->queues[i].handleq);
3680 evcnt_detach(&adapter->queues[i].req); 3680 evcnt_detach(&adapter->queues[i].req);
3681 evcnt_detach(&txr->no_desc_avail); 3681 evcnt_detach(&txr->no_desc_avail);
3682 evcnt_detach(&txr->total_packets); 3682 evcnt_detach(&txr->total_packets);
3683 evcnt_detach(&txr->tso_tx); 3683 evcnt_detach(&txr->tso_tx);
3684#ifndef IXGBE_LEGACY_TX 3684#ifndef IXGBE_LEGACY_TX
3685 evcnt_detach(&txr->pcq_drops); 3685 evcnt_detach(&txr->pcq_drops);
3686#endif 3686#endif
3687 3687
3688 if (i < __arraycount(stats->qprc)) { 3688 if (i < __arraycount(stats->qprc)) {
3689 evcnt_detach(&stats->qprc[i]); 3689 evcnt_detach(&stats->qprc[i]);
3690 evcnt_detach(&stats->qptc[i]); 3690 evcnt_detach(&stats->qptc[i]);
3691 evcnt_detach(&stats->qbrc[i]); 3691 evcnt_detach(&stats->qbrc[i]);
3692 evcnt_detach(&stats->qbtc[i]); 3692 evcnt_detach(&stats->qbtc[i]);
3693 if (hw->mac.type >= ixgbe_mac_82599EB) 3693 if (hw->mac.type >= ixgbe_mac_82599EB)
3694 evcnt_detach(&stats->qprdc[i]); 3694 evcnt_detach(&stats->qprdc[i]);
3695 } 3695 }
3696 3696
3697 evcnt_detach(&rxr->rx_packets); 3697 evcnt_detach(&rxr->rx_packets);
3698 evcnt_detach(&rxr->rx_bytes); 3698 evcnt_detach(&rxr->rx_bytes);
3699 evcnt_detach(&rxr->rx_copies); 3699 evcnt_detach(&rxr->rx_copies);
3700 evcnt_detach(&rxr->no_jmbuf); 3700 evcnt_detach(&rxr->no_jmbuf);
3701 evcnt_detach(&rxr->rx_discarded); 3701 evcnt_detach(&rxr->rx_discarded);
3702 } 3702 }
3703 evcnt_detach(&stats->ipcs); 3703 evcnt_detach(&stats->ipcs);
3704 evcnt_detach(&stats->l4cs); 3704 evcnt_detach(&stats->l4cs);
3705 evcnt_detach(&stats->ipcs_bad); 3705 evcnt_detach(&stats->ipcs_bad);
3706 evcnt_detach(&stats->l4cs_bad); 3706 evcnt_detach(&stats->l4cs_bad);
3707 evcnt_detach(&stats->intzero); 3707 evcnt_detach(&stats->intzero);
3708 evcnt_detach(&stats->legint); 3708 evcnt_detach(&stats->legint);
3709 evcnt_detach(&stats->crcerrs); 3709 evcnt_detach(&stats->crcerrs);
3710 evcnt_detach(&stats->illerrc); 3710 evcnt_detach(&stats->illerrc);
3711 evcnt_detach(&stats->errbc); 3711 evcnt_detach(&stats->errbc);
3712 evcnt_detach(&stats->mspdc); 3712 evcnt_detach(&stats->mspdc);
3713 if (hw->mac.type >= ixgbe_mac_X550) 3713 if (hw->mac.type >= ixgbe_mac_X550)
3714 evcnt_detach(&stats->mbsdc); 3714 evcnt_detach(&stats->mbsdc);
3715 evcnt_detach(&stats->mpctotal); 3715 evcnt_detach(&stats->mpctotal);
3716 evcnt_detach(&stats->mlfc); 3716 evcnt_detach(&stats->mlfc);
3717 evcnt_detach(&stats->mrfc); 3717 evcnt_detach(&stats->mrfc);
3718 evcnt_detach(&stats->rlec); 3718 evcnt_detach(&stats->rlec);
3719 evcnt_detach(&stats->lxontxc); 3719 evcnt_detach(&stats->lxontxc);
3720 evcnt_detach(&stats->lxonrxc); 3720 evcnt_detach(&stats->lxonrxc);
3721 evcnt_detach(&stats->lxofftxc); 3721 evcnt_detach(&stats->lxofftxc);
3722 evcnt_detach(&stats->lxoffrxc); 3722 evcnt_detach(&stats->lxoffrxc);
3723 3723
3724 /* Packet Reception Stats */ 3724 /* Packet Reception Stats */
3725 evcnt_detach(&stats->tor); 3725 evcnt_detach(&stats->tor);
3726 evcnt_detach(&stats->gorc); 3726 evcnt_detach(&stats->gorc);
3727 evcnt_detach(&stats->tpr); 3727 evcnt_detach(&stats->tpr);
3728 evcnt_detach(&stats->gprc); 3728 evcnt_detach(&stats->gprc);
3729 evcnt_detach(&stats->mprc); 3729 evcnt_detach(&stats->mprc);
3730 evcnt_detach(&stats->bprc); 3730 evcnt_detach(&stats->bprc);
3731 evcnt_detach(&stats->prc64); 3731 evcnt_detach(&stats->prc64);
3732 evcnt_detach(&stats->prc127); 3732 evcnt_detach(&stats->prc127);
3733 evcnt_detach(&stats->prc255); 3733 evcnt_detach(&stats->prc255);
3734 evcnt_detach(&stats->prc511); 3734 evcnt_detach(&stats->prc511);
3735 evcnt_detach(&stats->prc1023); 3735 evcnt_detach(&stats->prc1023);
3736 evcnt_detach(&stats->prc1522); 3736 evcnt_detach(&stats->prc1522);
3737 evcnt_detach(&stats->ruc); 3737 evcnt_detach(&stats->ruc);
3738 evcnt_detach(&stats->rfc); 3738 evcnt_detach(&stats->rfc);
3739 evcnt_detach(&stats->roc); 3739 evcnt_detach(&stats->roc);
3740 evcnt_detach(&stats->rjc); 3740 evcnt_detach(&stats->rjc);
3741 evcnt_detach(&stats->mngprc); 3741 evcnt_detach(&stats->mngprc);
3742 evcnt_detach(&stats->mngpdc); 3742 evcnt_detach(&stats->mngpdc);
3743 evcnt_detach(&stats->xec); 3743 evcnt_detach(&stats->xec);
3744 3744
3745 /* Packet Transmission Stats */ 3745 /* Packet Transmission Stats */
3746 evcnt_detach(&stats->gotc); 3746 evcnt_detach(&stats->gotc);
3747 evcnt_detach(&stats->tpt); 3747 evcnt_detach(&stats->tpt);
3748 evcnt_detach(&stats->gptc); 3748 evcnt_detach(&stats->gptc);
3749 evcnt_detach(&stats->bptc); 3749 evcnt_detach(&stats->bptc);
3750 evcnt_detach(&stats->mptc); 3750 evcnt_detach(&stats->mptc);
3751 evcnt_detach(&stats->mngptc); 3751 evcnt_detach(&stats->mngptc);
3752 evcnt_detach(&stats->ptc64); 3752 evcnt_detach(&stats->ptc64);
3753 evcnt_detach(&stats->ptc127); 3753 evcnt_detach(&stats->ptc127);
3754 evcnt_detach(&stats->ptc255); 3754 evcnt_detach(&stats->ptc255);
3755 evcnt_detach(&stats->ptc511); 3755 evcnt_detach(&stats->ptc511);
3756 evcnt_detach(&stats->ptc1023); 3756 evcnt_detach(&stats->ptc1023);
3757 evcnt_detach(&stats->ptc1522); 3757 evcnt_detach(&stats->ptc1522);
3758 3758
3759 ixgbe_free_queues(adapter); 3759 ixgbe_free_queues(adapter);
3760 free(adapter->mta, M_DEVBUF); 3760 free(adapter->mta, M_DEVBUF);
3761 3761
3762 IXGBE_CORE_LOCK_DESTROY(adapter); 3762 IXGBE_CORE_LOCK_DESTROY(adapter);
3763 3763
3764 return (0); 3764 return (0);
3765} /* ixgbe_detach */ 3765} /* ixgbe_detach */
3766 3766
3767/************************************************************************ 3767/************************************************************************
3768 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 3768 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3769 * 3769 *
3770 * Prepare the adapter/port for LPLU and/or WoL 3770 * Prepare the adapter/port for LPLU and/or WoL
3771 ************************************************************************/ 3771 ************************************************************************/
3772static int 3772static int
3773ixgbe_setup_low_power_mode(struct adapter *adapter) 3773ixgbe_setup_low_power_mode(struct adapter *adapter)
3774{ 3774{
3775 struct ixgbe_hw *hw = &adapter->hw; 3775 struct ixgbe_hw *hw = &adapter->hw;
3776 device_t dev = adapter->dev; 3776 device_t dev = adapter->dev;
3777 s32 error = 0; 3777 s32 error = 0;
3778 3778
3779 KASSERT(mutex_owned(&adapter->core_mtx)); 3779 KASSERT(mutex_owned(&adapter->core_mtx));
3780 3780
3781 /* Limit power management flow to X550EM baseT */ 3781 /* Limit power management flow to X550EM baseT */
3782 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 3782 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3783 hw->phy.ops.enter_lplu) { 3783 hw->phy.ops.enter_lplu) {
3784 /* X550EM baseT adapters need a special LPLU flow */ 3784 /* X550EM baseT adapters need a special LPLU flow */
3785 hw->phy.reset_disable = true; 3785 hw->phy.reset_disable = true;
3786 ixgbe_stop(adapter); 3786 ixgbe_stop(adapter);
3787 error = hw->phy.ops.enter_lplu(hw); 3787 error = hw->phy.ops.enter_lplu(hw);
3788 if (error) 3788 if (error)
3789 device_printf(dev, 3789 device_printf(dev,
3790 "Error entering LPLU: %d\n", error); 3790 "Error entering LPLU: %d\n", error);
3791 hw->phy.reset_disable = false; 3791 hw->phy.reset_disable = false;
3792 } else { 3792 } else {
3793 /* Just stop for other adapters */ 3793 /* Just stop for other adapters */
3794 ixgbe_stop(adapter); 3794 ixgbe_stop(adapter);
3795 } 3795 }
3796 3796
3797 if (!hw->wol_enabled) { 3797 if (!hw->wol_enabled) {
3798 ixgbe_set_phy_power(hw, FALSE); 3798 ixgbe_set_phy_power(hw, FALSE);
3799 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3799 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3800 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 3800 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3801 } else { 3801 } else {
3802 /* Turn off support for APM wakeup. (Using ACPI instead) */ 3802 /* Turn off support for APM wakeup. (Using ACPI instead) */
3803 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw), 3803 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3804 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2); 3804 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
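		/*
		 * A note on the mask (assuming the usual GRC layout):
		 * "~(u32)2" clears bit 1, the APM wakeup enable, so only
		 * the PME-based wakeup armed below remains in effect.
		 */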
3805 3805
3806 /* 3806 /*
3807 * Clear Wake Up Status register to prevent any previous wakeup 3807 * Clear Wake Up Status register to prevent any previous wakeup
3808 * events from waking us up immediately after we suspend. 3808 * events from waking us up immediately after we suspend.
3809 */ 3809 */
3810 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3810 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3811 3811
3812 /* 3812 /*
3813 * Program the Wakeup Filter Control register with user filter 3813 * Program the Wakeup Filter Control register with user filter
3814 * settings 3814 * settings
3815 */ 3815 */
3816 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 3816 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3817 3817
3818 /* Enable wakeups and power management in Wakeup Control */ 3818 /* Enable wakeups and power management in Wakeup Control */
3819 IXGBE_WRITE_REG(hw, IXGBE_WUC, 3819 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3820 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 3820 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3821 3821
3822 } 3822 }
3823 3823
3824 return error; 3824 return error;
3825} /* ixgbe_setup_low_power_mode */ 3825} /* ixgbe_setup_low_power_mode */
3826 3826
3827/************************************************************************ 3827/************************************************************************
3828 * ixgbe_shutdown - Shutdown entry point 3828 * ixgbe_shutdown - Shutdown entry point
3829 ************************************************************************/ 3829 ************************************************************************/
3830#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ 3830#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3831static int 3831static int
3832ixgbe_shutdown(device_t dev) 3832ixgbe_shutdown(device_t dev)
3833{ 3833{
3834 struct adapter *adapter = device_private(dev); 3834 struct adapter *adapter = device_private(dev);
3835 int error = 0; 3835 int error = 0;
3836 3836
3837 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 3837 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3838 3838
3839 IXGBE_CORE_LOCK(adapter); 3839 IXGBE_CORE_LOCK(adapter);
3840 error = ixgbe_setup_low_power_mode(adapter); 3840 error = ixgbe_setup_low_power_mode(adapter);
3841 IXGBE_CORE_UNLOCK(adapter); 3841 IXGBE_CORE_UNLOCK(adapter);
3842 3842
3843 return (error); 3843 return (error);
3844} /* ixgbe_shutdown */ 3844} /* ixgbe_shutdown */
3845#endif 3845#endif
3846 3846
3847/************************************************************************ 3847/************************************************************************
3848 * ixgbe_suspend 3848 * ixgbe_suspend
3849 * 3849 *
3850 * From D0 to D3 3850 * From D0 to D3
3851 ************************************************************************/ 3851 ************************************************************************/
3852static bool 3852static bool
3853ixgbe_suspend(device_t dev, const pmf_qual_t *qual) 3853ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3854{ 3854{
3855 struct adapter *adapter = device_private(dev); 3855 struct adapter *adapter = device_private(dev);
3856 int error = 0; 3856 int error = 0;
3857 3857
3858 INIT_DEBUGOUT("ixgbe_suspend: begin"); 3858 INIT_DEBUGOUT("ixgbe_suspend: begin");
3859 3859
3860 IXGBE_CORE_LOCK(adapter); 3860 IXGBE_CORE_LOCK(adapter);
3861 3861
3862 error = ixgbe_setup_low_power_mode(adapter); 3862 error = ixgbe_setup_low_power_mode(adapter);
3863 3863
3864 IXGBE_CORE_UNLOCK(adapter); 3864 IXGBE_CORE_UNLOCK(adapter);
3865 3865
3866 return (error); 3866 return (error);
3867} /* ixgbe_suspend */ 3867} /* ixgbe_suspend */
3868 3868
3869/************************************************************************ 3869/************************************************************************
3870 * ixgbe_resume 3870 * ixgbe_resume
3871 * 3871 *
3872 * From D3 to D0 3872 * From D3 to D0
3873 ************************************************************************/ 3873 ************************************************************************/
3874static bool 3874static bool
3875ixgbe_resume(device_t dev, const pmf_qual_t *qual) 3875ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3876{ 3876{
3877 struct adapter *adapter = device_private(dev); 3877 struct adapter *adapter = device_private(dev);
3878 struct ifnet *ifp = adapter->ifp; 3878 struct ifnet *ifp = adapter->ifp;
3879 struct ixgbe_hw *hw = &adapter->hw; 3879 struct ixgbe_hw *hw = &adapter->hw;
3880 u32 wus; 3880 u32 wus;
3881 3881
3882 INIT_DEBUGOUT("ixgbe_resume: begin"); 3882 INIT_DEBUGOUT("ixgbe_resume: begin");
3883 3883
3884 IXGBE_CORE_LOCK(adapter); 3884 IXGBE_CORE_LOCK(adapter);
3885 3885
3886 /* Read & clear WUS register */ 3886 /* Read & clear WUS register */
3887 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 3887 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3888 if (wus) 3888 if (wus)
 3889 		device_printf(dev, "Woken up by (WUS): %#010x\n", 3889 		device_printf(dev, "Woken up by (WUS): %#010x\n",
 3890 		    wus); 3890 		    wus);
3891 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3891 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3892 /* And clear WUFC until next low-power transition */ 3892 /* And clear WUFC until next low-power transition */
3893 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3893 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3894 3894
3895 /* 3895 /*
3896 * Required after D3->D0 transition; 3896 * Required after D3->D0 transition;
3897 * will re-advertise all previous advertised speeds 3897 * will re-advertise all previous advertised speeds
3898 */ 3898 */
3899 if (ifp->if_flags & IFF_UP) 3899 if (ifp->if_flags & IFF_UP)
3900 ixgbe_init_locked(adapter); 3900 ixgbe_init_locked(adapter);
3901 3901
3902 IXGBE_CORE_UNLOCK(adapter); 3902 IXGBE_CORE_UNLOCK(adapter);
3903 3903
3904 return true; 3904 return true;
3905} /* ixgbe_resume */ 3905} /* ixgbe_resume */
3906 3906
3907/* 3907/*
3908 * Set the various hardware offload abilities. 3908 * Set the various hardware offload abilities.
3909 * 3909 *
3910 * This takes the ifnet's if_capenable flags (e.g. set by the user using 3910 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3911 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what 3911 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3912 * mbuf offload flags the driver will understand. 3912 * mbuf offload flags the driver will understand.
3913 */ 3913 */
3914static void 3914static void
3915ixgbe_set_if_hwassist(struct adapter *adapter) 3915ixgbe_set_if_hwassist(struct adapter *adapter)
3916{ 3916{
3917 /* XXX */ 3917 /* XXX */
3918} 3918}
3919 3919
3920/************************************************************************ 3920/************************************************************************
3921 * ixgbe_init_locked - Init entry point 3921 * ixgbe_init_locked - Init entry point
3922 * 3922 *
3923 * Used in two ways: It is used by the stack as an init 3923 * Used in two ways: It is used by the stack as an init
 3924 *   entry point in the network interface structure. It is also 3924 *   entry point in the network interface structure. It is also
3925 * used by the driver as a hw/sw initialization routine to 3925 * used by the driver as a hw/sw initialization routine to
3926 * get to a consistent state. 3926 * get to a consistent state.
3927 * 3927 *
 3928 *   (returns void; on failure the interface is stopped via ixgbe_stop()) 3928 *   (returns void; on failure the interface is stopped via ixgbe_stop())
3929 ************************************************************************/ 3929 ************************************************************************/
3930static void 3930static void
3931ixgbe_init_locked(struct adapter *adapter) 3931ixgbe_init_locked(struct adapter *adapter)
3932{ 3932{
3933 struct ifnet *ifp = adapter->ifp; 3933 struct ifnet *ifp = adapter->ifp;
3934 device_t dev = adapter->dev; 3934 device_t dev = adapter->dev;
3935 struct ixgbe_hw *hw = &adapter->hw; 3935 struct ixgbe_hw *hw = &adapter->hw;
3936 struct ix_queue *que; 3936 struct ix_queue *que;
3937 struct tx_ring *txr; 3937 struct tx_ring *txr;
3938 struct rx_ring *rxr; 3938 struct rx_ring *rxr;
3939 u32 txdctl, mhadd; 3939 u32 txdctl, mhadd;
3940 u32 rxdctl, rxctrl; 3940 u32 rxdctl, rxctrl;
3941 u32 ctrl_ext; 3941 u32 ctrl_ext;
3942 bool unsupported_sfp = false; 3942 bool unsupported_sfp = false;
3943 int i, j, err; 3943 int i, j, err;
3944 3944
3945 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */ 3945 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3946 3946
3947 KASSERT(mutex_owned(&adapter->core_mtx)); 3947 KASSERT(mutex_owned(&adapter->core_mtx));
3948 INIT_DEBUGOUT("ixgbe_init_locked: begin"); 3948 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3949 3949
3950 hw->need_unsupported_sfp_recovery = false; 3950 hw->need_unsupported_sfp_recovery = false;
3951 hw->adapter_stopped = FALSE; 3951 hw->adapter_stopped = FALSE;
3952 ixgbe_stop_adapter(hw); 3952 ixgbe_stop_adapter(hw);
3953 callout_stop(&adapter->timer); 3953 callout_stop(&adapter->timer);
3954 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 3954 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3955 callout_stop(&adapter->recovery_mode_timer); 3955 callout_stop(&adapter->recovery_mode_timer);
3956 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 3956 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3957 que->disabled_count = 0; 3957 que->disabled_count = 0;
3958 3958
3959 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */ 3959 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3960 adapter->max_frame_size = 3960 adapter->max_frame_size =
3961 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 3961 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3962 3962
3963 /* Queue indices may change with IOV mode */ 3963 /* Queue indices may change with IOV mode */
3964 ixgbe_align_all_queue_indices(adapter); 3964 ixgbe_align_all_queue_indices(adapter);
3965 3965
3966 /* reprogram the RAR[0] in case user changed it. */ 3966 /* reprogram the RAR[0] in case user changed it. */
3967 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 3967 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3968 3968
3969 /* Get the latest mac address, User can use a LAA */ 3969 /* Get the latest mac address, User can use a LAA */
3970 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), 3970 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3971 IXGBE_ETH_LENGTH_OF_ADDRESS); 3971 IXGBE_ETH_LENGTH_OF_ADDRESS);
3972 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 3972 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3973 hw->addr_ctrl.rar_used_count = 1; 3973 hw->addr_ctrl.rar_used_count = 1;
3974 3974
3975 /* Set hardware offload abilities from ifnet flags */ 3975 /* Set hardware offload abilities from ifnet flags */
3976 ixgbe_set_if_hwassist(adapter); 3976 ixgbe_set_if_hwassist(adapter);
3977 3977
3978 /* Prepare transmit descriptors and buffers */ 3978 /* Prepare transmit descriptors and buffers */
3979 if (ixgbe_setup_transmit_structures(adapter)) { 3979 if (ixgbe_setup_transmit_structures(adapter)) {
3980 device_printf(dev, "Could not setup transmit structures\n"); 3980 device_printf(dev, "Could not setup transmit structures\n");
3981 ixgbe_stop(adapter); 3981 ixgbe_stop(adapter);
3982 return; 3982 return;
3983 } 3983 }
3984 3984
3985 ixgbe_init_hw(hw); 3985 ixgbe_init_hw(hw);
3986 3986
3987 ixgbe_initialize_iov(adapter); 3987 ixgbe_initialize_iov(adapter);
3988 3988
3989 ixgbe_initialize_transmit_units(adapter); 3989 ixgbe_initialize_transmit_units(adapter);
3990 3990
3991 /* Setup Multicast table */ 3991 /* Setup Multicast table */
3992 ixgbe_set_rxfilter(adapter); 3992 ixgbe_set_rxfilter(adapter);
3993 3993
3994 /* Determine the correct mbuf pool, based on frame size */ 3994 /* Determine the correct mbuf pool, based on frame size */
3995 if (adapter->max_frame_size <= MCLBYTES) 3995 if (adapter->max_frame_size <= MCLBYTES)
3996 adapter->rx_mbuf_sz = MCLBYTES; 3996 adapter->rx_mbuf_sz = MCLBYTES;
3997 else 3997 else
3998 adapter->rx_mbuf_sz = MJUMPAGESIZE; 3998 adapter->rx_mbuf_sz = MJUMPAGESIZE;
3999 3999
4000 /* Prepare receive descriptors and buffers */ 4000 /* Prepare receive descriptors and buffers */
4001 if (ixgbe_setup_receive_structures(adapter)) { 4001 if (ixgbe_setup_receive_structures(adapter)) {
4002 device_printf(dev, "Could not setup receive structures\n"); 4002 device_printf(dev, "Could not setup receive structures\n");
4003 ixgbe_stop(adapter); 4003 ixgbe_stop(adapter);
4004 return; 4004 return;
4005 } 4005 }
4006 4006
4007 /* Configure RX settings */ 4007 /* Configure RX settings */
4008 ixgbe_initialize_receive_units(adapter); 4008 ixgbe_initialize_receive_units(adapter);
4009 4009
 4010 	/* Initialize the variable holding task enqueue requests from interrupts */ 4010 	/* Initialize the variable holding task enqueue requests from interrupts */
4011 adapter->task_requests = 0; 4011 adapter->task_requests = 0;
4012 4012
4013 /* Enable SDP & MSI-X interrupts based on adapter */ 4013 /* Enable SDP & MSI-X interrupts based on adapter */
4014 ixgbe_config_gpie(adapter); 4014 ixgbe_config_gpie(adapter);
4015 4015
4016 /* Set MTU size */ 4016 /* Set MTU size */
4017 if (ifp->if_mtu > ETHERMTU) { 4017 if (ifp->if_mtu > ETHERMTU) {
4018 /* aka IXGBE_MAXFRS on 82599 and newer */ 4018 /* aka IXGBE_MAXFRS on 82599 and newer */
4019 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 4019 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4020 mhadd &= ~IXGBE_MHADD_MFS_MASK; 4020 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4021 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 4021 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4022 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 4022 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4023 } 4023 }
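	/*
	 * Example with illustrative numbers: an MTU of 9000 gives
	 * max_frame_size = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4)
	 * = 9018, which the shift above places in the MFS field in the
	 * upper 16 bits of MHADD.
	 */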
4024 4024
4025 /* Now enable all the queues */ 4025 /* Now enable all the queues */
4026 for (i = 0; i < adapter->num_queues; i++) { 4026 for (i = 0; i < adapter->num_queues; i++) {
4027 txr = &adapter->tx_rings[i]; 4027 txr = &adapter->tx_rings[i];
4028 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 4028 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4029 txdctl |= IXGBE_TXDCTL_ENABLE; 4029 txdctl |= IXGBE_TXDCTL_ENABLE;
4030 /* Set WTHRESH to 8, burst writeback */ 4030 /* Set WTHRESH to 8, burst writeback */
4031 txdctl |= (8 << 16); 4031 txdctl |= (8 << 16);
4032 /* 4032 /*
4033 * When the internal queue falls below PTHRESH (32), 4033 * When the internal queue falls below PTHRESH (32),
4034 * start prefetching as long as there are at least 4034 * start prefetching as long as there are at least
4035 * HTHRESH (1) buffers ready. The values are taken 4035 * HTHRESH (1) buffers ready. The values are taken
4036 * from the Intel linux driver 3.8.21. 4036 * from the Intel linux driver 3.8.21.
4037 * Prefetching enables tx line rate even with 1 queue. 4037 * Prefetching enables tx line rate even with 1 queue.
4038 */ 4038 */
4039 txdctl |= (32 << 0) | (1 << 8); 4039 txdctl |= (32 << 0) | (1 << 8);
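		/*
		 * Net effect of the three ORs, assuming the usual TXDCTL
		 * layout (PTHRESH bits 6:0, HTHRESH bits 14:8, WTHRESH
		 * bits 22:16): PTHRESH = 32, HTHRESH = 1, WTHRESH = 8,
		 * in addition to the queue enable bit.
		 */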
4040 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 4040 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4041 } 4041 }
4042 4042
4043 for (i = 0; i < adapter->num_queues; i++) { 4043 for (i = 0; i < adapter->num_queues; i++) {
4044 rxr = &adapter->rx_rings[i]; 4044 rxr = &adapter->rx_rings[i];
4045 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 4045 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4046 if (hw->mac.type == ixgbe_mac_82598EB) { 4046 if (hw->mac.type == ixgbe_mac_82598EB) {
4047 /* 4047 /*
4048 * PTHRESH = 21 4048 * PTHRESH = 21
4049 * HTHRESH = 4 4049 * HTHRESH = 4
4050 * WTHRESH = 8 4050 * WTHRESH = 8
4051 */ 4051 */
4052 rxdctl &= ~0x3FFFFF; 4052 rxdctl &= ~0x3FFFFF;
4053 rxdctl |= 0x080420; 4053 rxdctl |= 0x080420;
4054 } 4054 }
4055 rxdctl |= IXGBE_RXDCTL_ENABLE; 4055 rxdctl |= IXGBE_RXDCTL_ENABLE;
4056 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 4056 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4057 for (j = 0; j < 10; j++) { 4057 for (j = 0; j < 10; j++) {
4058 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 4058 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4059 IXGBE_RXDCTL_ENABLE) 4059 IXGBE_RXDCTL_ENABLE)
4060 break; 4060 break;
4061 else 4061 else
4062 msec_delay(1); 4062 msec_delay(1);
4063 } 4063 }
4064 IXGBE_WRITE_BARRIER(hw); 4064 IXGBE_WRITE_BARRIER(hw);
4065 4065
4066 /* 4066 /*
4067 * In netmap mode, we must preserve the buffers made 4067 * In netmap mode, we must preserve the buffers made
4068 * available to userspace before the if_init() 4068 * available to userspace before the if_init()
4069 * (this is true by default on the TX side, because 4069 * (this is true by default on the TX side, because
4070 * init makes all buffers available to userspace). 4070 * init makes all buffers available to userspace).
4071 * 4071 *
4072 * netmap_reset() and the device specific routines 4072 * netmap_reset() and the device specific routines
4073 * (e.g. ixgbe_setup_receive_rings()) map these 4073 * (e.g. ixgbe_setup_receive_rings()) map these
4074 * buffers at the end of the NIC ring, so here we 4074 * buffers at the end of the NIC ring, so here we
4075 * must set the RDT (tail) register to make sure 4075 * must set the RDT (tail) register to make sure
4076 * they are not overwritten. 4076 * they are not overwritten.
4077 * 4077 *
4078 * In this driver the NIC ring starts at RDH = 0, 4078 * In this driver the NIC ring starts at RDH = 0,
4079 * RDT points to the last slot available for reception (?), 4079 * RDT points to the last slot available for reception (?),
4080 * so RDT = num_rx_desc - 1 means the whole ring is available. 4080 * so RDT = num_rx_desc - 1 means the whole ring is available.
4081 */ 4081 */
4082#ifdef DEV_NETMAP 4082#ifdef DEV_NETMAP
4083 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 4083 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4084 (ifp->if_capenable & IFCAP_NETMAP)) { 4084 (ifp->if_capenable & IFCAP_NETMAP)) {
4085 struct netmap_adapter *na = NA(adapter->ifp); 4085 struct netmap_adapter *na = NA(adapter->ifp);
4086 struct netmap_kring *kring = na->rx_rings[i]; 4086 struct netmap_kring *kring = na->rx_rings[i];
4087 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 4087 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4088 4088
4089 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t); 4089 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4090 } else 4090 } else
4091#endif /* DEV_NETMAP */ 4091#endif /* DEV_NETMAP */
4092 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), 4092 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4093 adapter->num_rx_desc - 1); 4093 adapter->num_rx_desc - 1);
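		/*
		 * Example: with num_rx_desc = 1024 and an empty netmap
		 * reservation (nm_kr_rxspace(kring) == 0), both branches
		 * program RDT = 1023, which per the note above makes the
		 * whole ring available to the hardware.
		 */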
4094 } 4094 }
4095 4095
4096 /* Enable Receive engine */ 4096 /* Enable Receive engine */
4097 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4097 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4098 if (hw->mac.type == ixgbe_mac_82598EB) 4098 if (hw->mac.type == ixgbe_mac_82598EB)
4099 rxctrl |= IXGBE_RXCTRL_DMBYPS; 4099 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4100 rxctrl |= IXGBE_RXCTRL_RXEN; 4100 rxctrl |= IXGBE_RXCTRL_RXEN;
4101 ixgbe_enable_rx_dma(hw, rxctrl); 4101 ixgbe_enable_rx_dma(hw, rxctrl);
4102 4102
4103 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 4103 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4104 atomic_store_relaxed(&adapter->timer_pending, 0); 4104 atomic_store_relaxed(&adapter->timer_pending, 0);
4105 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 4105 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
4106 callout_reset(&adapter->recovery_mode_timer, hz, 4106 callout_reset(&adapter->recovery_mode_timer, hz,
4107 ixgbe_recovery_mode_timer, adapter); 4107 ixgbe_recovery_mode_timer, adapter);
4108 4108
4109 /* Set up MSI/MSI-X routing */ 4109 /* Set up MSI/MSI-X routing */
4110 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 4110 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4111 ixgbe_configure_ivars(adapter); 4111 ixgbe_configure_ivars(adapter);
4112 /* Set up auto-mask */ 4112 /* Set up auto-mask */
4113 if (hw->mac.type == ixgbe_mac_82598EB) 4113 if (hw->mac.type == ixgbe_mac_82598EB)
4114 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4114 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4115 else { 4115 else {
4116 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 4116 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4117 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 4117 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4118 } 4118 }
4119 } else { /* Simple settings for Legacy/MSI */ 4119 } else { /* Simple settings for Legacy/MSI */
4120 ixgbe_set_ivar(adapter, 0, 0, 0); 4120 ixgbe_set_ivar(adapter, 0, 0, 0);
4121 ixgbe_set_ivar(adapter, 0, 0, 1); 4121 ixgbe_set_ivar(adapter, 0, 0, 1);
4122 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4122 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4123 } 4123 }
4124 4124
4125 ixgbe_init_fdir(adapter); 4125 ixgbe_init_fdir(adapter);
4126 4126
4127 /* 4127 /*
4128 * Check on any SFP devices that 4128 * Check on any SFP devices that
4129 * need to be kick-started 4129 * need to be kick-started
4130 */ 4130 */
4131 if (hw->phy.type == ixgbe_phy_none) { 4131 if (hw->phy.type == ixgbe_phy_none) {
4132 err = hw->phy.ops.identify(hw); 4132 err = hw->phy.ops.identify(hw);
4133 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) 4133 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
4134 unsupported_sfp = true; 4134 unsupported_sfp = true;
4135 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported) 4135 } else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
4136 unsupported_sfp = true; 4136 unsupported_sfp = true;
4137 4137
4138 if (unsupported_sfp) 4138 if (unsupported_sfp)
4139 device_printf(dev, 4139 device_printf(dev,
4140 "Unsupported SFP+ module type was detected.\n"); 4140 "Unsupported SFP+ module type was detected.\n");
4141 4141
4142 /* Set moderation on the Link interrupt */ 4142 /* Set moderation on the Link interrupt */
4143 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); 4143 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4144 4144
4145 /* Enable EEE power saving */ 4145 /* Enable EEE power saving */
4146 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 4146 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4147 hw->mac.ops.setup_eee(hw, 4147 hw->mac.ops.setup_eee(hw,
4148 adapter->feat_en & IXGBE_FEATURE_EEE); 4148 adapter->feat_en & IXGBE_FEATURE_EEE);
4149 4149
4150 /* Enable power to the phy. */ 4150 /* Enable power to the phy. */
4151 if (!unsupported_sfp) { 4151 if (!unsupported_sfp) {
4152 ixgbe_set_phy_power(hw, TRUE); 4152 ixgbe_set_phy_power(hw, TRUE);
4153 4153
4154 /* Config/Enable Link */ 4154 /* Config/Enable Link */
4155 ixgbe_config_link(adapter); 4155 ixgbe_config_link(adapter);
4156 } 4156 }
4157 4157
4158 /* Hardware Packet Buffer & Flow Control setup */ 4158 /* Hardware Packet Buffer & Flow Control setup */
4159 ixgbe_config_delay_values(adapter); 4159 ixgbe_config_delay_values(adapter);
4160 4160
4161 /* Initialize the FC settings */ 4161 /* Initialize the FC settings */
4162 ixgbe_start_hw(hw); 4162 ixgbe_start_hw(hw);
4163 4163
4164 /* Set up VLAN support and filter */ 4164 /* Set up VLAN support and filter */
4165 ixgbe_setup_vlan_hw_support(adapter); 4165 ixgbe_setup_vlan_hw_support(adapter);
4166 4166
4167 /* Setup DMA Coalescing */ 4167 /* Setup DMA Coalescing */
4168 ixgbe_config_dmac(adapter); 4168 ixgbe_config_dmac(adapter);
4169 4169
4170 /* OK to schedule workqueues. */ 4170 /* OK to schedule workqueues. */
4171 adapter->schedule_wqs_ok = true; 4171 adapter->schedule_wqs_ok = true;
4172 4172
4173 /* And now turn on interrupts */ 4173 /* And now turn on interrupts */
4174 ixgbe_enable_intr(adapter); 4174 ixgbe_enable_intr(adapter);
4175 4175
 4176 /* Enable the use of the MBX by the VFs */ 4176 /* Enable the use of the MBX by the VFs */
4177 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 4177 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4178 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 4178 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4179 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 4179 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4180 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 4180 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4181 } 4181 }
4182 4182
4183 /* Update saved flags. See ixgbe_ifflags_cb() */ 4183 /* Update saved flags. See ixgbe_ifflags_cb() */
4184 adapter->if_flags = ifp->if_flags; 4184 adapter->if_flags = ifp->if_flags;
4185 adapter->ec_capenable = adapter->osdep.ec.ec_capenable; 4185 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4186 4186
4187 /* Now inform the stack we're ready */ 4187 /* Now inform the stack we're ready */
4188 ifp->if_flags |= IFF_RUNNING; 4188 ifp->if_flags |= IFF_RUNNING;
4189 4189
4190 return; 4190 return;
4191} /* ixgbe_init_locked */ 4191} /* ixgbe_init_locked */
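
As an aside on the netmap tail arithmetic in the RX setup above: a minimal standalone sketch of the computation, with hypothetical values standing in for na->num_rx_desc and nm_kr_rxspace(kring).

#include <stdint.h>

/*
 * Sketch only: netmap maps its reserved buffers at the end of the
 * NIC ring, so the tail (RDT) must stop short of them.  With a
 * 1024-descriptor ring and 16 reserved slots (both hypothetical),
 * the value written to IXGBE_RDT would be 1024 - 1 - 16 = 1007.
 */
static inline uint32_t
rx_tail_with_netmap(uint32_t num_rx_desc, uint32_t reserved)
{
        return num_rx_desc - 1 - reserved;
}
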
4192 4192
4193/************************************************************************ 4193/************************************************************************
4194 * ixgbe_init 4194 * ixgbe_init
4195 ************************************************************************/ 4195 ************************************************************************/
4196static int 4196static int
4197ixgbe_init(struct ifnet *ifp) 4197ixgbe_init(struct ifnet *ifp)
4198{ 4198{
4199 struct adapter *adapter = ifp->if_softc; 4199 struct adapter *adapter = ifp->if_softc;
4200 4200
4201 IXGBE_CORE_LOCK(adapter); 4201 IXGBE_CORE_LOCK(adapter);
4202 ixgbe_init_locked(adapter); 4202 ixgbe_init_locked(adapter);
4203 IXGBE_CORE_UNLOCK(adapter); 4203 IXGBE_CORE_UNLOCK(adapter);
4204 4204
4205 return 0; /* XXX ixgbe_init_locked cannot fail? really? */ 4205 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4206} /* ixgbe_init */ 4206} /* ixgbe_init */
4207 4207
4208/************************************************************************ 4208/************************************************************************
4209 * ixgbe_set_ivar 4209 * ixgbe_set_ivar
4210 * 4210 *
4211 * Setup the correct IVAR register for a particular MSI-X interrupt 4211 * Setup the correct IVAR register for a particular MSI-X interrupt
4212 * (yes this is all very magic and confusing :) 4212 * (yes this is all very magic and confusing :)
4213 * - entry is the register array entry 4213 * - entry is the register array entry
4214 * - vector is the MSI-X vector for this queue 4214 * - vector is the MSI-X vector for this queue
4215 * - type is RX/TX/MISC 4215 * - type is RX/TX/MISC
4216 ************************************************************************/ 4216 ************************************************************************/
4217static void 4217static void
4218ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 4218ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
4219{ 4219{
4220 struct ixgbe_hw *hw = &adapter->hw; 4220 struct ixgbe_hw *hw = &adapter->hw;
4221 u32 ivar, index; 4221 u32 ivar, index;
4222 4222
4223 vector |= IXGBE_IVAR_ALLOC_VAL; 4223 vector |= IXGBE_IVAR_ALLOC_VAL;
4224 4224
4225 switch (hw->mac.type) { 4225 switch (hw->mac.type) {
4226 case ixgbe_mac_82598EB: 4226 case ixgbe_mac_82598EB:
4227 if (type == -1) 4227 if (type == -1)
4228 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; 4228 entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
4229 else 4229 else
4230 entry += (type * 64); 4230 entry += (type * 64);
4231 index = (entry >> 2) & 0x1F; 4231 index = (entry >> 2) & 0x1F;
4232 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4232 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4233 ivar &= ~(0xffUL << (8 * (entry & 0x3))); 4233 ivar &= ~(0xffUL << (8 * (entry & 0x3)));
4234 ivar |= ((u32)vector << (8 * (entry & 0x3))); 4234 ivar |= ((u32)vector << (8 * (entry & 0x3)));
4235 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); 4235 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
4236 break; 4236 break;
4237 case ixgbe_mac_82599EB: 4237 case ixgbe_mac_82599EB:
4238 case ixgbe_mac_X540: 4238 case ixgbe_mac_X540:
4239 case ixgbe_mac_X550: 4239 case ixgbe_mac_X550:
4240 case ixgbe_mac_X550EM_x: 4240 case ixgbe_mac_X550EM_x:
4241 case ixgbe_mac_X550EM_a: 4241 case ixgbe_mac_X550EM_a:
4242 if (type == -1) { /* MISC IVAR */ 4242 if (type == -1) { /* MISC IVAR */
4243 index = (entry & 1) * 8; 4243 index = (entry & 1) * 8;
4244 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4244 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4245 ivar &= ~(0xffUL << index); 4245 ivar &= ~(0xffUL << index);
4246 ivar |= ((u32)vector << index); 4246 ivar |= ((u32)vector << index);
4247 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4247 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4248 } else { /* RX/TX IVARS */ 4248 } else { /* RX/TX IVARS */
4249 index = (16 * (entry & 1)) + (8 * type); 4249 index = (16 * (entry & 1)) + (8 * type);
4250 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); 4250 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
4251 ivar &= ~(0xffUL << index); 4251 ivar &= ~(0xffUL << index);
4252 ivar |= ((u32)vector << index); 4252 ivar |= ((u32)vector << index);
4253 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); 4253 IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
4254 } 4254 }
4255 break; 4255 break;
4256 default: 4256 default:
4257 break; 4257 break;
4258 } 4258 }
4259} /* ixgbe_set_ivar */ 4259} /* ixgbe_set_ivar */
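
The byte-lane packing in the 82598 branch above is easy to misread, so here is the read-modify-write in isolation; ivar_set_lane() is a hypothetical helper for illustration, not driver code.

#include <stdint.h>

/*
 * Four 8-bit vector fields are packed into each 32-bit IVAR
 * register; bits [1:0] of 'entry' select the byte lane, exactly as
 * in the 82598 case of ixgbe_set_ivar().
 */
static uint32_t
ivar_set_lane(uint32_t ivar, uint8_t entry, uint8_t vector)
{
        unsigned shift = 8 * (entry & 0x3);

        ivar &= ~(0xffUL << shift);          /* clear the old field */
        ivar |= ((uint32_t)vector << shift); /* install new vector  */
        return ivar;
}
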
4260 4260
4261/************************************************************************ 4261/************************************************************************
4262 * ixgbe_configure_ivars 4262 * ixgbe_configure_ivars
4263 ************************************************************************/ 4263 ************************************************************************/
4264static void 4264static void
4265ixgbe_configure_ivars(struct adapter *adapter) 4265ixgbe_configure_ivars(struct adapter *adapter)
4266{ 4266{
4267 struct ix_queue *que = adapter->queues; 4267 struct ix_queue *que = adapter->queues;
4268 u32 newitr; 4268 u32 newitr;
4269 4269
4270 if (ixgbe_max_interrupt_rate > 0) 4270 if (ixgbe_max_interrupt_rate > 0)
4271 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; 4271 newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
4272 else { 4272 else {
4273 /* 4273 /*
4274 * Disable DMA coalescing if interrupt moderation is 4274 * Disable DMA coalescing if interrupt moderation is
4275 * disabled. 4275 * disabled.
4276 */ 4276 */
4277 adapter->dmac = 0; 4277 adapter->dmac = 0;
4278 newitr = 0; 4278 newitr = 0;
4279 } 4279 }
4280 4280
4281 for (int i = 0; i < adapter->num_queues; i++, que++) { 4281 for (int i = 0; i < adapter->num_queues; i++, que++) {
4282 struct rx_ring *rxr = &adapter->rx_rings[i]; 4282 struct rx_ring *rxr = &adapter->rx_rings[i];
4283 struct tx_ring *txr = &adapter->tx_rings[i]; 4283 struct tx_ring *txr = &adapter->tx_rings[i];
4284 /* First the RX queue entry */ 4284 /* First the RX queue entry */
4285 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0); 4285 ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
4286 /* ... and the TX */ 4286 /* ... and the TX */
4287 ixgbe_set_ivar(adapter, txr->me, que->msix, 1); 4287 ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
4288 /* Set an Initial EITR value */ 4288 /* Set an Initial EITR value */
4289 ixgbe_eitr_write(adapter, que->msix, newitr); 4289 ixgbe_eitr_write(adapter, que->msix, newitr);
4290 /* 4290 /*
 4291 * Clear this to eliminate influence of the previous 4291 * Clear this to eliminate influence of the previous
 4292 * state. At this point the Tx/Rx interrupt handler 4292 * state. At this point the Tx/Rx interrupt handler
 4293 * (ixgbe_msix_que()) cannot be called, so neither 4293 * (ixgbe_msix_que()) cannot be called, so neither
 4294 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required. 4294 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
4295 */ 4295 */
4296 que->eitr_setting = 0; 4296 que->eitr_setting = 0;
4297 } 4297 }
4298 4298
4299 /* For the Link interrupt */ 4299 /* For the Link interrupt */
4300 ixgbe_set_ivar(adapter, 1, adapter->vector, -1); 4300 ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
4301} /* ixgbe_configure_ivars */ 4301} /* ixgbe_configure_ivars */
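
A short worked example of the newitr arithmetic above; the 31250 interrupts/s input is hypothetical, chosen only because it divides evenly.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* 4000000 / 31250 = 128 (0x80); the 0x0FF8 mask aligns the
         * result to the EITR interval field, so 0x80 is what
         * ixgbe_eitr_write() would program into each queue. */
        uint32_t rate   = 31250;
        uint32_t newitr = (4000000 / rate) & 0x0FF8;

        printf("newitr = 0x%" PRIx32 "\n", newitr);  /* prints 0x80 */
        return 0;
}
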
4302 4302
4303/************************************************************************ 4303/************************************************************************
4304 * ixgbe_config_gpie 4304 * ixgbe_config_gpie
4305 ************************************************************************/ 4305 ************************************************************************/
4306static void 4306static void
4307ixgbe_config_gpie(struct adapter *adapter) 4307ixgbe_config_gpie(struct adapter *adapter)
4308{ 4308{
4309 struct ixgbe_hw *hw = &adapter->hw; 4309 struct ixgbe_hw *hw = &adapter->hw;
4310 u32 gpie; 4310 u32 gpie;
4311 4311
4312 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4312 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4313 4313
4314 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 4314 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4315 /* Enable Enhanced MSI-X mode */ 4315 /* Enable Enhanced MSI-X mode */
4316 gpie |= IXGBE_GPIE_MSIX_MODE 4316 gpie |= IXGBE_GPIE_MSIX_MODE
4317 | IXGBE_GPIE_EIAME 4317 | IXGBE_GPIE_EIAME
4318 | IXGBE_GPIE_PBA_SUPPORT 4318 | IXGBE_GPIE_PBA_SUPPORT
4319 | IXGBE_GPIE_OCD; 4319 | IXGBE_GPIE_OCD;
4320 } 4320 }
4321 4321
4322 /* Fan Failure Interrupt */ 4322 /* Fan Failure Interrupt */
4323 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 4323 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4324 gpie |= IXGBE_SDP1_GPIEN; 4324 gpie |= IXGBE_SDP1_GPIEN;
4325 4325
4326 /* Thermal Sensor Interrupt */ 4326 /* Thermal Sensor Interrupt */
4327 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) 4327 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
4328 gpie |= IXGBE_SDP0_GPIEN_X540; 4328 gpie |= IXGBE_SDP0_GPIEN_X540;
4329 4329
4330 /* Link detection */ 4330 /* Link detection */
4331 switch (hw->mac.type) { 4331 switch (hw->mac.type) {
4332 case ixgbe_mac_82599EB: 4332 case ixgbe_mac_82599EB:
4333 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; 4333 gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
4334 break; 4334 break;
4335 case ixgbe_mac_X550EM_x: 4335 case ixgbe_mac_X550EM_x:
4336 case ixgbe_mac_X550EM_a: 4336 case ixgbe_mac_X550EM_a:
4337 gpie |= IXGBE_SDP0_GPIEN_X540; 4337 gpie |= IXGBE_SDP0_GPIEN_X540;
4338 break; 4338 break;
4339 default: 4339 default:
4340 break; 4340 break;
4341 } 4341 }
4342 4342
4343 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4343 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4344 4344
4345} /* ixgbe_config_gpie */ 4345} /* ixgbe_config_gpie */
4346 4346
4347/************************************************************************ 4347/************************************************************************
4348 * ixgbe_config_delay_values 4348 * ixgbe_config_delay_values
4349 * 4349 *
4350 * Requires adapter->max_frame_size to be set. 4350 * Requires adapter->max_frame_size to be set.
4351 ************************************************************************/ 4351 ************************************************************************/
4352static void 4352static void
4353ixgbe_config_delay_values(struct adapter *adapter) 4353ixgbe_config_delay_values(struct adapter *adapter)
4354{ 4354{
4355 struct ixgbe_hw *hw = &adapter->hw; 4355 struct ixgbe_hw *hw = &adapter->hw;
4356 u32 rxpb, frame, size, tmp; 4356 u32 rxpb, frame, size, tmp;
4357 4357
4358 frame = adapter->max_frame_size; 4358 frame = adapter->max_frame_size;
4359 4359
4360 /* Calculate High Water */ 4360 /* Calculate High Water */
4361 switch (hw->mac.type) { 4361 switch (hw->mac.type) {
4362 case ixgbe_mac_X540: 4362 case ixgbe_mac_X540:
4363 case ixgbe_mac_X550: 4363 case ixgbe_mac_X550:
4364 case ixgbe_mac_X550EM_x: 4364 case ixgbe_mac_X550EM_x:
4365 case ixgbe_mac_X550EM_a: 4365 case ixgbe_mac_X550EM_a:
4366 tmp = IXGBE_DV_X540(frame, frame); 4366 tmp = IXGBE_DV_X540(frame, frame);
4367 break; 4367 break;
4368 default: 4368 default:
4369 tmp = IXGBE_DV(frame, frame); 4369 tmp = IXGBE_DV(frame, frame);
4370 break; 4370 break;
4371 } 4371 }
4372 size = IXGBE_BT2KB(tmp); 4372 size = IXGBE_BT2KB(tmp);
4373 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; 4373 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
4374 hw->fc.high_water[0] = rxpb - size; 4374 hw->fc.high_water[0] = rxpb - size;
4375 4375
4376 /* Now calculate Low Water */ 4376 /* Now calculate Low Water */
4377 switch (hw->mac.type) { 4377 switch (hw->mac.type) {
4378 case ixgbe_mac_X540: 4378 case ixgbe_mac_X540:
4379 case ixgbe_mac_X550: 4379 case ixgbe_mac_X550:
4380 case ixgbe_mac_X550EM_x: 4380 case ixgbe_mac_X550EM_x:
4381 case ixgbe_mac_X550EM_a: 4381 case ixgbe_mac_X550EM_a:
4382 tmp = IXGBE_LOW_DV_X540(frame); 4382 tmp = IXGBE_LOW_DV_X540(frame);
4383 break; 4383 break;
4384 default: 4384 default:
4385 tmp = IXGBE_LOW_DV(frame); 4385 tmp = IXGBE_LOW_DV(frame);
4386 break; 4386 break;
4387 } 4387 }
4388 hw->fc.low_water[0] = IXGBE_BT2KB(tmp); 4388 hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
4389 4389
4390 hw->fc.pause_time = IXGBE_FC_PAUSE; 4390 hw->fc.pause_time = IXGBE_FC_PAUSE;
4391 hw->fc.send_xon = TRUE; 4391 hw->fc.send_xon = TRUE;
4392} /* ixgbe_config_delay_values */ 4392} /* ixgbe_config_delay_values */
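
A condensed sketch of the water-mark derivation above; the concrete numbers are hypothetical stand-ins for what the IXGBE_DV()/IXGBE_BT2KB() macros would produce.

#include <stdint.h>

/*
 * The high-water mark sits one delay value below the top of RX
 * packet buffer 0, so an XOFF pause frame goes out while enough
 * space remains to absorb data already in flight.  Units are KB.
 * Example: a 512 KB packet buffer and a 38 KB delay value (both
 * hypothetical) give a high-water mark of 474 KB.
 */
static uint32_t
fc_high_water_kb(uint32_t rxpb_kb, uint32_t delay_kb)
{
        return rxpb_kb - delay_kb;
}
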
4393 4393
4394/************************************************************************ 4394/************************************************************************
4395 * ixgbe_set_rxfilter - Multicast Update 4395 * ixgbe_set_rxfilter - Multicast Update
4396 * 4396 *
 4397 * Called whenever the multicast address list is updated. 4397 * Called whenever the multicast address list is updated.
4398 ************************************************************************/ 4398 ************************************************************************/
4399static void 4399static void
4400ixgbe_set_rxfilter(struct adapter *adapter) 4400ixgbe_set_rxfilter(struct adapter *adapter)
4401{ 4401{
4402 struct ixgbe_mc_addr *mta; 4402 struct ixgbe_mc_addr *mta;
4403 struct ifnet *ifp = adapter->ifp; 4403 struct ifnet *ifp = adapter->ifp;
4404 u8 *update_ptr; 4404 u8 *update_ptr;
4405 int mcnt = 0; 4405 int mcnt = 0;
4406 u32 fctrl; 4406 u32 fctrl;
4407 struct ethercom *ec = &adapter->osdep.ec; 4407 struct ethercom *ec = &adapter->osdep.ec;
4408 struct ether_multi *enm; 4408 struct ether_multi *enm;
4409 struct ether_multistep step; 4409 struct ether_multistep step;
4410 4410
4411 KASSERT(mutex_owned(&adapter->core_mtx)); 4411 KASSERT(mutex_owned(&adapter->core_mtx));
4412 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin"); 4412 IOCTL_DEBUGOUT("ixgbe_set_rxfilter: begin");
4413 4413
4414 mta = adapter->mta; 4414 mta = adapter->mta;
4415 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); 4415 bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
4416 4416
4417 ETHER_LOCK(ec); 4417 ETHER_LOCK(ec);
4418 ec->ec_flags &= ~ETHER_F_ALLMULTI; 4418 ec->ec_flags &= ~ETHER_F_ALLMULTI;
4419 ETHER_FIRST_MULTI(step, ec, enm); 4419 ETHER_FIRST_MULTI(step, ec, enm);
4420 while (enm != NULL) { 4420 while (enm != NULL) {
4421 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) || 4421 if ((mcnt == MAX_NUM_MULTICAST_ADDRESSES) ||
4422 (memcmp(enm->enm_addrlo, enm->enm_addrhi, 4422 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
4423 ETHER_ADDR_LEN) != 0)) { 4423 ETHER_ADDR_LEN) != 0)) {
4424 ec->ec_flags |= ETHER_F_ALLMULTI; 4424 ec->ec_flags |= ETHER_F_ALLMULTI;
4425 break; 4425 break;
4426 } 4426 }
4427 bcopy(enm->enm_addrlo, 4427 bcopy(enm->enm_addrlo,
4428 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 4428 mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
4429 mta[mcnt].vmdq = adapter->pool; 4429 mta[mcnt].vmdq = adapter->pool;
4430 mcnt++; 4430 mcnt++;
4431 ETHER_NEXT_MULTI(step, enm); 4431 ETHER_NEXT_MULTI(step, enm);
4432 } 4432 }
4433 4433
4434 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 4434 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
4435 if (ifp->if_flags & IFF_PROMISC) 4435 if (ifp->if_flags & IFF_PROMISC)
4436 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4436 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4437 else if (ec->ec_flags & ETHER_F_ALLMULTI) { 4437 else if (ec->ec_flags & ETHER_F_ALLMULTI) {
4438 fctrl |= IXGBE_FCTRL_MPE; 4438 fctrl |= IXGBE_FCTRL_MPE;
4439 fctrl &= ~IXGBE_FCTRL_UPE; 4439 fctrl &= ~IXGBE_FCTRL_UPE;
4440 } else 4440 } else
4441 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4441 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
4442 4442
4443 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 4443 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
4444 4444
4445 /* Update multicast filter entries only when it's not ALLMULTI */ 4445 /* Update multicast filter entries only when it's not ALLMULTI */
4446 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) { 4446 if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
4447 ETHER_UNLOCK(ec); 4447 ETHER_UNLOCK(ec);
4448 update_ptr = (u8 *)mta; 4448 update_ptr = (u8 *)mta;
4449 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 4449 ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
4450 ixgbe_mc_array_itr, TRUE); 4450 ixgbe_mc_array_itr, TRUE);
4451 } else 4451 } else
4452 ETHER_UNLOCK(ec); 4452 ETHER_UNLOCK(ec);
4453} /* ixgbe_set_rxfilter */ 4453} /* ixgbe_set_rxfilter */
4454 4454
4455/************************************************************************ 4455/************************************************************************
4456 * ixgbe_mc_array_itr 4456 * ixgbe_mc_array_itr
4457 * 4457 *
4458 * An iterator function needed by the multicast shared code. 4458 * An iterator function needed by the multicast shared code.
 4459 * It feeds the shared code routine the addresses from the 4459 * It feeds the shared code routine the addresses from the
 4460 * array built by ixgbe_set_rxfilter(), one by one. 4460 * array built by ixgbe_set_rxfilter(), one by one.
4461 ************************************************************************/ 4461 ************************************************************************/
4462static u8 * 4462static u8 *
4463ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 4463ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
4464{ 4464{
4465 struct ixgbe_mc_addr *mta; 4465 struct ixgbe_mc_addr *mta;
4466 4466
4467 mta = (struct ixgbe_mc_addr *)*update_ptr; 4467 mta = (struct ixgbe_mc_addr *)*update_ptr;
4468 *vmdq = mta->vmdq; 4468 *vmdq = mta->vmdq;
4469 4469
4470 *update_ptr = (u8*)(mta + 1); 4470 *update_ptr = (u8*)(mta + 1);
4471 4471
4472 return (mta->addr); 4472 return (mta->addr);
4473} /* ixgbe_mc_array_itr */ 4473} /* ixgbe_mc_array_itr */
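
To make the callback contract concrete, here is a sketch of how a consumer walks the array through such an iterator; the loop is illustrative and does not reproduce the real ixgbe_update_mc_addr_list() internals.

#include <stdint.h>

struct mc_addr {                /* stand-in for struct ixgbe_mc_addr */
        uint8_t  addr[6];
        uint32_t vmdq;
};

/* Same shape as ixgbe_mc_array_itr(): return the current address,
 * report its VMDq pool, and advance the cursor to the next entry. */
static uint8_t *
mc_array_itr(uint8_t **update_ptr, uint32_t *vmdq)
{
        struct mc_addr *mta = (struct mc_addr *)*update_ptr;

        *vmdq = mta->vmdq;
        *update_ptr = (uint8_t *)(mta + 1);
        return mta->addr;
}

static void
program_filters(struct mc_addr *mta, uint32_t mcnt)
{
        uint8_t *cursor = (uint8_t *)mta;

        for (uint32_t i = 0; i < mcnt; i++) {
                uint32_t vmdq;
                uint8_t *addr = mc_array_itr(&cursor, &vmdq);

                (void)addr;     /* ...program addr/vmdq into the MTA... */
        }
}
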
4474 4474
4475/************************************************************************ 4475/************************************************************************
4476 * ixgbe_local_timer - Timer routine 4476 * ixgbe_local_timer - Timer routine
4477 * 4477 *
4478 * Checks for link status, updates statistics, 4478 * Checks for link status, updates statistics,
4479 * and runs the watchdog check. 4479 * and runs the watchdog check.
4480 ************************************************************************/ 4480 ************************************************************************/
4481static void 4481static void
4482ixgbe_local_timer(void *arg) 4482ixgbe_local_timer(void *arg)
4483{ 4483{
4484 struct adapter *adapter = arg; 4484 struct adapter *adapter = arg;
4485 4485
4486 if (adapter->schedule_wqs_ok) { 4486 if (adapter->schedule_wqs_ok) {
4487 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0) 4487 if (atomic_cas_uint(&adapter->timer_pending, 0, 1) == 0)
4488 workqueue_enqueue(adapter->timer_wq, 4488 workqueue_enqueue(adapter->timer_wq,
4489 &adapter->timer_wc, NULL); 4489 &adapter->timer_wc, NULL);
4490 } 4490 }
4491} 4491}
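
The pending-flag gate above is a small but load-bearing idiom: exactly one caller can flip the flag from 0 to 1, so at most one work item is ever outstanding. A sketch with C11 atomics (hypothetical names; the driver itself uses atomic_cas_uint()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint pending;

/* Only the winner of the 0 -> 1 exchange may enqueue; the handler
 * stores 0 again once it is safe to schedule the next tick, as
 * ixgbe_handle_timer() does with timer_pending. */
static bool
try_schedule(void)
{
        unsigned expected = 0;

        return atomic_compare_exchange_strong(&pending, &expected, 1);
}
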
4492 4492
4493static void 4493static void
4494ixgbe_handle_timer(struct work *wk, void *context) 4494ixgbe_handle_timer(struct work *wk, void *context)
4495{ 4495{
4496 struct adapter *adapter = context; 4496 struct adapter *adapter = context;
4497 struct ixgbe_hw *hw = &adapter->hw; 4497 struct ixgbe_hw *hw = &adapter->hw;
4498 device_t dev = adapter->dev; 4498 device_t dev = adapter->dev;
4499 struct ix_queue *que = adapter->queues; 4499 struct ix_queue *que = adapter->queues;
4500 u64 queues = 0; 4500 u64 queues = 0;
4501 u64 v0, v1, v2, v3, v4, v5, v6, v7; 4501 u64 v0, v1, v2, v3, v4, v5, v6, v7;
4502 int hung = 0; 4502 int hung = 0;
4503 int i; 4503 int i;
4504 4504
4505 IXGBE_CORE_LOCK(adapter); 4505 IXGBE_CORE_LOCK(adapter);
4506 4506
4507 /* Check for pluggable optics */ 4507 /* Check for pluggable optics */
4508 if (ixgbe_is_sfp(hw)) { 4508 if (ixgbe_is_sfp(hw)) {
4509 bool sched_mod_task = false; 4509 bool sched_mod_task = false;
4510 4510
4511 if (hw->mac.type == ixgbe_mac_82598EB) { 4511 if (hw->mac.type == ixgbe_mac_82598EB) {
4512 /* 4512 /*
4513 * On 82598EB, SFP+'s MOD_ABS pin is not connected to 4513 * On 82598EB, SFP+'s MOD_ABS pin is not connected to
4514 * any GPIP(SDP). So just schedule TASK_MOD. 4514 * any GPIO(SDP). So just schedule TASK_MOD.
4515 */ 4515 */
4516 sched_mod_task = true; 4516 sched_mod_task = true;
4517 } else { 4517 } else {
4518 bool was_full, is_full; 4518 bool was_full, is_full;
4519 4519
4520 was_full = 4520 was_full =
4521 hw->phy.sfp_type != ixgbe_sfp_type_not_present; 4521 hw->phy.sfp_type != ixgbe_sfp_type_not_present;
4522 is_full = ixgbe_sfp_cage_full(adapter); 4522 is_full = ixgbe_sfp_cage_full(adapter);
4523 4523
4524 /* Do probe if cage state changed */ 4524 /* Do probe if cage state changed */
4525 if (was_full ^ is_full) 4525 if (was_full ^ is_full)
4526 sched_mod_task = true; 4526 sched_mod_task = true;
4527 } 4527 }
4528 if (sched_mod_task) { 4528 if (sched_mod_task) {
4529 atomic_or_32(&adapter->task_requests, 4529 atomic_or_32(&adapter->task_requests,
4530 IXGBE_REQUEST_TASK_MOD); 4530 IXGBE_REQUEST_TASK_MOD);
4531 ixgbe_schedule_admin_tasklet(adapter); 4531 ixgbe_schedule_admin_tasklet(adapter);
4532 } 4532 }
4533 } 4533 }
4534 4534
4535 ixgbe_update_link_status(adapter); 4535 ixgbe_update_link_status(adapter);
4536 ixgbe_update_stats_counters(adapter); 4536 ixgbe_update_stats_counters(adapter);
4537 4537
4538 /* Update some event counters */ 4538 /* Update some event counters */
4539 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0; 4539 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
4540 que = adapter->queues; 4540 que = adapter->queues;
4541 for (i = 0; i < adapter->num_queues; i++, que++) { 4541 for (i = 0; i < adapter->num_queues; i++, que++) {
4542 struct tx_ring *txr = que->txr; 4542 struct tx_ring *txr = que->txr;
4543 4543
4544 v0 += txr->q_efbig_tx_dma_setup; 4544 v0 += txr->q_efbig_tx_dma_setup;
4545 v1 += txr->q_mbuf_defrag_failed; 4545 v1 += txr->q_mbuf_defrag_failed;
4546 v2 += txr->q_efbig2_tx_dma_setup; 4546 v2 += txr->q_efbig2_tx_dma_setup;
4547 v3 += txr->q_einval_tx_dma_setup; 4547 v3 += txr->q_einval_tx_dma_setup;
4548 v4 += txr->q_other_tx_dma_setup; 4548 v4 += txr->q_other_tx_dma_setup;
4549 v5 += txr->q_eagain_tx_dma_setup; 4549 v5 += txr->q_eagain_tx_dma_setup;
4550 v6 += txr->q_enomem_tx_dma_setup; 4550 v6 += txr->q_enomem_tx_dma_setup;
4551 v7 += txr->q_tso_err; 4551 v7 += txr->q_tso_err;
4552 } 4552 }
4553 adapter->efbig_tx_dma_setup.ev_count = v0; 4553 adapter->efbig_tx_dma_setup.ev_count = v0;
4554 adapter->mbuf_defrag_failed.ev_count = v1; 4554 adapter->mbuf_defrag_failed.ev_count = v1;
4555 adapter->efbig2_tx_dma_setup.ev_count = v2; 4555 adapter->efbig2_tx_dma_setup.ev_count = v2;
4556 adapter->einval_tx_dma_setup.ev_count = v3; 4556 adapter->einval_tx_dma_setup.ev_count = v3;
4557 adapter->other_tx_dma_setup.ev_count = v4; 4557 adapter->other_tx_dma_setup.ev_count = v4;
4558 adapter->eagain_tx_dma_setup.ev_count = v5; 4558 adapter->eagain_tx_dma_setup.ev_count = v5;
4559 adapter->enomem_tx_dma_setup.ev_count = v6; 4559 adapter->enomem_tx_dma_setup.ev_count = v6;
4560 adapter->tso_err.ev_count = v7; 4560 adapter->tso_err.ev_count = v7;
4561 4561
4562 /* 4562 /*
4563 * Check the TX queues status 4563 * Check the TX queues status
4564 * - mark hung queues so we don't schedule on them 4564 * - mark hung queues so we don't schedule on them
4565 * - watchdog only if all queues show hung 4565 * - watchdog only if all queues show hung
4566 */ 4566 */
4567 que = adapter->queues; 4567 que = adapter->queues;
4568 for (i = 0; i < adapter->num_queues; i++, que++) { 4568 for (i = 0; i < adapter->num_queues; i++, que++) {
4569 /* Keep track of queues with work for soft irq */ 4569 /* Keep track of queues with work for soft irq */
4570 if (que->txr->busy) 4570 if (que->txr->busy)
4571 queues |= 1ULL << que->me; 4571 queues |= 1ULL << que->me;
4572 /* 4572 /*
 4573 * Each time txeof runs without cleaning anything while 4573 * Each time txeof runs without cleaning anything while
 4574 * uncleaned descriptors remain, it increments busy. If 4574 * uncleaned descriptors remain, it increments busy. If
 4575 * it reaches the MAX we declare the queue hung. 4575 * it reaches the MAX we declare the queue hung.
4576 */ 4576 */
4577 if (que->busy == IXGBE_QUEUE_HUNG) { 4577 if (que->busy == IXGBE_QUEUE_HUNG) {
4578 ++hung; 4578 ++hung;
4579 /* Mark the queue as inactive */ 4579 /* Mark the queue as inactive */
4580 adapter->active_queues &= ~(1ULL << que->me); 4580 adapter->active_queues &= ~(1ULL << que->me);
4581 continue; 4581 continue;
4582 } else { 4582 } else {
4583 /* Check if we've come back from hung */ 4583 /* Check if we've come back from hung */
4584 if ((adapter->active_queues & (1ULL << que->me)) == 0) 4584 if ((adapter->active_queues & (1ULL << que->me)) == 0)
4585 adapter->active_queues |= 1ULL << que->me; 4585 adapter->active_queues |= 1ULL << que->me;
4586 } 4586 }
4587 if (que->busy >= IXGBE_MAX_TX_BUSY) { 4587 if (que->busy >= IXGBE_MAX_TX_BUSY) {
4588 device_printf(dev, 4588 device_printf(dev,
4589 "Warning queue %d appears to be hung!\n", i); 4589 "Warning queue %d appears to be hung!\n", i);
4590 que->txr->busy = IXGBE_QUEUE_HUNG; 4590 que->txr->busy = IXGBE_QUEUE_HUNG;
4591 ++hung; 4591 ++hung;
4592 } 4592 }
4593 } 4593 }
4594 4594
4595 /* Only truly watchdog if all queues show hung */ 4595 /* Only truly watchdog if all queues show hung */
4596 if (hung == adapter->num_queues) 4596 if (hung == adapter->num_queues)
4597 goto watchdog; 4597 goto watchdog;
4598#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */ 4598#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
4599 else if (queues != 0) { /* Force an IRQ on queues with work */ 4599 else if (queues != 0) { /* Force an IRQ on queues with work */
4600 que = adapter->queues; 4600 que = adapter->queues;
4601 for (i = 0; i < adapter->num_queues; i++, que++) { 4601 for (i = 0; i < adapter->num_queues; i++, que++) {
4602 mutex_enter(&que->dc_mtx); 4602 mutex_enter(&que->dc_mtx);
4603 if (que->disabled_count == 0) 4603 if (que->disabled_count == 0)
4604 ixgbe_rearm_queues(adapter, 4604 ixgbe_rearm_queues(adapter,
4605 queues & ((u64)1 << i)); 4605 queues & ((u64)1 << i));
4606 mutex_exit(&que->dc_mtx); 4606 mutex_exit(&que->dc_mtx);
4607 } 4607 }
4608 } 4608 }
4609#endif 4609#endif
4610 4610
4611 atomic_store_relaxed(&adapter->timer_pending, 0); 4611 atomic_store_relaxed(&adapter->timer_pending, 0);
4612 IXGBE_CORE_UNLOCK(adapter); 4612 IXGBE_CORE_UNLOCK(adapter);
4613 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 4613 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4614 return; 4614 return;
4615 4615
4616watchdog: 4616watchdog:
4617 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 4617 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
4618 adapter->ifp->if_flags &= ~IFF_RUNNING; 4618 adapter->ifp->if_flags &= ~IFF_RUNNING;
4619 adapter->watchdog_events.ev_count++; 4619 adapter->watchdog_events.ev_count++;
4620 ixgbe_init_locked(adapter); 4620 ixgbe_init_locked(adapter);
4621 IXGBE_CORE_UNLOCK(adapter); 4621 IXGBE_CORE_UNLOCK(adapter);
4622} /* ixgbe_handle_timer */ 4622} /* ixgbe_handle_timer */
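
A condensed restatement of the watchdog decision above, assuming the same busy-count semantics (names abbreviated; QUEUE_HUNG is a stand-in constant, not the real IXGBE_QUEUE_HUNG value):

#include <stdbool.h>

#define QUEUE_HUNG 0x80000000u  /* stand-in for IXGBE_QUEUE_HUNG */

struct queue { unsigned busy; };

/* The watchdog reset fires only when every queue is hung; a single
 * stuck queue is merely marked inactive and skipped. */
static bool
all_queues_hung(const struct queue *q, int nq)
{
        int hung = 0;

        for (int i = 0; i < nq; i++)
                if (q[i].busy == QUEUE_HUNG)
                        hung++;
        return hung == nq;
}
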
4623 4623
4624/************************************************************************ 4624/************************************************************************
4625 * ixgbe_recovery_mode_timer - Recovery mode timer routine 4625 * ixgbe_recovery_mode_timer - Recovery mode timer routine
4626 ************************************************************************/ 4626 ************************************************************************/
4627static void 4627static void
4628ixgbe_recovery_mode_timer(void *arg) 4628ixgbe_recovery_mode_timer(void *arg)
4629{ 4629{
4630 struct adapter *adapter = arg; 4630 struct adapter *adapter = arg;
4631 4631
4632 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending, 0, 1) == 0) 4632 if (atomic_cas_uint(&adapter->recovery_mode_timer_pending, 0, 1) == 0)
4633 { 4633 {
4634 workqueue_enqueue(adapter->recovery_mode_timer_wq, 4634 workqueue_enqueue(adapter->recovery_mode_timer_wq,
4635 &adapter->recovery_mode_timer_wc, NULL); 4635 &adapter->recovery_mode_timer_wc, NULL);
4636 } 4636 }
4637} 4637}
4638 4638
4639static void 4639static void
4640ixgbe_handle_recovery_mode_timer(struct work *wk, void *context) 4640ixgbe_handle_recovery_mode_timer(struct work *wk, void *context)
4641{ 4641{
4642 struct adapter *adapter = context; 4642 struct adapter *adapter = context;
4643 struct ixgbe_hw *hw = &adapter->hw; 4643 struct ixgbe_hw *hw = &adapter->hw;
4644 4644
4645 IXGBE_CORE_LOCK(adapter); 4645 IXGBE_CORE_LOCK(adapter);
4646 if (ixgbe_fw_recovery_mode(hw)) { 4646 if (ixgbe_fw_recovery_mode(hw)) {
4647 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) { 4647 if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
4648 /* Firmware error detected, entering recovery mode */ 4648 /* Firmware error detected, entering recovery mode */
4649 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); 4649 device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
4650 4650
4651 if (hw->adapter_stopped == FALSE) 4651 if (hw->adapter_stopped == FALSE)
4652 ixgbe_stop(adapter); 4652 ixgbe_stop(adapter);
4653 } 4653 }
4654 } else 4654 } else
4655 atomic_cas_uint(&adapter->recovery_mode, 1, 0); 4655 atomic_cas_uint(&adapter->recovery_mode, 1, 0);
4656 4656
4657 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0); 4657 atomic_store_relaxed(&adapter->recovery_mode_timer_pending, 0);
4658 callout_reset(&adapter->recovery_mode_timer, hz, 4658 callout_reset(&adapter->recovery_mode_timer, hz,
4659 ixgbe_recovery_mode_timer, adapter); 4659 ixgbe_recovery_mode_timer, adapter);
4660 IXGBE_CORE_UNLOCK(adapter); 4660 IXGBE_CORE_UNLOCK(adapter);
4661} /* ixgbe_handle_recovery_mode_timer */ 4661} /* ixgbe_handle_recovery_mode_timer */
4662 4662
4663/************************************************************************ 4663/************************************************************************
4664 * ixgbe_sfp_cage_full 4664 * ixgbe_sfp_cage_full
4665 * 4665 *
 4666 * Determine whether a port has optics inserted. 4666 * Determine whether a port has optics inserted.
4667 ************************************************************************/ 4667 ************************************************************************/
4668static bool 4668static bool
4669ixgbe_sfp_cage_full(struct adapter *adapter) 4669ixgbe_sfp_cage_full(struct adapter *adapter)
4670{ 4670{
4671 struct ixgbe_hw *hw = &adapter->hw; 4671 struct ixgbe_hw *hw = &adapter->hw;
4672 uint32_t mask; 4672 uint32_t mask;
4673 int rv; 4673 int rv;
4674 4674
4675 if (hw->mac.type >= ixgbe_mac_X540) 4675 if (hw->mac.type >= ixgbe_mac_X540)
4676 mask = IXGBE_ESDP_SDP0; 4676 mask = IXGBE_ESDP_SDP0;
4677 else 4677 else
4678 mask = IXGBE_ESDP_SDP2; 4678 mask = IXGBE_ESDP_SDP2;
4679 4679
4680 rv = IXGBE_READ_REG(hw, IXGBE_ESDP) & mask; 4680 rv = IXGBE_READ_REG(hw, IXGBE_ESDP) & mask;
4681 if ((adapter->quirks & IXGBE_QUIRK_MOD_ABS_INVERT) != 0) 4681 if ((adapter->quirks & IXGBE_QUIRK_MOD_ABS_INVERT) != 0)
4682 rv = !rv; 4682 rv = !rv;
4683 4683
4684 if (hw->mac.type == ixgbe_mac_X550EM_a) { 4684 if (hw->mac.type == ixgbe_mac_X550EM_a) {
 4685 /* X550EM_a's SDP0 is inverted compared to the others. */ 4685 /* X550EM_a's SDP0 is inverted compared to the others. */
4686 return !rv; 4686 return !rv;
4687 } 4687 }
4688 4688
4689 return rv; 4689 return rv;
4690} /* ixgbe_sfp_cage_full */ 4690} /* ixgbe_sfp_cage_full */
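
The double inversion above is subtle enough to deserve a flattened form; this sketch assumes the semantics are exactly those of the function as written.

#include <stdbool.h>
#include <stdint.h>

static bool
sfp_cage_full(uint32_t esdp, uint32_t mask,
    bool quirk_mod_abs_invert, bool is_x550em_a)
{
        bool present = (esdp & mask) != 0;

        if (quirk_mod_abs_invert)       /* board-level MOD_ABS quirk */
                present = !present;
        if (is_x550em_a)                /* SDP0 polarity differs here */
                present = !present;
        return present;
}
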
4691 4691
4692/************************************************************************ 4692/************************************************************************
4693 * ixgbe_handle_mod - Tasklet for SFP module interrupts 4693 * ixgbe_handle_mod - Tasklet for SFP module interrupts
4694 ************************************************************************/ 4694 ************************************************************************/
4695static void 4695static void
4696ixgbe_handle_mod(void *context) 4696ixgbe_handle_mod(void *context)
4697{ 4697{
4698 struct adapter *adapter = context; 4698 struct adapter *adapter = context;
4699 struct ixgbe_hw *hw = &adapter->hw; 4699 struct ixgbe_hw *hw = &adapter->hw;
4700 device_t dev = adapter->dev; 4700 device_t dev = adapter->dev;
4701 enum ixgbe_sfp_type last_sfp_type; 4701 enum ixgbe_sfp_type last_sfp_type;
4702 u32 err, cage_full = 0; 4702 u32 err, cage_full = 0;
4703 bool last_unsupported_sfp_recovery; 4703 bool last_unsupported_sfp_recovery;
4704 4704
4705 last_sfp_type = hw->phy.sfp_type; 4705 last_sfp_type = hw->phy.sfp_type;
4706 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery; 4706 last_unsupported_sfp_recovery = hw->need_unsupported_sfp_recovery;
4707 ++adapter->mod_workev.ev_count; 4707 ++adapter->mod_workev.ev_count;
4708 if (adapter->hw.need_crosstalk_fix) { 4708 if (adapter->hw.need_crosstalk_fix) {
4709 switch (hw->mac.type) { 4709 switch (hw->mac.type) {
4710 case ixgbe_mac_82599EB: 4710 case ixgbe_mac_82599EB:
4711 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 4711 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4712 IXGBE_ESDP_SDP2; 4712 IXGBE_ESDP_SDP2;
4713 break; 4713 break;
4714 case ixgbe_mac_X550EM_x: 4714 case ixgbe_mac_X550EM_x:
4715 case ixgbe_mac_X550EM_a: 4715 case ixgbe_mac_X550EM_a:
4716 /* 4716 /*
4717 * XXX See ixgbe_sfp_cage_full(). It seems the bit is 4717 * XXX See ixgbe_sfp_cage_full(). It seems the bit is
4718 * inverted on X550EM_a, so I think this is incorrect. 4718 * inverted on X550EM_a, so I think this is incorrect.
4719 */ 4719 */
4720 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & 4720 cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4721 IXGBE_ESDP_SDP0; 4721 IXGBE_ESDP_SDP0;
4722 break; 4722 break;
4723 default: 4723 default:
4724 break; 4724 break;
4725 } 4725 }
4726 4726
4727 if (!cage_full) 4727 if (!cage_full)
4728 goto out; 4728 goto out;
4729 } 4729 }
4730 4730
4731 err = hw->phy.ops.identify_sfp(hw); 4731 err = hw->phy.ops.identify_sfp(hw);
4732 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4732 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4733 if (last_unsupported_sfp_recovery == false) 4733 if (last_unsupported_sfp_recovery == false)
4734 device_printf(dev, 4734 device_printf(dev,
4735 "Unsupported SFP+ module type was detected.\n"); 4735 "Unsupported SFP+ module type was detected.\n");
4736 goto out; 4736 goto out;
4737 } 4737 }
4738 4738
4739 if (hw->need_unsupported_sfp_recovery) { 4739 if (hw->need_unsupported_sfp_recovery) {
4740 device_printf(dev, "Recovering from unsupported SFP\n"); 4740 device_printf(dev, "Recovering from unsupported SFP\n");
4741 /* 4741 /*
4742 * We could recover the status by calling setup_sfp(), 4742 * We could recover the status by calling setup_sfp(),
 4743 * setup_link() and some others. That is complex and might 4743 * setup_link() and some others. That is complex and might
 4744 * not work correctly in some unknown cases. To avoid that 4744 * not work correctly in some unknown cases. To avoid that
 4745 * type of problem, call ixgbe_init_locked(); it's a simple 4745 * type of problem, call ixgbe_init_locked(); it's a simple
 4746 * and safe approach. 4746 * and safe approach.
4747 */ 4747 */
4748 ixgbe_init_locked(adapter); 4748 ixgbe_init_locked(adapter);
4749 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) && 4749 } else if ((hw->phy.sfp_type != ixgbe_sfp_type_not_present) &&
4750 (hw->phy.sfp_type != last_sfp_type)) { 4750 (hw->phy.sfp_type != last_sfp_type)) {
4751 /* A module is inserted and changed. */ 4751 /* A module is inserted and changed. */
4752 4752
4753 if (hw->mac.type == ixgbe_mac_82598EB) 4753 if (hw->mac.type == ixgbe_mac_82598EB)
4754 err = hw->phy.ops.reset(hw); 4754 err = hw->phy.ops.reset(hw);
4755 else { 4755 else {
4756 err = hw->mac.ops.setup_sfp(hw); 4756 err = hw->mac.ops.setup_sfp(hw);
4757 hw->phy.sfp_setup_needed = FALSE; 4757 hw->phy.sfp_setup_needed = FALSE;
4758 } 4758 }
4759 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4759 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4760 device_printf(dev, 4760 device_printf(dev,
4761 "Setup failure - unsupported SFP+ module type.\n"); 4761 "Setup failure - unsupported SFP+ module type.\n");
4762 goto out; 4762 goto out;
4763 } 4763 }
4764 } 4764 }
4765 4765
4766out: 4766out:
4767 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ 4767 /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
4768 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); 4768 adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
4769 4769
4770 /* Adjust media types shown in ifconfig */ 4770 /* Adjust media types shown in ifconfig */
4771 IXGBE_CORE_UNLOCK(adapter); 4771 IXGBE_CORE_UNLOCK(adapter);
4772 ifmedia_removeall(&adapter->media); 4772 ifmedia_removeall(&adapter->media);
4773 ixgbe_add_media_types(adapter); 4773 ixgbe_add_media_types(adapter);
4774 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 4774 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
4775 IXGBE_CORE_LOCK(adapter); 4775 IXGBE_CORE_LOCK(adapter);
4776 4776
4777 /* 4777 /*
 4778 * Don't schedule MSF event if the chip is 82598. 82598 doesn't support 4778 * Don't schedule MSF event if the chip is 82598. 82598 doesn't support
4779 * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link 4779 * MSF. At least, calling ixgbe_handle_msf on 82598 DA makes the link
4780 * flap because the function call setup_link(). 4780 * flap because the function calls setup_link().
4781 */ 4781 */
4782 if (hw->mac.type != ixgbe_mac_82598EB) 4782 if (hw->mac.type != ixgbe_mac_82598EB)
4783 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF); 4783 atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MSF);
4784 4784
4785 /* 4785 /*
4786 * Don't call ixgbe_schedule_admin_tasklet() because we are on 4786 * Don't call ixgbe_schedule_admin_tasklet() because we are on
4787 * the workqueue now. 4787 * the workqueue now.
4788 */ 4788 */
4789} /* ixgbe_handle_mod */ 4789} /* ixgbe_handle_mod */
4790 4790
4791 4791
4792/************************************************************************ 4792/************************************************************************
4793 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts 4793 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
4794 ************************************************************************/ 4794 ************************************************************************/
4795static void 4795static void
4796ixgbe_handle_msf(void *context) 4796ixgbe_handle_msf(void *context)
4797{ 4797{
4798 struct adapter *adapter = context; 4798 struct adapter *adapter = context;
4799 struct ixgbe_hw *hw = &adapter->hw; 4799 struct ixgbe_hw *hw = &adapter->hw;
4800 u32 autoneg; 4800 u32 autoneg;
4801 bool negotiate; 4801 bool negotiate;
4802 4802
4803 ++adapter->msf_workev.ev_count; 4803 ++adapter->msf_workev.ev_count;
4804 4804
4805 autoneg = hw->phy.autoneg_advertised; 4805 autoneg = hw->phy.autoneg_advertised;
4806 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 4806 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4807 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); 4807 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
4808 if (hw->mac.ops.setup_link) 4808 if (hw->mac.ops.setup_link)
4809 hw->mac.ops.setup_link(hw, autoneg, TRUE); 4809 hw->mac.ops.setup_link(hw, autoneg, TRUE);
4810} /* ixgbe_handle_msf */ 4810} /* ixgbe_handle_msf */
4811 4811
4812/************************************************************************ 4812/************************************************************************
4813 * ixgbe_handle_phy - Tasklet for external PHY interrupts 4813 * ixgbe_handle_phy - Tasklet for external PHY interrupts
4814 ************************************************************************/ 4814 ************************************************************************/
4815static void 4815static void
4816ixgbe_handle_phy(void *context) 4816ixgbe_handle_phy(void *context)
4817{ 4817{
4818 struct adapter *adapter = context; 4818 struct adapter *adapter = context;
4819 struct ixgbe_hw *hw = &adapter->hw; 4819 struct ixgbe_hw *hw = &adapter->hw;
4820 int error; 4820 int error;
4821 4821
4822 ++adapter->phy_workev.ev_count; 4822 ++adapter->phy_workev.ev_count;
4823 error = hw->phy.ops.handle_lasi(hw); 4823 error = hw->phy.ops.handle_lasi(hw);
4824 if (error == IXGBE_ERR_OVERTEMP) 4824 if (error == IXGBE_ERR_OVERTEMP)
4825 device_printf(adapter->dev, 4825 device_printf(adapter->dev,
4826 "CRITICAL: EXTERNAL PHY OVER TEMP!! " 4826 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4827 " PHY will downshift to lower power state!\n"); 4827 " PHY will downshift to lower power state!\n");
4828 else if (error) 4828 else if (error)
4829 device_printf(adapter->dev, 4829 device_printf(adapter->dev,
4830 "Error handling LASI interrupt: %d\n", error); 4830 "Error handling LASI interrupt: %d\n", error);
4831} /* ixgbe_handle_phy */ 4831} /* ixgbe_handle_phy */
4832 4832
4833static void 4833static void
4834ixgbe_handle_admin(struct work *wk, void *context) 4834ixgbe_handle_admin(struct work *wk, void *context)
4835{ 4835{
4836 struct adapter *adapter = context; 4836 struct adapter *adapter = context;
4837 struct ifnet *ifp = adapter->ifp; 4837 struct ifnet *ifp = adapter->ifp;
4838 struct ixgbe_hw *hw = &adapter->hw; 4838 struct ixgbe_hw *hw = &adapter->hw;
4839 u32 req; 4839 u32 req;
4840 4840
4841 /* 4841 /*
4842 * Hold the IFNET_LOCK across this entire call. This will 4842 * Hold the IFNET_LOCK across this entire call. This will
4843 * prevent additional changes to adapter->phy_layer 4843 * prevent additional changes to adapter->phy_layer
4844 * and serialize calls to this tasklet. We cannot hold the 4844 * and serialize calls to this tasklet. We cannot hold the
4845 * CORE_LOCK while calling into the ifmedia functions as 4845 * CORE_LOCK while calling into the ifmedia functions as
4846 * they call ifmedia_lock() and the lock is CORE_LOCK. 4846 * they call ifmedia_lock() and the lock is CORE_LOCK.
4847 */ 4847 */
4848 IFNET_LOCK(ifp); 4848 IFNET_LOCK(ifp);
4849 IXGBE_CORE_LOCK(adapter); 4849 IXGBE_CORE_LOCK(adapter);
4850 while ((req = 4850 while ((req =
4851 (adapter->task_requests & ~IXGBE_REQUEST_TASK_NEED_ACKINTR)) 4851 (adapter->task_requests & ~IXGBE_REQUEST_TASK_NEED_ACKINTR))
4852 != 0) { 4852 != 0) {
4853 if ((req & IXGBE_REQUEST_TASK_LSC) != 0) { 4853 if ((req & IXGBE_REQUEST_TASK_LSC) != 0) {
4854 ixgbe_handle_link(adapter); 4854 ixgbe_handle_link(adapter);
4855 atomic_and_32(&adapter->task_requests, 4855 atomic_and_32(&adapter->task_requests,
4856 ~IXGBE_REQUEST_TASK_LSC); 4856 ~IXGBE_REQUEST_TASK_LSC);
4857 } 4857 }
4858 if ((req & IXGBE_REQUEST_TASK_MOD) != 0) { 4858 if ((req & IXGBE_REQUEST_TASK_MOD) != 0) {
4859 ixgbe_handle_mod(adapter); 4859 ixgbe_handle_mod(adapter);
4860 atomic_and_32(&adapter->task_requests, 4860 atomic_and_32(&adapter->task_requests,
4861 ~IXGBE_REQUEST_TASK_MOD); 4861 ~IXGBE_REQUEST_TASK_MOD);
4862 } 4862 }
4863 if ((req & IXGBE_REQUEST_TASK_MSF) != 0) { 4863 if ((req & IXGBE_REQUEST_TASK_MSF) != 0) {
4864 ixgbe_handle_msf(adapter); 4864 ixgbe_handle_msf(adapter);
4865 atomic_and_32(&adapter->task_requests, 4865 atomic_and_32(&adapter->task_requests,
4866 ~IXGBE_REQUEST_TASK_MSF); 4866 ~IXGBE_REQUEST_TASK_MSF);
4867 } 4867 }
4868 if ((req & IXGBE_REQUEST_TASK_PHY) != 0) { 4868 if ((req & IXGBE_REQUEST_TASK_PHY) != 0) {
4869 ixgbe_handle_phy(adapter); 4869 ixgbe_handle_phy(adapter);
4870 atomic_and_32(&adapter->task_requests, 4870 atomic_and_32(&adapter->task_requests,
4871 ~IXGBE_REQUEST_TASK_PHY); 4871 ~IXGBE_REQUEST_TASK_PHY);
4872 } 4872 }
4873 if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) { 4873 if ((req & IXGBE_REQUEST_TASK_FDIR) != 0) {
4874 ixgbe_reinit_fdir(adapter); 4874 ixgbe_reinit_fdir(adapter);
4875 atomic_and_32(&adapter->task_requests, 4875 atomic_and_32(&adapter->task_requests,
4876 ~IXGBE_REQUEST_TASK_FDIR); 4876 ~IXGBE_REQUEST_TASK_FDIR);
4877 } 4877 }
4878#if 0 /* notyet */ 4878#if 0 /* notyet */
4879 if ((req & IXGBE_REQUEST_TASK_MBX) != 0) { 4879 if ((req & IXGBE_REQUEST_TASK_MBX) != 0) {
4880 ixgbe_handle_mbx(adapter); 4880 ixgbe_handle_mbx(adapter);
4881 atomic_and_32(&adapter->task_requests, 4881 atomic_and_32(&adapter->task_requests,
4882 ~IXGBE_REQUEST_TASK_MBX); 4882 ~IXGBE_REQUEST_TASK_MBX);
4883 } 4883 }
4884#endif 4884#endif
4885 } 4885 }
4886 atomic_store_relaxed(&adapter->admin_pending, 0); 4886 atomic_store_relaxed(&adapter->admin_pending, 0);
4887 if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) { 4887 if ((adapter->task_requests & IXGBE_REQUEST_TASK_NEED_ACKINTR) != 0) {
4888 atomic_and_32(&adapter->task_requests, 4888 atomic_and_32(&adapter->task_requests,
4889 ~IXGBE_REQUEST_TASK_NEED_ACKINTR); 4889 ~IXGBE_REQUEST_TASK_NEED_ACKINTR);
4890 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) { 4890 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) != 0) {
4891 /* Re-enable other interrupts */ 4891 /* Re-enable other interrupts */
4892 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 4892 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
4893 } else 4893 } else
4894 ixgbe_enable_intr(adapter); 4894 ixgbe_enable_intr(adapter);
4895 } 4895 }
4896 4896
4897 IXGBE_CORE_UNLOCK(adapter); 4897 IXGBE_CORE_UNLOCK(adapter);
4898 IFNET_UNLOCK(ifp); 4898 IFNET_UNLOCK(ifp);
4899} /* ixgbe_handle_admin */ 4899} /* ixgbe_handle_admin */
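
The request-drain loop above follows a common shape worth isolating: re-read the mask every pass so bits set by interrupts during handling are not lost, and clear each bit only after its handler completes. A reduced sketch with hypothetical task bits and stub handlers (plain stores where the driver uses atomic_and_32()):

#include <stdint.h>

#define TASK_LSC 0x01u          /* link state change, stand-in bit */
#define TASK_MOD 0x02u          /* SFP module event, stand-in bit */

static void handle_lsc(void) { }
static void handle_mod(void) { }

static void
drain_requests(volatile uint32_t *requests)
{
        uint32_t req;

        while ((req = *requests) != 0) {
                if (req & TASK_LSC) {
                        handle_lsc();
                        *requests &= ~TASK_LSC;
                }
                if (req & TASK_MOD) {
                        handle_mod();
                        *requests &= ~TASK_MOD;
                }
        }
}
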
4900 4900
4901static void 4901static void
4902ixgbe_ifstop(struct ifnet *ifp, int disable) 4902ixgbe_ifstop(struct ifnet *ifp, int disable)
4903{ 4903{
4904 struct adapter *adapter = ifp->if_softc; 4904 struct adapter *adapter = ifp->if_softc;
4905 4905
4906 IXGBE_CORE_LOCK(adapter); 4906 IXGBE_CORE_LOCK(adapter);
4907 ixgbe_stop(adapter); 4907 ixgbe_stop(adapter);
4908 IXGBE_CORE_UNLOCK(adapter); 4908 IXGBE_CORE_UNLOCK(adapter);
4909 4909
4910 workqueue_wait(adapter->timer_wq, &adapter->timer_wc); 4910 workqueue_wait(adapter->timer_wq, &adapter->timer_wc);
4911 atomic_store_relaxed(&adapter->timer_pending, 0); 4911 atomic_store_relaxed(&adapter->timer_pending, 0);
4912} 4912}
4913 4913
4914/************************************************************************ 4914/************************************************************************
4915 * ixgbe_stop - Stop the hardware 4915 * ixgbe_stop - Stop the hardware
4916 * 4916 *
4917 * Disables all traffic on the adapter by issuing a 4917 * Disables all traffic on the adapter by issuing a
4918 * global reset on the MAC and deallocates TX/RX buffers. 4918 * global reset on the MAC and deallocates TX/RX buffers.
4919 ************************************************************************/ 4919 ************************************************************************/
4920static void 4920static void
4921ixgbe_stop(void *arg) 4921ixgbe_stop(void *arg)
4922{ 4922{
4923 struct ifnet *ifp; 4923 struct ifnet *ifp;
4924 struct adapter *adapter = arg; 4924 struct adapter *adapter = arg;
4925 struct ixgbe_hw *hw = &adapter->hw; 4925 struct ixgbe_hw *hw = &adapter->hw;
4926 4926
4927 ifp = adapter->ifp; 4927 ifp = adapter->ifp;
4928 4928
4929 KASSERT(mutex_owned(&adapter->core_mtx)); 4929 KASSERT(mutex_owned(&adapter->core_mtx));
4930 4930
4931 INIT_DEBUGOUT("ixgbe_stop: begin\n"); 4931 INIT_DEBUGOUT("ixgbe_stop: begin\n");
4932 ixgbe_disable_intr(adapter); 4932 ixgbe_disable_intr(adapter);
4933 callout_stop(&adapter->timer); 4933 callout_stop(&adapter->timer);
4934 4934
4935 /* Don't schedule workqueues. */ 4935 /* Don't schedule workqueues. */
4936 adapter->schedule_wqs_ok = false; 4936 adapter->schedule_wqs_ok = false;
4937 4937
4938 /* Let the stack know...*/ 4938 /* Let the stack know...*/
4939 ifp->if_flags &= ~IFF_RUNNING; 4939 ifp->if_flags &= ~IFF_RUNNING;
4940 4940
4941 ixgbe_reset_hw(hw); 4941 ixgbe_reset_hw(hw);
4942 hw->adapter_stopped = FALSE; 4942 hw->adapter_stopped = FALSE;
4943 ixgbe_stop_adapter(hw); 4943 ixgbe_stop_adapter(hw);
4944 if (hw->mac.type == ixgbe_mac_82599EB) 4944 if (hw->mac.type == ixgbe_mac_82599EB)
4945 ixgbe_stop_mac_link_on_d3_82599(hw); 4945 ixgbe_stop_mac_link_on_d3_82599(hw);
4946 /* Turn off the laser - noop with no optics */ 4946 /* Turn off the laser - noop with no optics */
4947 ixgbe_disable_tx_laser(hw); 4947 ixgbe_disable_tx_laser(hw);
4948 4948
4949 /* Update the stack */ 4949 /* Update the stack */
4950 adapter->link_up = FALSE; 4950 adapter->link_up = FALSE;
4951 ixgbe_update_link_status(adapter); 4951 ixgbe_update_link_status(adapter);
4952 4952
 4953 /* Reprogram the RAR[0] in case the user changed it. */ 4953 /* Reprogram the RAR[0] in case the user changed it. */
4954 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 4954 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
4955 4955
4956 return; 4956 return;
4957} /* ixgbe_stop */ 4957} /* ixgbe_stop */
4958 4958
4959/************************************************************************ 4959/************************************************************************
4960 * ixgbe_update_link_status - Update OS on link state 4960 * ixgbe_update_link_status - Update OS on link state
4961 * 4961 *
4962 * Note: Only updates the OS on the cached link state. 4962 * Note: Only updates the OS on the cached link state.
4963 * The real check of the hardware only happens with 4963 * The real check of the hardware only happens with
4964 * a link interrupt. 4964 * a link interrupt.
4965 ************************************************************************/ 4965 ************************************************************************/
4966static void 4966static void
4967ixgbe_update_link_status(struct adapter *adapter) 4967ixgbe_update_link_status(struct adapter *adapter)
4968{ 4968{
4969 struct ifnet *ifp = adapter->ifp; 4969 struct ifnet *ifp = adapter->ifp;
4970 device_t dev = adapter->dev; 4970 device_t dev = adapter->dev;
4971 struct ixgbe_hw *hw = &adapter->hw; 4971 struct ixgbe_hw *hw = &adapter->hw;
4972 4972
4973 KASSERT(mutex_owned(&adapter->core_mtx)); 4973 KASSERT(mutex_owned(&adapter->core_mtx));
4974 4974
4975 if (adapter->link_up) { 4975 if (adapter->link_up) {
4976 if (adapter->link_active != LINK_STATE_UP) { 4976 if (adapter->link_active != LINK_STATE_UP) {
4977 /* 4977 /*
4978 * To eliminate influence of the previous state 4978 * To eliminate influence of the previous state
4979 * in the same way as ixgbe_init_locked(). 4979 * in the same way as ixgbe_init_locked().
4980 */ 4980 */
4981 struct ix_queue *que = adapter->queues; 4981 struct ix_queue *que = adapter->queues;
4982 for (int i = 0; i < adapter->num_queues; i++, que++) 4982 for (int i = 0; i < adapter->num_queues; i++, que++)
4983 que->eitr_setting = 0; 4983 que->eitr_setting = 0;
4984 4984
4985 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){ 4985 if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL){
4986 /* 4986 /*
4987 * Discard count for both MAC Local Fault and 4987 * Discard count for both MAC Local Fault and
4988 * Remote Fault because those registers are 4988 * Remote Fault because those registers are
4989 * valid only when the link speed is up and 4989 * valid only when the link speed is up and
4990 * 10Gbps. 4990 * 10Gbps.
4991 */ 4991 */
4992 IXGBE_READ_REG(hw, IXGBE_MLFC); 4992 IXGBE_READ_REG(hw, IXGBE_MLFC);
4993 IXGBE_READ_REG(hw, IXGBE_MRFC); 4993 IXGBE_READ_REG(hw, IXGBE_MRFC);
4994 } 4994 }
4995 4995
4996 if (bootverbose) { 4996 if (bootverbose) {
4997 const char *bpsmsg; 4997 const char *bpsmsg;
4998 4998
4999 switch (adapter->link_speed) { 4999 switch (adapter->link_speed) {
5000 case IXGBE_LINK_SPEED_10GB_FULL: 5000 case IXGBE_LINK_SPEED_10GB_FULL:
5001 bpsmsg = "10 Gbps"; 5001 bpsmsg = "10 Gbps";
5002 break; 5002 break;
5003 case IXGBE_LINK_SPEED_5GB_FULL: 5003 case IXGBE_LINK_SPEED_5GB_FULL:
5004 bpsmsg = "5 Gbps"; 5004 bpsmsg = "5 Gbps";
5005 break; 5005 break;
5006 case IXGBE_LINK_SPEED_2_5GB_FULL: 5006 case IXGBE_LINK_SPEED_2_5GB_FULL:
5007 bpsmsg = "2.5 Gbps"; 5007 bpsmsg = "2.5 Gbps";
5008 break; 5008 break;
5009 case IXGBE_LINK_SPEED_1GB_FULL: 5009 case IXGBE_LINK_SPEED_1GB_FULL:
5010 bpsmsg = "1 Gbps"; 5010 bpsmsg = "1 Gbps";
5011 break; 5011 break;
5012 case IXGBE_LINK_SPEED_100_FULL: 5012 case IXGBE_LINK_SPEED_100_FULL:
5013 bpsmsg = "100 Mbps"; 5013 bpsmsg = "100 Mbps";
5014 break; 5014 break;
5015 case IXGBE_LINK_SPEED_10_FULL: 5015 case IXGBE_LINK_SPEED_10_FULL:
5016 bpsmsg = "10 Mbps"; 5016 bpsmsg = "10 Mbps";
5017 break; 5017 break;
5018 default: 5018 default:
5019 bpsmsg = "unknown speed"; 5019 bpsmsg = "unknown speed";
5020 break; 5020 break;
5021 } 5021 }
5022 device_printf(dev, "Link is up %s %s \n", 5022 device_printf(dev, "Link is up %s %s \n",
5023 bpsmsg, "Full Duplex"); 5023 bpsmsg, "Full Duplex");
5024 } 5024 }
5025 adapter->link_active = LINK_STATE_UP; 5025 adapter->link_active = LINK_STATE_UP;
5026 /* Update any Flow Control changes */ 5026 /* Update any Flow Control changes */
5027 ixgbe_fc_enable(&adapter->hw); 5027 ixgbe_fc_enable(&adapter->hw);
5028 /* Update DMA coalescing config */ 5028 /* Update DMA coalescing config */
5029 ixgbe_config_dmac(adapter); 5029 ixgbe_config_dmac(adapter);
5030 if_link_state_change(ifp, LINK_STATE_UP); 5030 if_link_state_change(ifp, LINK_STATE_UP);
5031 5031
5032 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 5032 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5033 ixgbe_ping_all_vfs(adapter); 5033 ixgbe_ping_all_vfs(adapter);
5034 } 5034 }
5035 } else { 5035 } else {
5036 /* 5036 /*
5037 * Do this only when link_active changes to DOWN, i.e.: 5037 * Do this only when link_active changes to DOWN, i.e.:
5038 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN 5038 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
5039 * b) LINK_STATE_UP -> LINK_STATE_DOWN 5039 * b) LINK_STATE_UP -> LINK_STATE_DOWN
5040 */ 5040 */
5041 if (adapter->link_active != LINK_STATE_DOWN) { 5041 if (adapter->link_active != LINK_STATE_DOWN) {
5042 if (bootverbose) 5042 if (bootverbose)
5043 device_printf(dev, "Link is Down\n"); 5043 device_printf(dev, "Link is Down\n");
5044 if_link_state_change(ifp, LINK_STATE_DOWN); 5044 if_link_state_change(ifp, LINK_STATE_DOWN);
5045 adapter->link_active = LINK_STATE_DOWN; 5045 adapter->link_active = LINK_STATE_DOWN;
5046 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 5046 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5047 ixgbe_ping_all_vfs(adapter); 5047 ixgbe_ping_all_vfs(adapter);
5048 ixgbe_drain_all(adapter); 5048 ixgbe_drain_all(adapter);
5049 } 5049 }
5050 } 5050 }
5051} /* ixgbe_update_link_status */ 5051} /* ixgbe_update_link_status */
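/*
 * Illustrative sketch, not part of the driver: the usual producer of the
 * cached state consumed above.  A link-state refresh queries the MAC
 * (this sketch assumes hw->mac.ops.check_link is populated for the MAC
 * in use), caches the result in the softc, and only then propagates it
 * to the OS under core_mtx, as ixgbe_update_link_status() requires.
 */
#if 0 /* example only */
static void
example_refresh_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	mutex_enter(&adapter->core_mtx);
	hw->mac.ops.check_link(hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);
	ixgbe_update_link_status(adapter);	/* push cached state to OS */
	mutex_exit(&adapter->core_mtx);
}
#endif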
5052 5052
5053/************************************************************************ 5053/************************************************************************
5054 * ixgbe_config_dmac - Configure DMA Coalescing 5054 * ixgbe_config_dmac - Configure DMA Coalescing
5055 ************************************************************************/ 5055 ************************************************************************/
5056static void 5056static void
5057ixgbe_config_dmac(struct adapter *adapter) 5057ixgbe_config_dmac(struct adapter *adapter)
5058{ 5058{
5059 struct ixgbe_hw *hw = &adapter->hw; 5059 struct ixgbe_hw *hw = &adapter->hw;
5060 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 5060 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
5061 5061
5062 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 5062 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
5063 return; 5063 return;
5064 5064
5065 if (dcfg->watchdog_timer != adapter->dmac || 5065 if (dcfg->watchdog_timer != adapter->dmac ||
5066 dcfg->link_speed != adapter->link_speed) { 5066 dcfg->link_speed != adapter->link_speed) {
5067 dcfg->watchdog_timer = adapter->dmac; 5067 dcfg->watchdog_timer = adapter->dmac;
5068 dcfg->fcoe_en = false; 5068 dcfg->fcoe_en = false;
5069 dcfg->link_speed = adapter->link_speed; 5069 dcfg->link_speed = adapter->link_speed;
5070 dcfg->num_tcs = 1; 5070 dcfg->num_tcs = 1;
5071 5071
5072 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 5072 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
5073 dcfg->watchdog_timer, dcfg->link_speed); 5073 dcfg->watchdog_timer, dcfg->link_speed);
5074 5074
5075 hw->mac.ops.dmac_config(hw); 5075 hw->mac.ops.dmac_config(hw);
5076 } 5076 }
5077} /* ixgbe_config_dmac */ 5077} /* ixgbe_config_dmac */
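/*
 * Illustrative usage, not part of the driver: ixgbe_config_dmac() is
 * cheap to call repeatedly because it reprograms the MAC only when the
 * cached watchdog value or link speed actually changed.  The value of
 * adapter->dmac comes from the ixgbe_sysctl_dmac() handler further below.
 */
#if 0 /* example only */
	adapter->dmac = 1000;		/* one of the legal watchdog values */
	ixgbe_config_dmac(adapter);	/* programs the new value */
	ixgbe_config_dmac(adapter);	/* no-op: nothing changed */
#endif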
5078 5078
5079/************************************************************************ 5079/************************************************************************
5080 * ixgbe_enable_intr 5080 * ixgbe_enable_intr
5081 ************************************************************************/ 5081 ************************************************************************/
5082static void 5082static void
5083ixgbe_enable_intr(struct adapter *adapter) 5083ixgbe_enable_intr(struct adapter *adapter)
5084{ 5084{
5085 struct ixgbe_hw *hw = &adapter->hw; 5085 struct ixgbe_hw *hw = &adapter->hw;
5086 struct ix_queue *que = adapter->queues; 5086 struct ix_queue *que = adapter->queues;
5087 u32 mask, fwsm; 5087 u32 mask, fwsm;
5088 5088
5089 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 5089 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
5090 5090
5091 switch (adapter->hw.mac.type) { 5091 switch (adapter->hw.mac.type) {
5092 case ixgbe_mac_82599EB: 5092 case ixgbe_mac_82599EB:
5093 mask |= IXGBE_EIMS_ECC; 5093 mask |= IXGBE_EIMS_ECC;
5094 /* Temperature sensor on some adapters */ 5094 /* Temperature sensor on some adapters */
5095 mask |= IXGBE_EIMS_GPI_SDP0; 5095 mask |= IXGBE_EIMS_GPI_SDP0;
5096 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 5096 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
5097 mask |= IXGBE_EIMS_GPI_SDP1; 5097 mask |= IXGBE_EIMS_GPI_SDP1;
5098 mask |= IXGBE_EIMS_GPI_SDP2; 5098 mask |= IXGBE_EIMS_GPI_SDP2;
5099 break; 5099 break;
5100 case ixgbe_mac_X540: 5100 case ixgbe_mac_X540:
5101 /* Detect if Thermal Sensor is enabled */ 5101 /* Detect if Thermal Sensor is enabled */
5102 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 5102 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
5103 if (fwsm & IXGBE_FWSM_TS_ENABLED) 5103 if (fwsm & IXGBE_FWSM_TS_ENABLED)
5104 mask |= IXGBE_EIMS_TS; 5104 mask |= IXGBE_EIMS_TS;
5105 mask |= IXGBE_EIMS_ECC; 5105 mask |= IXGBE_EIMS_ECC;
5106 break; 5106 break;
5107 case ixgbe_mac_X550: 5107 case ixgbe_mac_X550:
5108 /* MAC thermal sensor is automatically enabled */ 5108 /* MAC thermal sensor is automatically enabled */
5109 mask |= IXGBE_EIMS_TS; 5109 mask |= IXGBE_EIMS_TS;
5110 mask |= IXGBE_EIMS_ECC; 5110 mask |= IXGBE_EIMS_ECC;
5111 break; 5111 break;
5112 case ixgbe_mac_X550EM_x: 5112 case ixgbe_mac_X550EM_x:
5113 case ixgbe_mac_X550EM_a: 5113 case ixgbe_mac_X550EM_a:
5114 /* Some devices use SDP0 for important information */ 5114 /* Some devices use SDP0 for important information */
5115 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 5115 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
5116 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 5116 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
5117 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 5117 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
5118 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 5118 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
5119 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 5119 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
5120 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 5120 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
5121 mask |= IXGBE_EICR_GPI_SDP0_X540; 5121 mask |= IXGBE_EICR_GPI_SDP0_X540;
5122 mask |= IXGBE_EIMS_ECC; 5122 mask |= IXGBE_EIMS_ECC;
5123 break; 5123 break;
5124 default: 5124 default:
5125 break; 5125 break;
5126 } 5126 }
5127 5127
5128 /* Enable Fan Failure detection */ 5128 /* Enable Fan Failure detection */
5129 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 5129 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
5130 mask |= IXGBE_EIMS_GPI_SDP1; 5130 mask |= IXGBE_EIMS_GPI_SDP1;
5131 /* Enable SR-IOV */ 5131 /* Enable SR-IOV */
5132 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 5132 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
5133 mask |= IXGBE_EIMS_MAILBOX; 5133 mask |= IXGBE_EIMS_MAILBOX;
5134 /* Enable Flow Director */ 5134 /* Enable Flow Director */
5135 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 5135 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
5136 mask |= IXGBE_EIMS_FLOW_DIR; 5136 mask |= IXGBE_EIMS_FLOW_DIR;
5137 5137
5138 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 5138 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
5139 5139
5140 /* With MSI-X we use auto clear */ 5140 /* With MSI-X we use auto clear */
5141 if (adapter->msix_mem) { 5141 if (adapter->msix_mem) {
5142 mask = IXGBE_EIMS_ENABLE_MASK; 5142 mask = IXGBE_EIMS_ENABLE_MASK;
5143 /* Don't autoclear Link */ 5143 /* Don't autoclear Link */
5144 mask &= ~IXGBE_EIMS_OTHER; 5144 mask &= ~IXGBE_EIMS_OTHER;
5145 mask &= ~IXGBE_EIMS_LSC; 5145 mask &= ~IXGBE_EIMS_LSC;
5146 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 5146 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
5147 mask &= ~IXGBE_EIMS_MAILBOX; 5147 mask &= ~IXGBE_EIMS_MAILBOX;
5148 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 5148 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
5149 } 5149 }
5150 5150
5151 /* 5151 /*
5152 * Now enable all queues. This is done separately to 5152 * Now enable all queues. This is done separately to
5153 * allow handling the extended (beyond 32) MSI-X 5153 * allow handling the extended (beyond 32) MSI-X
5154 * vectors that can be used by the 82599. 5154 * vectors that can be used by the 82599.
5155 */ 5155 */
5156 for (int i = 0; i < adapter->num_queues; i++, que++) 5156 for (int i = 0; i < adapter->num_queues; i++, que++)
5157 ixgbe_enable_queue(adapter, que->msix); 5157 ixgbe_enable_queue(adapter, que->msix);
5158 5158
5159 IXGBE_WRITE_FLUSH(hw); 5159 IXGBE_WRITE_FLUSH(hw);
5160 5160
5161} /* ixgbe_enable_intr */ 5161} /* ixgbe_enable_intr */
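/*
 * Illustrative sketch, not part of the driver: the EIMS/EIAC pairing
 * used above.  Setting a bit in EIMS enables that interrupt cause;
 * setting the same bit in EIAC makes the hardware clear it automatically
 * on MSI-X delivery.  Link (LSC), "other" and (with SR-IOV) mailbox
 * causes are kept out of EIAC so they stay pending until the admin task
 * acknowledges them explicitly.
 */
#if 0 /* example only */
static void
example_autoclear_setup(struct ixgbe_hw *hw)
{
	u32 mask = IXGBE_EIMS_ENABLE_MASK;

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);	/* enable all causes */
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);	/* autoclear queues only */
	IXGBE_WRITE_FLUSH(hw);
}
#endif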
5162 5162
5163/************************************************************************ 5163/************************************************************************
5164 * ixgbe_disable_intr_internal 5164 * ixgbe_disable_intr_internal
5165 ************************************************************************/ 5165 ************************************************************************/
5166static void 5166static void
5167ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok) 5167ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
5168{ 5168{
5169 struct ix_queue *que = adapter->queues; 5169 struct ix_queue *que = adapter->queues;
5170 5170
5171 /* disable interrupts other than queues */ 5171 /* disable interrupts other than queues */
5172 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE); 5172 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
5173 5173
5174 if (adapter->msix_mem) 5174 if (adapter->msix_mem)
5175 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 5175 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
5176 5176
5177 for (int i = 0; i < adapter->num_queues; i++, que++) 5177 for (int i = 0; i < adapter->num_queues; i++, que++)
5178 ixgbe_disable_queue_internal(adapter, que->msix, nestok); 5178 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
5179 5179
5180 IXGBE_WRITE_FLUSH(&adapter->hw); 5180 IXGBE_WRITE_FLUSH(&adapter->hw);
5181 5181
5182} /* ixgbe_disable_intr_internal */ 5182} /* ixgbe_disable_intr_internal */
5183 5183
5184/************************************************************************ 5184/************************************************************************
5185 * ixgbe_disable_intr 5185 * ixgbe_disable_intr
5186 ************************************************************************/ 5186 ************************************************************************/
5187static void 5187static void
5188ixgbe_disable_intr(struct adapter *adapter) 5188ixgbe_disable_intr(struct adapter *adapter)
5189{ 5189{
5190 5190
5191 ixgbe_disable_intr_internal(adapter, true); 5191 ixgbe_disable_intr_internal(adapter, true);
5192} /* ixgbe_disable_intr */ 5192} /* ixgbe_disable_intr */
5193 5193
5194/************************************************************************ 5194/************************************************************************
5195 * ixgbe_ensure_disabled_intr 5195 * ixgbe_ensure_disabled_intr
5196 ************************************************************************/ 5196 ************************************************************************/
5197void 5197void
5198ixgbe_ensure_disabled_intr(struct adapter *adapter) 5198ixgbe_ensure_disabled_intr(struct adapter *adapter)
5199{ 5199{
5200 5200
5201 ixgbe_disable_intr_internal(adapter, false); 5201 ixgbe_disable_intr_internal(adapter, false);
5202} /* ixgbe_ensure_disabled_intr */ 5202} /* ixgbe_ensure_disabled_intr */
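/*
 * Illustrative note, not part of the driver: both wrappers funnel into
 * ixgbe_disable_intr_internal(); the only difference is the nestok flag.
 * ixgbe_disable_intr() (nestok == true) tolerates nesting on normal
 * paths, while ixgbe_ensure_disabled_intr() (nestok == false) appears
 * intended for paths that must end with interrupts disabled regardless
 * of any earlier disable.  A hypothetical teardown-path caller:
 */
#if 0 /* example only */
	ixgbe_ensure_disabled_intr(adapter);	/* idempotent hard disable */
	/* ... now safe to tear down queue resources ... */
#endif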
5203 5203
5204/************************************************************************ 5204/************************************************************************
5205 * ixgbe_legacy_irq - Legacy Interrupt Service routine 5205 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5206 ************************************************************************/ 5206 ************************************************************************/
5207static int 5207static int
5208ixgbe_legacy_irq(void *arg) 5208ixgbe_legacy_irq(void *arg)
5209{ 5209{
5210 struct ix_queue *que = arg; 5210 struct ix_queue *que = arg;
5211 struct adapter *adapter = que->adapter; 5211 struct adapter *adapter = que->adapter;
5212 struct ixgbe_hw *hw = &adapter->hw; 5212 struct ixgbe_hw *hw = &adapter->hw;
5213 struct ifnet *ifp = adapter->ifp; 5213 struct ifnet *ifp = adapter->ifp;
5214 struct tx_ring *txr = adapter->tx_rings; 5214 struct tx_ring *txr = adapter->tx_rings;
5215 bool more = false; 5215 bool more = false;
5216 bool reenable_intr = true; 5216 bool reenable_intr = true;
5217 u32 eicr, eicr_mask; 5217 u32 eicr, eicr_mask;
5218 u32 task_requests = 0; 5218 u32 task_requests = 0;
5219 5219
5220 /* Silicon errata #26 on 82598 */ 5220 /* Silicon errata #26 on 82598 */
5221 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 5221 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5222 5222
5223 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 5223 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5224 5224
5225 adapter->stats.pf.legint.ev_count++; 5225 adapter->stats.pf.legint.ev_count++;
5226 ++que->irqs.ev_count; 5226 ++que->irqs.ev_count;
5227 if (eicr == 0) { 5227 if (eicr == 0) {
5228 adapter->stats.pf.intzero.ev_count++; 5228 adapter->stats.pf.intzero.ev_count++;
5229 if ((ifp->if_flags & IFF_UP) != 0) 5229 if ((ifp->if_flags & IFF_UP) != 0)
5230 ixgbe_enable_intr(adapter); 5230 ixgbe_enable_intr(adapter);
5231 return 0; 5231 return 0;
5232 } 5232 }
5233 5233
5234 if ((ifp->if_flags & IFF_RUNNING) != 0) { 5234 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5235 /* 5235 /*
5236 * Cache "txrx_use_workqueue" here, for the same reason as in ixgbe_msix_que(). 5236 * Cache "txrx_use_workqueue" here, for the same reason as in ixgbe_msix_que().
5237 */ 5237 */
5238 que->txrx_use_workqueue = adapter->txrx_use_workqueue; 5238 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5239 5239
5240#ifdef __NetBSD__ 5240#ifdef __NetBSD__
5241 /* Don't run ixgbe_rxeof in interrupt context */ 5241 /* Don't run ixgbe_rxeof in interrupt context */
5242 more = true; 5242 more = true;
5243#else 5243#else
5244 more = ixgbe_rxeof(que); 5244 more = ixgbe_rxeof(que);
5245#endif 5245#endif
5246 5246
5247 IXGBE_TX_LOCK(txr); 5247 IXGBE_TX_LOCK(txr);
5248 ixgbe_txeof(txr); 5248 ixgbe_txeof(txr);
5249#ifdef notyet 5249#ifdef notyet
5250 if (!ixgbe_ring_empty(ifp, txr->br)) 5250 if (!ixgbe_ring_empty(ifp, txr->br))
5251 ixgbe_start_locked(ifp, txr); 5251 ixgbe_start_locked(ifp, txr);
5252#endif 5252#endif
5253 IXGBE_TX_UNLOCK(txr); 5253 IXGBE_TX_UNLOCK(txr);
5254 } 5254 }
5255 5255
5256 /* Check for fan failure */ 5256 /* Check for fan failure */
5257 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 5257 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5258 ixgbe_check_fan_failure(adapter, eicr, true); 5258 ixgbe_check_fan_failure(adapter, eicr, true);
5259 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 5259 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5260 } 5260 }
5261 5261
5262 /* Link status change */ 5262 /* Link status change */
5263 if (eicr & IXGBE_EICR_LSC) 5263 if (eicr & IXGBE_EICR_LSC)
5264 task_requests |= IXGBE_REQUEST_TASK_LSC; 5264 task_requests |= IXGBE_REQUEST_TASK_LSC;
5265 5265
5266 if (ixgbe_is_sfp(hw)) { 5266 if (ixgbe_is_sfp(hw)) {
5267 /* Pluggable optics-related interrupt */ 5267 /* Pluggable optics-related interrupt */
5268 if (hw->mac.type >= ixgbe_mac_X540) 5268 if (hw->mac.type >= ixgbe_mac_X540)
5269 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 5269 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5270 else 5270 else
5271 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 5271 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5272 5272
5273 if (eicr & eicr_mask) { 5273 if (eicr & eicr_mask) {
5274 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 5274 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5275 task_requests |= IXGBE_REQUEST_TASK_MOD; 5275 task_requests |= IXGBE_REQUEST_TASK_MOD;
5276 } 5276 }
5277 5277
5278 if ((hw->mac.type == ixgbe_mac_82599EB) && 5278 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5279 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 5279 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5280 IXGBE_WRITE_REG(hw, IXGBE_EICR, 5280 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5281 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 5281 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5282 task_requests |= IXGBE_REQUEST_TASK_MSF; 5282 task_requests |= IXGBE_REQUEST_TASK_MSF;
5283 } 5283 }
5284 } 5284 }
5285 5285
5286 /* External PHY interrupt */ 5286 /* External PHY interrupt */
5287 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 5287 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5288 (eicr & IXGBE_EICR_GPI_SDP0_X540)) 5288 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5289 task_requests |= IXGBE_REQUEST_TASK_PHY; 5289 task_requests |= IXGBE_REQUEST_TASK_PHY;
5290 5290
5291 if (more) { 5291 if (more) {
5292 que->req.ev_count++; 5292 que->req.ev_count++;
5293 ixgbe_sched_handle_que(adapter, que); 5293 ixgbe_sched_handle_que(adapter, que);
5294 reenable_intr = false; 5294 reenable_intr = false;
5295 } 5295 }
5296 if (task_requests != 0) { 5296 if (task_requests != 0) {
5297 /* Re-enabling other interrupts is done in the admin task */ 5297 /* Re-enabling other interrupts is done in the admin task */
5298 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR; 5298 task_requests |= IXGBE_REQUEST_TASK_NEED_ACKINTR;
5299 atomic_or_32(&adapter->task_requests, task_requests); 5299 atomic_or_32(&adapter->task_requests, task_requests);
5300 ixgbe_schedule_admin_tasklet(adapter); 5300 ixgbe_schedule_admin_tasklet(adapter);
5301 reenable_intr = false; 5301 reenable_intr = false;
5302 } 5302 }
5303 5303
5304 if (reenable_intr) 5304 if (reenable_intr)
5305 ixgbe_enable_intr(adapter); 5305 ixgbe_enable_intr(adapter);
5306 5306
5307 return 1; 5307 return 1;
5308} /* ixgbe_legacy_irq */ 5308} /* ixgbe_legacy_irq */
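/*
 * Illustrative sketch, not part of the driver: the deferral pattern the
 * legacy ISR uses above.  Work that cannot run at interrupt level is
 * encoded as IXGBE_REQUEST_TASK_* bits, OR-ed into adapter->task_requests
 * atomically, and the admin tasklet re-enables the other interrupts once
 * it has handled them (signalled by IXGBE_REQUEST_TASK_NEED_ACKINTR).
 */
#if 0 /* example only */
	u32 req = IXGBE_REQUEST_TASK_LSC | IXGBE_REQUEST_TASK_NEED_ACKINTR;

	atomic_or_32(&adapter->task_requests, req);
	ixgbe_schedule_admin_tasklet(adapter);
	/* Do not call ixgbe_enable_intr() here; the admin task does it. */
#endif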
5309 5309
5310/************************************************************************ 5310/************************************************************************
5311 * ixgbe_free_pciintr_resources 5311 * ixgbe_free_pciintr_resources
5312 ************************************************************************/ 5312 ************************************************************************/
5313static void 5313static void
5314ixgbe_free_pciintr_resources(struct adapter *adapter) 5314ixgbe_free_pciintr_resources(struct adapter *adapter)
5315{ 5315{
5316 struct ix_queue *que = adapter->queues; 5316 struct ix_queue *que = adapter->queues;
5317 int rid; 5317 int rid;
5318 5318
5319 /* 5319 /*
5320 * Release all MSI-X queue resources. 5320 * Release all MSI-X queue resources.
5321 */ 5321 */
5322 for (int i = 0; i < adapter->num_queues; i++, que++) { 5322 for (int i = 0; i < adapter->num_queues; i++, que++) {
5323 if (que->res != NULL) { 5323 if (que->res != NULL) {
5324 pci_intr_disestablish(adapter->osdep.pc, 5324 pci_intr_disestablish(adapter->osdep.pc,
5325 adapter->osdep.ihs[i]); 5325 adapter->osdep.ihs[i]);
5326 adapter->osdep.ihs[i] = NULL; 5326 adapter->osdep.ihs[i] = NULL;
5327 } 5327 }
5328 } 5328 }
5329 5329
5330 /* Clean the Legacy or Link interrupt last */ 5330 /* Clean the Legacy or Link interrupt last */
5331 if (adapter->vector) /* we are doing MSIX */ 5331 if (adapter->vector) /* we are doing MSIX */
5332 rid = adapter->vector; 5332 rid = adapter->vector;
5333 else 5333 else
5334 rid = 0; 5334 rid = 0;
5335 5335
5336 if (adapter->osdep.ihs[rid] != NULL) { 5336 if (adapter->osdep.ihs[rid] != NULL) {
5337 pci_intr_disestablish(adapter->osdep.pc, 5337 pci_intr_disestablish(adapter->osdep.pc,
5338 adapter->osdep.ihs[rid]); 5338 adapter->osdep.ihs[rid]);
5339 adapter->osdep.ihs[rid] = NULL; 5339 adapter->osdep.ihs[rid] = NULL;
5340 } 5340 }
5341 5341
5342 if (adapter->osdep.intrs != NULL) { 5342 if (adapter->osdep.intrs != NULL) {
5343 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 5343 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5344 adapter->osdep.nintrs); 5344 adapter->osdep.nintrs);
5345 adapter->osdep.intrs = NULL; 5345 adapter->osdep.intrs = NULL;
5346 } 5346 }
5347} /* ixgbe_free_pciintr_resources */ 5347} /* ixgbe_free_pciintr_resources */
5348 5348
5349/************************************************************************ 5349/************************************************************************
5350 * ixgbe_free_pci_resources 5350 * ixgbe_free_pci_resources
5351 ************************************************************************/ 5351 ************************************************************************/
5352static void 5352static void
5353ixgbe_free_pci_resources(struct adapter *adapter) 5353ixgbe_free_pci_resources(struct adapter *adapter)
5354{ 5354{
5355 5355
5356 ixgbe_free_pciintr_resources(adapter); 5356 ixgbe_free_pciintr_resources(adapter);
5357 5357
5358 if (adapter->osdep.mem_size != 0) { 5358 if (adapter->osdep.mem_size != 0) {
5359 bus_space_unmap(adapter->osdep.mem_bus_space_tag, 5359 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5360 adapter->osdep.mem_bus_space_handle, 5360 adapter->osdep.mem_bus_space_handle,
5361 adapter->osdep.mem_size); 5361 adapter->osdep.mem_size);
5362 } 5362 }
5363 5363
5364} /* ixgbe_free_pci_resources */ 5364} /* ixgbe_free_pci_resources */
5365 5365
5366/************************************************************************ 5366/************************************************************************
5367 * ixgbe_set_sysctl_value 5367 * ixgbe_set_sysctl_value
5368 ************************************************************************/ 5368 ************************************************************************/
5369static void 5369static void
5370ixgbe_set_sysctl_value(struct adapter *adapter, const char *name, 5370ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5371 const char *description, int *limit, int value) 5371 const char *description, int *limit, int value)
5372{ 5372{
5373 device_t dev = adapter->dev; 5373 device_t dev = adapter->dev;
5374 struct sysctllog **log; 5374 struct sysctllog **log;
5375 const struct sysctlnode *rnode, *cnode; 5375 const struct sysctlnode *rnode, *cnode;
5376 5376
5377 /* 5377 /*
5378 * It's not required to check recovery mode because this function never 5378 * It's not required to check recovery mode because this function never
5379 * touches hardware. 5379 * touches hardware.
5380 */ 5380 */
5381 5381
5382 log = &adapter->sysctllog; 5382 log = &adapter->sysctllog;
5383 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 5383 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5384 aprint_error_dev(dev, "could not create sysctl root\n"); 5384 aprint_error_dev(dev, "could not create sysctl root\n");
5385 return; 5385 return;
5386 } 5386 }
5387 if (sysctl_createv(log, 0, &rnode, &cnode, 5387 if (sysctl_createv(log, 0, &rnode, &cnode,
5388 CTLFLAG_READWRITE, CTLTYPE_INT, 5388 CTLFLAG_READWRITE, CTLTYPE_INT,
5389 name, SYSCTL_DESCR(description), 5389 name, SYSCTL_DESCR(description),
5390 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0) 5390 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5391 aprint_error_dev(dev, "could not create sysctl\n"); 5391 aprint_error_dev(dev, "could not create sysctl\n");
5392 *limit = value; 5392 *limit = value;
5393} /* ixgbe_set_sysctl_value */ 5393} /* ixgbe_set_sysctl_value */
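/*
 * Illustrative usage, not part of the driver: registering a read/write
 * integer tunable through the helper above.  The node name, description
 * and the example_limit field are hypothetical.
 */
#if 0 /* example only */
	ixgbe_set_sysctl_value(adapter, "example_limit",
	    "hypothetical tunable", &adapter->example_limit, 256);
#endif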
5394 5394
5395/************************************************************************ 5395/************************************************************************
5396 * ixgbe_sysctl_flowcntl 5396 * ixgbe_sysctl_flowcntl
5397 * 5397 *
5398 * SYSCTL wrapper around setting Flow Control 5398 * SYSCTL wrapper around setting Flow Control
5399 ************************************************************************/ 5399 ************************************************************************/
5400static int 5400static int
5401ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS) 5401ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5402{ 5402{
5403 struct sysctlnode node = *rnode; 5403 struct sysctlnode node = *rnode;
5404 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5404 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5405 int error, fc; 5405 int error, fc;
5406 5406
5407 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5407 if (ixgbe_fw_recovery_mode_swflag(adapter))
5408 return (EPERM); 5408 return (EPERM);
5409 5409
5410 fc = adapter->hw.fc.current_mode; 5410 fc = adapter->hw.fc.current_mode;
5411 node.sysctl_data = &fc; 5411 node.sysctl_data = &fc;
5412 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5412 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5413 if (error != 0 || newp == NULL) 5413 if (error != 0 || newp == NULL)
5414 return error; 5414 return error;
5415 5415
5416 /* Don't bother if it hasn't changed */ 5416 /* Don't bother if it hasn't changed */
5417 if (fc == adapter->hw.fc.current_mode) 5417 if (fc == adapter->hw.fc.current_mode)
5418 return (0); 5418 return (0);
5419 5419
5420 return ixgbe_set_flowcntl(adapter, fc); 5420 return ixgbe_set_flowcntl(adapter, fc);
5421} /* ixgbe_sysctl_flowcntl */ 5421} /* ixgbe_sysctl_flowcntl */
5422 5422
5423/************************************************************************ 5423/************************************************************************
5424 * ixgbe_set_flowcntl - Set flow control 5424 * ixgbe_set_flowcntl - Set flow control
5425 * 5425 *
5426 * Flow control values: 5426 * Flow control values:
5427 * 0 - off 5427 * 0 - off
5428 * 1 - rx pause 5428 * 1 - rx pause
5429 * 2 - tx pause 5429 * 2 - tx pause
5430 * 3 - full 5430 * 3 - full
5431 ************************************************************************/ 5431 ************************************************************************/
5432static int 5432static int
5433ixgbe_set_flowcntl(struct adapter *adapter, int fc) 5433ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5434{ 5434{
5435 switch (fc) { 5435 switch (fc) {
5436 case ixgbe_fc_rx_pause: 5436 case ixgbe_fc_rx_pause:
5437 case ixgbe_fc_tx_pause: 5437 case ixgbe_fc_tx_pause:
5438 case ixgbe_fc_full: 5438 case ixgbe_fc_full:
5439 adapter->hw.fc.requested_mode = fc; 5439 adapter->hw.fc.requested_mode = fc;
5440 if (adapter->num_queues > 1) 5440 if (adapter->num_queues > 1)
5441 ixgbe_disable_rx_drop(adapter); 5441 ixgbe_disable_rx_drop(adapter);
5442 break; 5442 break;
5443 case ixgbe_fc_none: 5443 case ixgbe_fc_none:
5444 adapter->hw.fc.requested_mode = ixgbe_fc_none; 5444 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5445 if (adapter->num_queues > 1) 5445 if (adapter->num_queues > 1)
5446 ixgbe_enable_rx_drop(adapter); 5446 ixgbe_enable_rx_drop(adapter);
5447 break; 5447 break;
5448 default: 5448 default:
5449 return (EINVAL); 5449 return (EINVAL);
5450 } 5450 }
5451 5451
5452#if 0 /* XXX NetBSD */ 5452#if 0 /* XXX NetBSD */
5453 /* Don't autoneg if forcing a value */ 5453 /* Don't autoneg if forcing a value */
5454 adapter->hw.fc.disable_fc_autoneg = TRUE; 5454 adapter->hw.fc.disable_fc_autoneg = TRUE;
5455#endif 5455#endif
5456 ixgbe_fc_enable(&adapter->hw); 5456 ixgbe_fc_enable(&adapter->hw);
5457 5457
5458 return (0); 5458 return (0);
5459} /* ixgbe_set_flowcntl */ 5459} /* ixgbe_set_flowcntl */
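/*
 * Illustrative usage, not part of the driver: the same values documented
 * above reach this helper from the sysctl wrapper; calling it directly
 * with ixgbe_fc_full requests symmetric (rx + tx) pause.
 */
#if 0 /* example only */
	int error = ixgbe_set_flowcntl(adapter, ixgbe_fc_full);
	if (error != 0)
		device_printf(adapter->dev, "flow control rejected: %d\n",
		    error);
#endif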
5460 5460
5461/************************************************************************ 5461/************************************************************************
5462 * ixgbe_enable_rx_drop 5462 * ixgbe_enable_rx_drop
5463 * 5463 *
5464 * Enable the hardware to drop packets when the buffer is 5464 * Enable the hardware to drop packets when the buffer is
5465 * full. This is useful with multiqueue, so that no single 5465 * full. This is useful with multiqueue, so that no single
5466 * queue being full stalls the entire RX engine. We only 5466 * queue being full stalls the entire RX engine. We only
5467 * enable this when Multiqueue is enabled AND Flow Control 5467 * enable this when Multiqueue is enabled AND Flow Control
5468 * is disabled. 5468 * is disabled.
5469 ************************************************************************/ 5469 ************************************************************************/
5470static void 5470static void
5471ixgbe_enable_rx_drop(struct adapter *adapter) 5471ixgbe_enable_rx_drop(struct adapter *adapter)
5472{ 5472{
5473 struct ixgbe_hw *hw = &adapter->hw; 5473 struct ixgbe_hw *hw = &adapter->hw;
5474 struct rx_ring *rxr; 5474 struct rx_ring *rxr;
5475 u32 srrctl; 5475 u32 srrctl;
5476 5476
5477 for (int i = 0; i < adapter->num_queues; i++) { 5477 for (int i = 0; i < adapter->num_queues; i++) {
5478 rxr = &adapter->rx_rings[i]; 5478 rxr = &adapter->rx_rings[i];
5479 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5479 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5480 srrctl |= IXGBE_SRRCTL_DROP_EN; 5480 srrctl |= IXGBE_SRRCTL_DROP_EN;
5481 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5481 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5482 } 5482 }
5483 5483
5484 /* enable drop for each vf */ 5484 /* enable drop for each vf */
5485 for (int i = 0; i < adapter->num_vfs; i++) { 5485 for (int i = 0; i < adapter->num_vfs; i++) {
5486 IXGBE_WRITE_REG(hw, IXGBE_QDE, 5486 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5487 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 5487 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5488 IXGBE_QDE_ENABLE)); 5488 IXGBE_QDE_ENABLE));
5489 } 5489 }
5490} /* ixgbe_enable_rx_drop */ 5490} /* ixgbe_enable_rx_drop */
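/*
 * Illustrative sketch, not part of the driver: the policy stated above,
 * as ixgbe_set_flowcntl() applies it -- drop-enable is used only when
 * running multiqueue with flow control fully off.
 */
#if 0 /* example only */
	if (adapter->num_queues > 1) {
		if (adapter->hw.fc.requested_mode == ixgbe_fc_none)
			ixgbe_enable_rx_drop(adapter);
		else
			ixgbe_disable_rx_drop(adapter);
	}
#endif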
5491 5491
5492/************************************************************************ 5492/************************************************************************
5493 * ixgbe_disable_rx_drop 5493 * ixgbe_disable_rx_drop
5494 ************************************************************************/ 5494 ************************************************************************/
5495static void 5495static void
5496ixgbe_disable_rx_drop(struct adapter *adapter) 5496ixgbe_disable_rx_drop(struct adapter *adapter)
5497{ 5497{
5498 struct ixgbe_hw *hw = &adapter->hw; 5498 struct ixgbe_hw *hw = &adapter->hw;
5499 struct rx_ring *rxr; 5499 struct rx_ring *rxr;
5500 u32 srrctl; 5500 u32 srrctl;
5501 5501
5502 for (int i = 0; i < adapter->num_queues; i++) { 5502 for (int i = 0; i < adapter->num_queues; i++) {
5503 rxr = &adapter->rx_rings[i]; 5503 rxr = &adapter->rx_rings[i];
5504 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5504 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5505 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 5505 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5506 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5506 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5507 } 5507 }
5508 5508
5509 /* disable drop for each vf */ 5509 /* disable drop for each vf */
5510 for (int i = 0; i < adapter->num_vfs; i++) { 5510 for (int i = 0; i < adapter->num_vfs; i++) {
5511 IXGBE_WRITE_REG(hw, IXGBE_QDE, 5511 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5512 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 5512 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5513 } 5513 }
5514} /* ixgbe_disable_rx_drop */ 5514} /* ixgbe_disable_rx_drop */
5515 5515
5516/************************************************************************ 5516/************************************************************************
5517 * ixgbe_sysctl_advertise 5517 * ixgbe_sysctl_advertise
5518 * 5518 *
5519 * SYSCTL wrapper around setting advertised speed 5519 * SYSCTL wrapper around setting advertised speed
5520 ************************************************************************/ 5520 ************************************************************************/
5521static int 5521static int
5522ixgbe_sysctl_advertise(SYSCTLFN_ARGS) 5522ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5523{ 5523{
5524 struct sysctlnode node = *rnode; 5524 struct sysctlnode node = *rnode;
5525 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5525 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5526 int error = 0, advertise; 5526 int error = 0, advertise;
5527 5527
5528 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5528 if (ixgbe_fw_recovery_mode_swflag(adapter))
5529 return (EPERM); 5529 return (EPERM);
5530 5530
5531 advertise = adapter->advertise; 5531 advertise = adapter->advertise;
5532 node.sysctl_data = &advertise; 5532 node.sysctl_data = &advertise;
5533 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5533 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5534 if (error != 0 || newp == NULL) 5534 if (error != 0 || newp == NULL)
5535 return error; 5535 return error;
5536 5536
5537 return ixgbe_set_advertise(adapter, advertise); 5537 return ixgbe_set_advertise(adapter, advertise);
5538} /* ixgbe_sysctl_advertise */ 5538} /* ixgbe_sysctl_advertise */
5539 5539
5540/************************************************************************ 5540/************************************************************************
5541 * ixgbe_set_advertise - Control advertised link speed 5541 * ixgbe_set_advertise - Control advertised link speed
5542 * 5542 *
5543 * Flags: 5543 * Flags:
5544 * 0x00 - Default (all capable link speed) 5544 * 0x00 - Default (all capable link speed)
5545 * 0x01 - advertise 100 Mb 5545 * 0x01 - advertise 100 Mb
5546 * 0x02 - advertise 1G 5546 * 0x02 - advertise 1G
5547 * 0x04 - advertise 10G 5547 * 0x04 - advertise 10G
5548 * 0x08 - advertise 10 Mb 5548 * 0x08 - advertise 10 Mb
5549 * 0x10 - advertise 2.5G 5549 * 0x10 - advertise 2.5G
5550 * 0x20 - advertise 5G 5550 * 0x20 - advertise 5G
5551 ************************************************************************/ 5551 ************************************************************************/
5552static int 5552static int
5553ixgbe_set_advertise(struct adapter *adapter, int advertise) 5553ixgbe_set_advertise(struct adapter *adapter, int advertise)
5554{ 5554{
5555 device_t dev; 5555 device_t dev;
5556 struct ixgbe_hw *hw; 5556 struct ixgbe_hw *hw;
5557 ixgbe_link_speed speed = 0; 5557 ixgbe_link_speed speed = 0;
5558 ixgbe_link_speed link_caps = 0; 5558 ixgbe_link_speed link_caps = 0;
5559 s32 err = IXGBE_NOT_IMPLEMENTED; 5559 s32 err = IXGBE_NOT_IMPLEMENTED;
5560 bool negotiate = FALSE; 5560 bool negotiate = FALSE;
5561 5561
5562 /* Checks to validate new value */ 5562 /* Checks to validate new value */
5563 if (adapter->advertise == advertise) /* no change */ 5563 if (adapter->advertise == advertise) /* no change */
5564 return (0); 5564 return (0);
5565 5565
5566 dev = adapter->dev; 5566 dev = adapter->dev;
5567 hw = &adapter->hw; 5567 hw = &adapter->hw;
5568 5568
5569 /* No speed changes for backplane media */ 5569 /* No speed changes for backplane media */
5570 if (hw->phy.media_type == ixgbe_media_type_backplane) 5570 if (hw->phy.media_type == ixgbe_media_type_backplane)
5571 return (ENODEV); 5571 return (ENODEV);
5572 5572
5573 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 5573 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5574 (hw->phy.multispeed_fiber))) { 5574 (hw->phy.multispeed_fiber))) {
5575 device_printf(dev, 5575 device_printf(dev,
5576 "Advertised speed can only be set on copper or " 5576 "Advertised speed can only be set on copper or "
5577 "multispeed fiber media types.\n"); 5577 "multispeed fiber media types.\n");
5578 return (EINVAL); 5578 return (EINVAL);
5579 } 5579 }
5580 5580
5581 if (advertise < 0x0 || advertise > 0x2f) { 5581 if (advertise < 0x0 || advertise > 0x2f) {
5582 device_printf(dev, 5582 device_printf(dev,
5583 "Invalid advertised speed; valid modes are 0x0 through 0x7\n"); 5583 "Invalid advertised speed; valid modes are 0x0 through 0x7\n");
5584 return (EINVAL); 5584 return (EINVAL);
5585 } 5585 }
5586 5586
5587 if (hw->mac.ops.get_link_capabilities) { 5587 if (hw->mac.ops.get_link_capabilities) {
5588 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 5588 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5589 &negotiate); 5589 &negotiate);
5590 if (err != IXGBE_SUCCESS) { 5590 if (err != IXGBE_SUCCESS) {
5591 device_printf(dev, "Unable to determine supported advertise speeds\n"); 5591 device_printf(dev, "Unable to determine supported advertise speeds\n");
5592 return (ENODEV); 5592 return (ENODEV);
5593 } 5593 }
5594 } 5594 }
5595 5595
5596 /* Set new value and report new advertised mode */ 5596 /* Set new value and report new advertised mode */
5597 if (advertise & 0x1) { 5597 if (advertise & 0x1) {
5598 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 5598 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5599 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 5599 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5600 return (EINVAL); 5600 return (EINVAL);
5601 } 5601 }
5602 speed |= IXGBE_LINK_SPEED_100_FULL; 5602 speed |= IXGBE_LINK_SPEED_100_FULL;
5603 } 5603 }
5604 if (advertise & 0x2) { 5604 if (advertise & 0x2) {
5605 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 5605 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5606 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 5606 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5607 return (EINVAL); 5607 return (EINVAL);
5608 } 5608 }
5609 speed |= IXGBE_LINK_SPEED_1GB_FULL; 5609 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5610 } 5610 }
5611 if (advertise & 0x4) { 5611 if (advertise & 0x4) {
5612 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 5612 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5613 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 5613 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5614 return (EINVAL); 5614 return (EINVAL);
5615 } 5615 }
5616 speed |= IXGBE_LINK_SPEED_10GB_FULL; 5616 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5617 } 5617 }
5618 if (advertise & 0x8) { 5618 if (advertise & 0x8) {
5619 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 5619 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5620 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 5620 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5621 return (EINVAL); 5621 return (EINVAL);
5622 } 5622 }
5623 speed |= IXGBE_LINK_SPEED_10_FULL; 5623 speed |= IXGBE_LINK_SPEED_10_FULL;
5624 } 5624 }
5625 if (advertise & 0x10) { 5625 if (advertise & 0x10) {
5626 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) { 5626 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5627 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n"); 5627 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5628 return (EINVAL); 5628 return (EINVAL);
5629 } 5629 }
5630 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 5630 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5631 } 5631 }
5632 if (advertise & 0x20) { 5632 if (advertise & 0x20) {
5633 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) { 5633 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5634 device_printf(dev, "Interface does not support 5Gb advertised speed\n"); 5634 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5635 return (EINVAL); 5635 return (EINVAL);
5636 } 5636 }
5637 speed |= IXGBE_LINK_SPEED_5GB_FULL; 5637 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5638 } 5638 }
5639 if (advertise == 0) 5639 if (advertise == 0)
5640 speed = link_caps; /* All capable link speed */ 5640 speed = link_caps; /* All capable link speed */
5641 5641
5642 hw->mac.autotry_restart = TRUE; 5642 hw->mac.autotry_restart = TRUE;
5643 hw->mac.ops.setup_link(hw, speed, TRUE); 5643 hw->mac.ops.setup_link(hw, speed, TRUE);
5644 adapter->advertise = advertise; 5644 adapter->advertise = advertise;
5645 5645
5646 return (0); 5646 return (0);
5647} /* ixgbe_set_advertise */ 5647} /* ixgbe_set_advertise */
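/*
 * Illustrative usage, not part of the driver: composing the flag values
 * documented above.  0x02 | 0x04 restricts autonegotiation to 1G + 10G;
 * 0 readvertises everything the link supports.
 */
#if 0 /* example only */
	int error = ixgbe_set_advertise(adapter, 0x02 | 0x04);
	if (error != 0)
		device_printf(adapter->dev,
		    "could not restrict advertised speeds: %d\n", error);
#endif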
5648 5648
5649/************************************************************************ 5649/************************************************************************
5650 * ixgbe_get_advertise - Get current advertised speed settings 5650 * ixgbe_get_advertise - Get current advertised speed settings
5651 * 5651 *
5652 * Formatted for sysctl usage. 5652 * Formatted for sysctl usage.
5653 * Flags: 5653 * Flags:
5654 * 0x01 - advertise 100 Mb 5654 * 0x01 - advertise 100 Mb
5655 * 0x02 - advertise 1G 5655 * 0x02 - advertise 1G
5656 * 0x04 - advertise 10G 5656 * 0x04 - advertise 10G
5657 * 0x08 - advertise 10 Mb (yes, Mb) 5657 * 0x08 - advertise 10 Mb (yes, Mb)
5658 * 0x10 - advertise 2.5G 5658 * 0x10 - advertise 2.5G
5659 * 0x20 - advertise 5G 5659 * 0x20 - advertise 5G
5660 ************************************************************************/ 5660 ************************************************************************/
5661static int 5661static int
5662ixgbe_get_advertise(struct adapter *adapter) 5662ixgbe_get_advertise(struct adapter *adapter)
5663{ 5663{
5664 struct ixgbe_hw *hw = &adapter->hw; 5664 struct ixgbe_hw *hw = &adapter->hw;
5665 int speed; 5665 int speed;
5666 ixgbe_link_speed link_caps = 0; 5666 ixgbe_link_speed link_caps = 0;
5667 s32 err; 5667 s32 err;
5668 bool negotiate = FALSE; 5668 bool negotiate = FALSE;
5669 5669
5670 /* 5670 /*
5671 * Advertised speed means nothing unless it's copper or 5671 * Advertised speed means nothing unless it's copper or
5672 * multi-speed fiber 5672 * multi-speed fiber
5673 */ 5673 */
5674 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 5674 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5675 !(hw->phy.multispeed_fiber)) 5675 !(hw->phy.multispeed_fiber))
5676 return (0); 5676 return (0);
5677 5677
5678 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 5678 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5679 if (err != IXGBE_SUCCESS) 5679 if (err != IXGBE_SUCCESS)
5680 return (0); 5680 return (0);
5681 5681
5682 speed = 5682 speed =
5683 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) | 5683 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5684 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) | 5684 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5685 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) | 5685 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5686 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) | 5686 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5687 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 5687 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5688 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0); 5688 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5689 5689
5690 return speed; 5690 return speed;
5691} /* ixgbe_get_advertise */ 5691} /* ixgbe_get_advertise */
5692 5692
5693/************************************************************************ 5693/************************************************************************
5694 * ixgbe_sysctl_dmac - Manage DMA Coalescing 5694 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5695 * 5695 *
5696 * Control values: 5696 * Control values:
5697 * 0/1 - off / on (use default value of 1000) 5697 * 0/1 - off / on (use default value of 1000)
5698 * 5698 *
5699 * Legal timer values are: 5699 * Legal timer values are:
5700 * 50,100,250,500,1000,2000,5000,10000 5700 * 50,100,250,500,1000,2000,5000,10000
5701 * 5701 *
5702 * Turning off interrupt moderation will also turn this off. 5702 * Turning off interrupt moderation will also turn this off.
5703 ************************************************************************/ 5703 ************************************************************************/
5704static int 5704static int
5705ixgbe_sysctl_dmac(SYSCTLFN_ARGS) 5705ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5706{ 5706{
5707 struct sysctlnode node = *rnode; 5707 struct sysctlnode node = *rnode;
5708 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5708 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5709 struct ifnet *ifp = adapter->ifp; 5709 struct ifnet *ifp = adapter->ifp;
5710 int error; 5710 int error;
5711 int newval; 5711 int newval;
5712 5712
5713 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5713 if (ixgbe_fw_recovery_mode_swflag(adapter))
5714 return (EPERM); 5714 return (EPERM);
5715 5715
5716 newval = adapter->dmac; 5716 newval = adapter->dmac;
5717 node.sysctl_data = &newval; 5717 node.sysctl_data = &newval;
5718 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5718 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5719 if ((error) || (newp == NULL)) 5719 if ((error) || (newp == NULL))
5720 return (error); 5720 return (error);
5721 5721
5722 switch (newval) { 5722 switch (newval) {
5723 case 0: 5723 case 0:
5724 /* Disabled */ 5724 /* Disabled */
5725 adapter->dmac = 0; 5725 adapter->dmac = 0;
5726 break; 5726 break;
5727 case 1: 5727 case 1:
5728 /* Enable and use default */ 5728 /* Enable and use default */
5729 adapter->dmac = 1000; 5729 adapter->dmac = 1000;
5730 break; 5730 break;
5731 case 50: 5731 case 50:
5732 case 100: 5732 case 100:
5733 case 250: 5733 case 250:
5734 case 500: 5734 case 500:
5735 case 1000: 5735 case 1000:
5736 case 2000: 5736 case 2000:
5737 case 5000: 5737 case 5000:
5738 case 10000: 5738 case 10000:
5739 /* Legal values - allow */ 5739 /* Legal values - allow */
5740 adapter->dmac = newval; 5740 adapter->dmac = newval;
5741 break; 5741 break;
5742 default: 5742 default:
5743 /* Illegal value; leave adapter->dmac unchanged */ 5743 /* Illegal value; leave adapter->dmac unchanged */
5744 return (EINVAL); 5744 return (EINVAL);
5745 } 5745 }
5746 5746
5747 /* Re-initialize hardware if it's already running */ 5747 /* Re-initialize hardware if it's already running */
5748 if (ifp->if_flags & IFF_RUNNING) 5748 if (ifp->if_flags & IFF_RUNNING)
5749 ifp->if_init(ifp); 5749 ifp->if_init(ifp);
5750 5750
5751 return (0); 5751 return (0);
5752} /* ixgbe_sysctl_dmac */ 5752} /* ixgbe_sysctl_dmac */
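/*
 * Illustrative sketch, not part of the driver: the set of values the
 * handler above accepts, written as a hypothetical standalone validator.
 * Everything else is rejected with EINVAL.
 */
#if 0 /* example only */
static bool
example_dmac_value_ok(int v)
{
	switch (v) {
	case 0:		/* disabled */
	case 1:		/* enabled; expands to the default of 1000 */
	case 50: case 100: case 250: case 500:
	case 1000: case 2000: case 5000: case 10000:
		return true;
	default:
		return false;
	}
}
#endif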
5753 5753
5754#ifdef IXGBE_DEBUG 5754#ifdef IXGBE_DEBUG
5755/************************************************************************ 5755/************************************************************************
5756 * ixgbe_sysctl_power_state 5756 * ixgbe_sysctl_power_state
5757 * 5757 *
5758 * Sysctl to test power states 5758 * Sysctl to test power states
5759 * Values: 5759 * Values:
5760 * 0 - set device to D0 5760 * 0 - set device to D0
5761 * 3 - set device to D3 5761 * 3 - set device to D3
5762 * (none) - get current device power state 5762 * (none) - get current device power state
5763 ************************************************************************/ 5763 ************************************************************************/
5764static int 5764static int
5765ixgbe_sysctl_power_state(SYSCTLFN_ARGS) 5765ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5766{ 5766{
5767#ifdef notyet 5767#ifdef notyet
5768 struct sysctlnode node = *rnode; 5768 struct sysctlnode node = *rnode;
5769 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5769 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5770 device_t dev = adapter->dev; 5770 device_t dev = adapter->dev;
5771 int curr_ps, new_ps, error = 0; 5771 int curr_ps, new_ps, error = 0;
5772 5772
5773 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5773 if (ixgbe_fw_recovery_mode_swflag(adapter))
5774 return (EPERM); 5774 return (EPERM);
5775 5775
5776 curr_ps = new_ps = pci_get_powerstate(dev); 5776 curr_ps = new_ps = pci_get_powerstate(dev);
5777 5777
5778 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5778 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5779 if ((error) || (req->newp == NULL)) 5779 if ((error) || (req->newp == NULL))