Wed Feb 16 10:29:13 2022 UTC
Print Printed Board Assembly (PBA) number.


(msaitoh)
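
For context, rev. 1.307 makes the driver read the adapter's Printed Board Assembly (PBA) number from NVM and report it at attach time; the hunk that does this sits further down in ixgbe_attach() than the portion of the diff reproduced below. A minimal sketch of the idea using the ixgbe shared-code helper, where hw and dev stand for the attach function's struct ixgbe_hw pointer and device_t (the buffer size, message wording, and exact placement are assumptions, not the literal committed code):

    /* Read the Printed Board Assembly number from NVM and log it. */
    u8 pba_str[32];	/* hypothetical buffer size */

    if (ixgbe_read_pba_string_generic(hw, pba_str, sizeof(pba_str)) == IXGBE_SUCCESS)
            aprint_normal_dev(dev, "PBA number %s\n", pba_str);
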
diff -r1.306 -r1.307 src/sys/dev/pci/ixgbe/ixgbe.c

cvs diff -r1.306 -r1.307 src/sys/dev/pci/ixgbe/ixgbe.c

--- src/sys/dev/pci/ixgbe/ixgbe.c 2022/02/01 04:59:16 1.306
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2022/02/16 10:29:13 1.307
@@ -1,2138 +1,2143 @@
-/* $NetBSD: ixgbe.c,v 1.306 2022/02/01 04:59:16 msaitoh Exp $ */
+/* $NetBSD: ixgbe.c,v 1.307 2022/02/16 10:29:13 msaitoh Exp $ */

 /******************************************************************************

   Copyright (c) 2001-2017, Intel Corporation
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.

 ******************************************************************************/
 /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/

 /*
  * Copyright (c) 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Coyote Point Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.306 2022/02/01 04:59:16 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.307 2022/02/16 10:29:13 msaitoh Exp $");

 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
 #include "opt_inet6.h"
 #include "opt_net_mpsafe.h"
 #endif

 #include "ixgbe.h"
 #include "ixgbe_phy.h"
 #include "ixgbe_sriov.h"

 #include <sys/cprng.h>
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>

 /************************************************************************
  * Driver version
  ************************************************************************/
 static const char ixgbe_driver_version[] = "4.0.1-k";
 /* XXX NetBSD: + 3.3.24 */

 /************************************************************************
  * PCI Device ID Table
  *
  * Used by probe to select devices to load on
  * Last field stores an index into ixgbe_strings
  * Last entry must be all 0s
  *
  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
  ************************************************************************/
 static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
 {
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
     {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
     /* required last entry */
     {0, 0, 0, 0, 0}
 };

 /************************************************************************
  * Table of branding strings
  ************************************************************************/
 static const char *ixgbe_strings[] = {
     "Intel(R) PRO/10GbE PCI-Express Network Driver"
 };

 /************************************************************************
  * Function prototypes
  ************************************************************************/
 static int ixgbe_probe(device_t, cfdata_t, void *);
 static void ixgbe_quirks(struct adapter *);
 static void ixgbe_attach(device_t, device_t, void *);
 static int ixgbe_detach(device_t, int);
 #if 0
 static int ixgbe_shutdown(device_t);
 #endif
 static bool ixgbe_suspend(device_t, const pmf_qual_t *);
 static bool ixgbe_resume(device_t, const pmf_qual_t *);
 static int ixgbe_ifflags_cb(struct ethercom *);
 static int ixgbe_ioctl(struct ifnet *, u_long, void *);
 static int ixgbe_init(struct ifnet *);
 static void ixgbe_init_locked(struct adapter *);
 static void ixgbe_ifstop(struct ifnet *, int);
 static void ixgbe_stop_locked(void *);
 static void ixgbe_init_device_features(struct adapter *);
 static int ixgbe_check_fan_failure(struct adapter *, u32, bool);
 static void ixgbe_add_media_types(struct adapter *);
 static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
 static int ixgbe_media_change(struct ifnet *);
 static int ixgbe_allocate_pci_resources(struct adapter *,
     const struct pci_attach_args *);
 static void ixgbe_free_deferred_handlers(struct adapter *);
 static void ixgbe_get_slot_info(struct adapter *);
 static int ixgbe_allocate_msix(struct adapter *,
     const struct pci_attach_args *);
 static int ixgbe_allocate_legacy(struct adapter *,
     const struct pci_attach_args *);
 static int ixgbe_configure_interrupts(struct adapter *);
 static void ixgbe_free_pciintr_resources(struct adapter *);
 static void ixgbe_free_pci_resources(struct adapter *);
 static void ixgbe_local_timer(void *);
 static void ixgbe_handle_timer(struct work *, void *);
 static void ixgbe_recovery_mode_timer(void *);
 static void ixgbe_handle_recovery_mode_timer(struct work *, void *);
 static int ixgbe_setup_interface(device_t, struct adapter *);
 static void ixgbe_config_gpie(struct adapter *);
 static void ixgbe_config_dmac(struct adapter *);
 static void ixgbe_config_delay_values(struct adapter *);
 static void ixgbe_schedule_admin_tasklet(struct adapter *);
 static void ixgbe_config_link(struct adapter *);
 static void ixgbe_check_wol_support(struct adapter *);
 static int ixgbe_setup_low_power_mode(struct adapter *);
 #if 0
 static void ixgbe_rearm_queues(struct adapter *, u64);
 #endif

 static void ixgbe_initialize_transmit_units(struct adapter *);
 static void ixgbe_initialize_receive_units(struct adapter *);
 static void ixgbe_enable_rx_drop(struct adapter *);
 static void ixgbe_disable_rx_drop(struct adapter *);
 static void ixgbe_initialize_rss_mapping(struct adapter *);

 static void ixgbe_enable_intr(struct adapter *);
 static void ixgbe_disable_intr(struct adapter *);
 static void ixgbe_update_stats_counters(struct adapter *);
 static void ixgbe_set_rxfilter(struct adapter *);
 static void ixgbe_update_link_status(struct adapter *);
 static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
 static void ixgbe_configure_ivars(struct adapter *);
 static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
 static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);

 static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
 static void ixgbe_setup_vlan_hw_support(struct adapter *);
 static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
 static int ixgbe_register_vlan(struct adapter *, u16);
 static int ixgbe_unregister_vlan(struct adapter *, u16);

 static void ixgbe_add_device_sysctls(struct adapter *);
 static void ixgbe_add_hw_stats(struct adapter *);
 static void ixgbe_clear_evcnt(struct adapter *);
 static int ixgbe_set_flowcntl(struct adapter *, int);
 static int ixgbe_set_advertise(struct adapter *, int);
 static int ixgbe_get_default_advertise(struct adapter *);

 /* Sysctl handlers */
 static void ixgbe_set_sysctl_value(struct adapter *, const char *,
     const char *, int *, int);
 static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
 #ifdef IXGBE_DEBUG
 static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
 #endif
 static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_next_to_refresh_handler(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_rx_copy_len(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
 static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);

 /* Interrupt functions */
 static int ixgbe_msix_que(void *);
 static int ixgbe_msix_admin(void *);
 static void ixgbe_intr_admin_common(struct adapter *, u32, u32 *);
 static int ixgbe_legacy_irq(void *);

 /* Event handlers running on workqueue */
 static void ixgbe_handle_que(void *);
 static void ixgbe_handle_link(void *);
 static void ixgbe_handle_msf(void *);
 static void ixgbe_handle_mod(void *, bool);
 static void ixgbe_handle_phy(void *);

 /* Deferred workqueue handlers */
 static void ixgbe_handle_admin(struct work *, void *);
 static void ixgbe_handle_que_work(struct work *, void *);

 static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

 /************************************************************************
  * NetBSD Device Interface Entry Points
  ************************************************************************/
 CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
     ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
     DVF_DETACH_SHUTDOWN);

 #if 0
 devclass_t ix_devclass;
 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

 MODULE_DEPEND(ix, pci, 1, 1, 1);
 MODULE_DEPEND(ix, ether, 1, 1, 1);
 #ifdef DEV_NETMAP
 MODULE_DEPEND(ix, netmap, 1, 1, 1);
 #endif
 #endif

 /*
  * TUNEABLE PARAMETERS:
  */

 /*
  * AIM: Adaptive Interrupt Moderation
  * which means that the interrupt rate
  * is varied over time based on the
  * traffic for that interrupt vector
  */
 static bool ixgbe_enable_aim = true;
 #define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
 SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
     "Enable adaptive interrupt moderation");

 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

 /* How many packets rxeof tries to clean at a time */
 static int ixgbe_rx_process_limit = 256;
 SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
     &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

 /* How many packets txeof tries to clean at a time */
 static int ixgbe_tx_process_limit = 256;
 SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
     &ixgbe_tx_process_limit, 0,
     "Maximum number of sent packets to process at a time, -1 means unlimited");

 /* Flow control setting, default to full */
 static int ixgbe_flow_control = ixgbe_fc_full;
 SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
     &ixgbe_flow_control, 0, "Default flow control used for all adapters");

 /* Which packet processing uses workqueue or softint */
 static bool ixgbe_txrx_workqueue = false;

 /*
  * Smart speed setting, default to on
  * this only works as a compile option
  * right now as its during attach, set
  * this to 'ixgbe_smart_speed_off' to
  * disable.
  */
 static int ixgbe_smart_speed = ixgbe_smart_speed_on;

 /*
  * MSI-X should be the default for best performance,
  * but this allows it to be forced off for testing.
  */
 static int ixgbe_enable_msix = 1;
 SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
     "Enable MSI-X interrupts");

 /*
  * Number of Queues, can be set to 0,
  * it then autoconfigures based on the
  * number of cpus with a max of 8. This
  * can be overridden manually here.
  */
 static int ixgbe_num_queues = 0;
 SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
     "Number of queues to configure, 0 indicates autoconfigure");

 /*
  * Number of TX descriptors per ring,
  * setting higher than RX as this seems
  * the better performing choice.
  */
 static int ixgbe_txd = PERFORM_TXD;
 SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
     "Number of transmit descriptors per queue");

 /* Number of RX descriptors per ring */
 static int ixgbe_rxd = PERFORM_RXD;
 SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
     "Number of receive descriptors per queue");

 /*
  * Defining this on will allow the use
  * of unsupported SFP+ modules, note that
  * doing so you are on your own :)
  */
 static int allow_unsupported_sfp = false;
 #define TUNABLE_INT(__x, __y)
 TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

 /*
  * Not sure if Flow Director is fully baked,
  * so we'll default to turning it off.
  */
 static int ixgbe_enable_fdir = 0;
 SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
     "Enable Flow Director");

 /* Legacy Transmit (single queue) */
 static int ixgbe_enable_legacy_tx = 0;
 SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
     &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

 /* Receive-Side Scaling */
 static int ixgbe_enable_rss = 1;
 SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
     "Enable Receive-Side Scaling (RSS)");

 #if 0
 static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
 static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
 #endif

 #ifdef NET_MPSAFE
 #define IXGBE_MPSAFE 1
 #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
 #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
 #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE
 #else
 #define IXGBE_CALLOUT_FLAGS 0
 #define IXGBE_SOFTINT_FLAGS 0
 #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
 #define IXGBE_TASKLET_WQ_FLAGS 0
 #endif
 #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET

 /************************************************************************
  * ixgbe_initialize_rss_mapping
  ************************************************************************/
 static void
 ixgbe_initialize_rss_mapping(struct adapter *adapter)
 {
     struct ixgbe_hw *hw = &adapter->hw;
     u32 reta = 0, mrqc, rss_key[10];
     int queue_id, table_size, index_mult;
     int i, j;
     u32 rss_hash_config;

     /* force use default RSS key. */
 #ifdef __NetBSD__
     rss_getkey((uint8_t *) &rss_key);
 #else
     if (adapter->feat_en & IXGBE_FEATURE_RSS) {
         /* Fetch the configured RSS key */
         rss_getkey((uint8_t *) &rss_key);
     } else {
         /* set up random bits */
         cprng_fast(&rss_key, sizeof(rss_key));
     }
 #endif

     /* Set multiplier for RETA setup and table size based on MAC */
     index_mult = 0x1;
     table_size = 128;
     switch (adapter->hw.mac.type) {
     case ixgbe_mac_82598EB:
         index_mult = 0x11;
         break;
     case ixgbe_mac_X550:
     case ixgbe_mac_X550EM_x:
     case ixgbe_mac_X550EM_a:
         table_size = 512;
         break;
     default:
         break;
     }

     /* Set up the redirection table */
     for (i = 0, j = 0; i < table_size; i++, j++) {
         if (j == adapter->num_queues)
             j = 0;

         if (adapter->feat_en & IXGBE_FEATURE_RSS) {
             /*
              * Fetch the RSS bucket id for the given indirection
              * entry. Cap it at the number of configured buckets
              * (which is num_queues.)
              */
             queue_id = rss_get_indirection_to_bucket(i);
             queue_id = queue_id % adapter->num_queues;
         } else
             queue_id = (j * index_mult);

         /*
          * The low 8 bits are for hash value (n+0);
          * The next 8 bits are for hash value (n+1), etc.
          */
         reta = reta >> 8;
         reta = reta | (((uint32_t) queue_id) << 24);
         if ((i & 3) == 3) {
             if (i < 128)
                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
             else
                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                     reta);
             reta = 0;
         }
     }

     /* Now fill our hash function seeds */
     for (i = 0; i < 10; i++)
         IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

     /* Perform hash on these packet types */
     if (adapter->feat_en & IXGBE_FEATURE_RSS)
         rss_hash_config = rss_gethashconfig();
     else {
         /*
          * Disable UDP - IP fragments aren't currently being handled
          * and so we end up with a mix of 2-tuple and 4-tuple
          * traffic.
          */
         rss_hash_config = RSS_HASHTYPE_RSS_IPV4
             | RSS_HASHTYPE_RSS_TCP_IPV4
             | RSS_HASHTYPE_RSS_IPV6
             | RSS_HASHTYPE_RSS_TCP_IPV6
             | RSS_HASHTYPE_RSS_IPV6_EX
             | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
     }

     mrqc = IXGBE_MRQC_RSSEN;
     if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
     if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
     if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
     if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
     mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
     IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 } /* ixgbe_initialize_rss_mapping */

 /************************************************************************
  * ixgbe_initialize_receive_units - Setup receive registers and features.
  ************************************************************************/
 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

 static void
 ixgbe_initialize_receive_units(struct adapter *adapter)
 {
     struct rx_ring *rxr = adapter->rx_rings;
     struct ixgbe_hw *hw = &adapter->hw;
     struct ifnet *ifp = adapter->ifp;
     int i, j;
     u32 bufsz, fctrl, srrctl, rxcsum;
     u32 hlreg;

     /*
      * Make sure receives are disabled while
      * setting up the descriptor ring
      */
     ixgbe_disable_rx(hw);

     /* Enable broadcasts */
     fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
     fctrl |= IXGBE_FCTRL_BAM;
     if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
         fctrl |= IXGBE_FCTRL_DPF;
         fctrl |= IXGBE_FCTRL_PMCF;
     }
     IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

     /* Set for Jumbo Frames? */
     hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
     if (ifp->if_mtu > ETHERMTU)
         hlreg |= IXGBE_HLREG0_JUMBOEN;
     else
         hlreg &= ~IXGBE_HLREG0_JUMBOEN;

 #ifdef DEV_NETMAP
     /* CRC stripping is conditional in Netmap */
     if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
         (ifp->if_capenable & IFCAP_NETMAP) &&
         !ix_crcstrip)
         hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
     else
 #endif /* DEV_NETMAP */
         hlreg |= IXGBE_HLREG0_RXCRCSTRP;

     IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

     bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
         IXGBE_SRRCTL_BSIZEPKT_SHIFT;

     for (i = 0; i < adapter->num_queues; i++, rxr++) {
         u64 rdba = rxr->rxdma.dma_paddr;
         u32 reg;
         int regnum = i / 4; /* 1 register per 4 queues */
         int regshift = i % 4; /* 4 bits per 1 queue */
         j = rxr->me;

         /* Setup the Base and Length of the Rx Descriptor Ring */
         IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
             (rdba & 0x00000000ffffffffULL));
         IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
         IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
             adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

         /* Set up the SRRCTL register */
         srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
         srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
         srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
         srrctl |= bufsz;
         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

         /* Set RQSMR (Receive Queue Statistic Mapping) register */
         reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
         reg &= ~(0x000000ffUL << (regshift * 8));
         reg |= i << (regshift * 8);
         IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

         /*
          * Set DROP_EN iff we have no flow control and >1 queue.
          * Note that srrctl was cleared shortly before during reset,
          * so we do not need to clear the bit, but do it just in case
          * this code is moved elsewhere.
          */
         if (adapter->num_queues > 1 &&
             adapter->hw.fc.requested_mode == ixgbe_fc_none) {
             srrctl |= IXGBE_SRRCTL_DROP_EN;
         } else {
             srrctl &= ~IXGBE_SRRCTL_DROP_EN;
         }

         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

         /* Setup the HW Rx Head and Tail Descriptor Pointers */
         IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
         IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

         /* Set the driver rx tail address */
         rxr->tail = IXGBE_RDT(rxr->me);
     }

     if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
         u32 psrtype = IXGBE_PSRTYPE_TCPHDR
             | IXGBE_PSRTYPE_UDPHDR
             | IXGBE_PSRTYPE_IPV4HDR
             | IXGBE_PSRTYPE_IPV6HDR;
         IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
     }

     rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

     ixgbe_initialize_rss_mapping(adapter);

     if (adapter->num_queues > 1) {
         /* RSS and RX IPP Checksum are mutually exclusive */
         rxcsum |= IXGBE_RXCSUM_PCSD;
     }

     if (ifp->if_capenable & IFCAP_RXCSUM)
         rxcsum |= IXGBE_RXCSUM_PCSD;

     /* This is useful for calculating UDP/IP fragment checksums */
     if (!(rxcsum & IXGBE_RXCSUM_PCSD))
         rxcsum |= IXGBE_RXCSUM_IPPCSE;

     IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

 } /* ixgbe_initialize_receive_units */

 /************************************************************************
  * ixgbe_initialize_transmit_units - Enable transmit units.
  ************************************************************************/
 static void
 ixgbe_initialize_transmit_units(struct adapter *adapter)
 {
     struct tx_ring *txr = adapter->tx_rings;
     struct ixgbe_hw *hw = &adapter->hw;
     int i;

     INIT_DEBUGOUT("ixgbe_initialize_transmit_units");

     /* Setup the Base and Length of the Tx Descriptor Ring */
     for (i = 0; i < adapter->num_queues; i++, txr++) {
         u64 tdba = txr->txdma.dma_paddr;
         u32 txctrl = 0;
         u32 tqsmreg, reg;
         int regnum = i / 4; /* 1 register per 4 queues */
         int regshift = i % 4; /* 4 bits per 1 queue */
         int j = txr->me;

         IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
             (tdba & 0x00000000ffffffffULL));
         IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
         IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
             adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));

         /*
          * Set TQSMR (Transmit Queue Statistic Mapping) register.
          * Register location is different between 82598 and others.
          */
         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
             tqsmreg = IXGBE_TQSMR(regnum);
         else
             tqsmreg = IXGBE_TQSM(regnum);
         reg = IXGBE_READ_REG(hw, tqsmreg);
         reg &= ~(0x000000ffUL << (regshift * 8));
         reg |= i << (regshift * 8);
         IXGBE_WRITE_REG(hw, tqsmreg, reg);

         /* Setup the HW Tx Head and Tail descriptor pointers */
         IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
         IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);

         /* Cache the tail address */
         txr->tail = IXGBE_TDT(j);

         txr->txr_no_space = false;

         /* Disable Head Writeback */
         /*
          * Note: for X550 series devices, these registers are actually
          * prefixed with TPH_ instead of DCA_, but the addresses and
          * fields remain the same.
          */
         switch (hw->mac.type) {
         case ixgbe_mac_82598EB:
             txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
             break;
         default:
             txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
             break;
         }
         txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
         switch (hw->mac.type) {
         case ixgbe_mac_82598EB:
             IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
             break;
         default:
             IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
             break;
         }

     }

     if (hw->mac.type != ixgbe_mac_82598EB) {
         u32 dmatxctl, rttdcs;

         dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
         dmatxctl |= IXGBE_DMATXCTL_TE;
         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
         /* Disable arbiter to set MTQC */
         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
         rttdcs |= IXGBE_RTTDCS_ARBDIS;
         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
         IXGBE_WRITE_REG(hw, IXGBE_MTQC,
             ixgbe_get_mtqc(adapter->iov_mode));
         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
     }

     return;
 } /* ixgbe_initialize_transmit_units */

 static void
 ixgbe_quirks(struct adapter *adapter)
770{ 770{
771 device_t dev = adapter->dev; 771 device_t dev = adapter->dev;
772 struct ixgbe_hw *hw = &adapter->hw; 772 struct ixgbe_hw *hw = &adapter->hw;
773 const char *vendor, *product; 773 const char *vendor, *product;
774 774
775 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { 775 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
776 /* 776 /*
777 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE 777 * Quirk for inverted logic of SFP+'s MOD_ABS on GIGABYTE
778 * MA10-ST0. 778 * MA10-ST0.
779 */ 779 */
780 vendor = pmf_get_platform("system-vendor"); 780 vendor = pmf_get_platform("system-vendor");
781 product = pmf_get_platform("system-product"); 781 product = pmf_get_platform("system-product");
782 782
783 if ((vendor == NULL) || (product == NULL)) 783 if ((vendor == NULL) || (product == NULL))
784 return; 784 return;
785 785
786 if ((strcmp(vendor, "GIGABYTE") == 0) && 786 if ((strcmp(vendor, "GIGABYTE") == 0) &&
787 (strcmp(product, "MA10-ST0") == 0)) { 787 (strcmp(product, "MA10-ST0") == 0)) {
788 aprint_verbose_dev(dev, 788 aprint_verbose_dev(dev,
789 "Enable SFP+ MOD_ABS inverse quirk\n"); 789 "Enable SFP+ MOD_ABS inverse quirk\n");
790 adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT; 790 adapter->hw.quirks |= IXGBE_QUIRK_MOD_ABS_INVERT;
791 } 791 }
792 } 792 }
793} 793}
794 794
795/************************************************************************ 795/************************************************************************
796 * ixgbe_attach - Device initialization routine 796 * ixgbe_attach - Device initialization routine
797 * 797 *
798 * Called when the driver is being loaded. 798 * Called when the driver is being loaded.
799 * Identifies the type of hardware, allocates all resources 799 * Identifies the type of hardware, allocates all resources
800 * and initializes the hardware. 800 * and initializes the hardware.
801 * 801 *
802 * return 0 on success, positive on failure 802 * return 0 on success, positive on failure
803 ************************************************************************/ 803 ************************************************************************/
804static void 804static void
805ixgbe_attach(device_t parent, device_t dev, void *aux) 805ixgbe_attach(device_t parent, device_t dev, void *aux)
806{ 806{
807 struct adapter *adapter; 807 struct adapter *adapter;
808 struct ixgbe_hw *hw; 808 struct ixgbe_hw *hw;
809 int error = -1; 809 int error = -1;
810 u32 ctrl_ext; 810 u32 ctrl_ext;
811 u16 high, low, nvmreg; 811 u16 high, low, nvmreg;
812 pcireg_t id, subid; 812 pcireg_t id, subid;
813 const ixgbe_vendor_info_t *ent; 813 const ixgbe_vendor_info_t *ent;
814 struct pci_attach_args *pa = aux; 814 struct pci_attach_args *pa = aux;
815 bool unsupported_sfp = false; 815 bool unsupported_sfp = false;
816 const char *str; 816 const char *str;
817 char wqname[MAXCOMLEN]; 817 char wqname[MAXCOMLEN];
818 char buf[256]; 818 char buf[256];
819 819
820 INIT_DEBUGOUT("ixgbe_attach: begin"); 820 INIT_DEBUGOUT("ixgbe_attach: begin");
821 821
822 /* Allocate, clear, and link in our adapter structure */ 822 /* Allocate, clear, and link in our adapter structure */
823 adapter = device_private(dev); 823 adapter = device_private(dev);
824 adapter->hw.back = adapter; 824 adapter->hw.back = adapter;
825 adapter->dev = dev; 825 adapter->dev = dev;
826 hw = &adapter->hw; 826 hw = &adapter->hw;
827 adapter->osdep.pc = pa->pa_pc; 827 adapter->osdep.pc = pa->pa_pc;
828 adapter->osdep.tag = pa->pa_tag; 828 adapter->osdep.tag = pa->pa_tag;
829 if (pci_dma64_available(pa)) 829 if (pci_dma64_available(pa))
830 adapter->osdep.dmat = pa->pa_dmat64; 830 adapter->osdep.dmat = pa->pa_dmat64;
831 else 831 else
832 adapter->osdep.dmat = pa->pa_dmat; 832 adapter->osdep.dmat = pa->pa_dmat;
833 adapter->osdep.attached = false; 833 adapter->osdep.attached = false;
834 adapter->osdep.detaching = false; 834 adapter->osdep.detaching = false;
835 835
836 ent = ixgbe_lookup(pa); 836 ent = ixgbe_lookup(pa);
837 837
838 KASSERT(ent != NULL); 838 KASSERT(ent != NULL);
839 839
840 aprint_normal(": %s, Version - %s\n", 840 aprint_normal(": %s, Version - %s\n",
841 ixgbe_strings[ent->index], ixgbe_driver_version); 841 ixgbe_strings[ent->index], ixgbe_driver_version);
842 842
843 /* Core Lock Init */ 843 /* Core Lock Init */
844 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 844 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
845 845
846 /* Set up the timer callout and workqueue */ 846 /* Set up the timer callout and workqueue */
847 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 847 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
848 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev)); 848 snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev));
849 error = workqueue_create(&adapter->timer_wq, wqname, 849 error = workqueue_create(&adapter->timer_wq, wqname,
850 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 850 ixgbe_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
851 IXGBE_TASKLET_WQ_FLAGS); 851 IXGBE_TASKLET_WQ_FLAGS);
852 if (error) { 852 if (error) {
853 aprint_error_dev(dev, 853 aprint_error_dev(dev,
854 "could not create timer workqueue (%d)\n", error); 854 "could not create timer workqueue (%d)\n", error);
855 goto err_out; 855 goto err_out;
856 } 856 }
857 857
858 /* Determine hardware revision */ 858 /* Determine hardware revision */
859 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 859 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
860 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 860 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
861 861
862 hw->vendor_id = PCI_VENDOR(id); 862 hw->vendor_id = PCI_VENDOR(id);
863 hw->device_id = PCI_PRODUCT(id); 863 hw->device_id = PCI_PRODUCT(id);
864 hw->revision_id = 864 hw->revision_id =
865 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 865 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
866 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 866 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
867 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 867 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
868 868
869 /* Set quirk flags */ 869 /* Set quirk flags */
870 ixgbe_quirks(adapter); 870 ixgbe_quirks(adapter);
871 871
872 /* 872 /*
873 * Make sure BUSMASTER is set 873 * Make sure BUSMASTER is set
874 */ 874 */
875 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 875 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
876 876
877 /* Do base PCI setup - map BAR0 */ 877 /* Do base PCI setup - map BAR0 */
878 if (ixgbe_allocate_pci_resources(adapter, pa)) { 878 if (ixgbe_allocate_pci_resources(adapter, pa)) {
879 aprint_error_dev(dev, "Allocation of PCI resources failed\n"); 879 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
880 error = ENXIO; 880 error = ENXIO;
881 goto err_out; 881 goto err_out;
882 } 882 }
883 883
884 /* let hardware know driver is loaded */ 884 /* let hardware know driver is loaded */
885 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 885 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
886 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 886 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
887 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 887 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
888 888
889 /* 889 /*
890 * Initialize the shared code 890 * Initialize the shared code
891 */ 891 */
892 if (ixgbe_init_shared_code(hw) != 0) { 892 if (ixgbe_init_shared_code(hw) != 0) {
893 aprint_error_dev(dev, "Unable to initialize the shared code\n"); 893 aprint_error_dev(dev, "Unable to initialize the shared code\n");
894 error = ENXIO; 894 error = ENXIO;
895 goto err_out; 895 goto err_out;
896 } 896 }
897 897
898 switch (hw->mac.type) { 898 switch (hw->mac.type) {
899 case ixgbe_mac_82598EB: 899 case ixgbe_mac_82598EB:
900 str = "82598EB"; 900 str = "82598EB";
901 break; 901 break;
902 case ixgbe_mac_82599EB: 902 case ixgbe_mac_82599EB:
903 str = "82599EB"; 903 str = "82599EB";
904 break; 904 break;
905 case ixgbe_mac_X540: 905 case ixgbe_mac_X540:
906 str = "X540"; 906 str = "X540";
907 break; 907 break;
908 case ixgbe_mac_X550: 908 case ixgbe_mac_X550:
909 str = "X550"; 909 str = "X550";
910 break; 910 break;
911 case ixgbe_mac_X550EM_x: 911 case ixgbe_mac_X550EM_x:
912 str = "X550EM X"; 912 str = "X550EM X";
913 break; 913 break;
914 case ixgbe_mac_X550EM_a: 914 case ixgbe_mac_X550EM_a:
915 str = "X550EM A"; 915 str = "X550EM A";
916 break; 916 break;
917 default: 917 default:
918 str = "Unknown"; 918 str = "Unknown";
919 break; 919 break;
920 } 920 }
921 aprint_normal_dev(dev, "device %s\n", str); 921 aprint_normal_dev(dev, "device %s\n", str);
922 922
923 hw->allow_unsupported_sfp = allow_unsupported_sfp; 923 hw->allow_unsupported_sfp = allow_unsupported_sfp;
924 924
925 /* Pick up the 82599 settings */ 925 /* Pick up the 82599 settings */
926 if (hw->mac.type != ixgbe_mac_82598EB) 926 if (hw->mac.type != ixgbe_mac_82598EB)
927 hw->phy.smart_speed = ixgbe_smart_speed; 927 hw->phy.smart_speed = ixgbe_smart_speed;
928 928
929 /* Set the right number of segments */ 929 /* Set the right number of segments */
930 KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT); 930 KASSERT(IXGBE_82599_SCATTER_MAX >= IXGBE_SCATTER_DEFAULT);
931 adapter->num_segs = IXGBE_SCATTER_DEFAULT; 931 adapter->num_segs = IXGBE_SCATTER_DEFAULT;
932 932
933 /* Ensure SW/FW semaphore is free */ 933 /* Ensure SW/FW semaphore is free */
934 ixgbe_init_swfw_semaphore(hw); 934 ixgbe_init_swfw_semaphore(hw);
935 935
936 hw->mac.ops.set_lan_id(hw); 936 hw->mac.ops.set_lan_id(hw);
937 ixgbe_init_device_features(adapter); 937 ixgbe_init_device_features(adapter);
938 938
939 if (ixgbe_configure_interrupts(adapter)) { 939 if (ixgbe_configure_interrupts(adapter)) {
940 error = ENXIO; 940 error = ENXIO;
941 goto err_out; 941 goto err_out;
942 } 942 }
943 943
944 /* Allocate multicast array memory. */ 944 /* Allocate multicast array memory. */
945 adapter->mta = malloc(sizeof(*adapter->mta) * 945 adapter->mta = malloc(sizeof(*adapter->mta) *
946 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK); 946 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_WAITOK);
947 947
948 /* Enable WoL (if supported) */ 948 /* Enable WoL (if supported) */
949 ixgbe_check_wol_support(adapter); 949 ixgbe_check_wol_support(adapter);
950 950
951 /* Register for VLAN events */ 951 /* Register for VLAN events */
952 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); 952 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
953 953
954 /* Verify adapter fan is still functional (if applicable) */ 954 /* Verify adapter fan is still functional (if applicable) */
955 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 955 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
956 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 956 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
957 ixgbe_check_fan_failure(adapter, esdp, FALSE); 957 ixgbe_check_fan_failure(adapter, esdp, FALSE);
958 } 958 }
959 959
960 /* Set an initial default flow control value */ 960 /* Set an initial default flow control value */
961 hw->fc.requested_mode = ixgbe_flow_control; 961 hw->fc.requested_mode = ixgbe_flow_control;
962 962
963 /* Sysctls for limiting the amount of work done in the taskqueues */ 963 /* Sysctls for limiting the amount of work done in the taskqueues */
964 ixgbe_set_sysctl_value(adapter, "rx_processing_limit", 964 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
965 "max number of rx packets to process", 965 "max number of rx packets to process",
966 &adapter->rx_process_limit, ixgbe_rx_process_limit); 966 &adapter->rx_process_limit, ixgbe_rx_process_limit);
967 967
968 ixgbe_set_sysctl_value(adapter, "tx_processing_limit", 968 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
969 "max number of tx packets to process", 969 "max number of tx packets to process",
970 &adapter->tx_process_limit, ixgbe_tx_process_limit); 970 &adapter->tx_process_limit, ixgbe_tx_process_limit);
971 971
972 /* Do descriptor calc and sanity checks */ 972 /* Do descriptor calc and sanity checks */
973 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 973 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
974 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { 974 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
975 aprint_error_dev(dev, "TXD config issue, using default!\n"); 975 aprint_error_dev(dev, "TXD config issue, using default!\n");
976 adapter->num_tx_desc = DEFAULT_TXD; 976 adapter->num_tx_desc = DEFAULT_TXD;
977 } else 977 } else
978 adapter->num_tx_desc = ixgbe_txd; 978 adapter->num_tx_desc = ixgbe_txd;
979 979
980 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 980 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
981 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { 981 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
982 aprint_error_dev(dev, "RXD config issue, using default!\n"); 982 aprint_error_dev(dev, "RXD config issue, using default!\n");
983 adapter->num_rx_desc = DEFAULT_RXD; 983 adapter->num_rx_desc = DEFAULT_RXD;
984 } else 984 } else
985 adapter->num_rx_desc = ixgbe_rxd; 985 adapter->num_rx_desc = ixgbe_rxd;
986 986
987 /* Set default high limit of copying mbuf in rxeof */ 987 /* Set default high limit of copying mbuf in rxeof */
988 adapter->rx_copy_len = IXGBE_RX_COPY_LEN_MAX; 988 adapter->rx_copy_len = IXGBE_RX_COPY_LEN_MAX;
989 989
990 /* Allocate our TX/RX Queues */ 990 /* Allocate our TX/RX Queues */
991 if (ixgbe_allocate_queues(adapter)) { 991 if (ixgbe_allocate_queues(adapter)) {
992 error = ENOMEM; 992 error = ENOMEM;
993 goto err_out; 993 goto err_out;
994 } 994 }
995 995
996 hw->phy.reset_if_overtemp = TRUE; 996 hw->phy.reset_if_overtemp = TRUE;
997 error = ixgbe_reset_hw(hw); 997 error = ixgbe_reset_hw(hw);
998 hw->phy.reset_if_overtemp = FALSE; 998 hw->phy.reset_if_overtemp = FALSE;
999 if (error == IXGBE_ERR_SFP_NOT_PRESENT) 999 if (error == IXGBE_ERR_SFP_NOT_PRESENT)
1000 error = IXGBE_SUCCESS; 1000 error = IXGBE_SUCCESS;
1001 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 1001 else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
1002 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); 1002 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
1003 unsupported_sfp = true; 1003 unsupported_sfp = true;
1004 error = IXGBE_SUCCESS; 1004 error = IXGBE_SUCCESS;
1005 } else if (error) { 1005 } else if (error) {
1006 aprint_error_dev(dev, 1006 aprint_error_dev(dev,
 1007 "Hardware initialization failed (error = %d)\n", error); 1007 "Hardware initialization failed (error = %d)\n", error);
1008 error = EIO; 1008 error = EIO;
1009 goto err_late; 1009 goto err_late;
1010 } 1010 }
1011 1011
1012 /* Make sure we have a good EEPROM before we read from it */ 1012 /* Make sure we have a good EEPROM before we read from it */
1013 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { 1013 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
1014 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n"); 1014 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
1015 error = EIO; 1015 error = EIO;
1016 goto err_late; 1016 goto err_late;
1017 } 1017 }
1018 1018
1019 aprint_normal("%s:", device_xname(dev)); 1019 aprint_normal("%s:", device_xname(dev));
1020 /* NVM Image Version */ 1020 /* NVM Image Version */
1021 high = low = 0; 1021 high = low = 0;
1022 switch (hw->mac.type) { 1022 switch (hw->mac.type) {
1023 case ixgbe_mac_82598EB: 1023 case ixgbe_mac_82598EB:
1024 /* 1024 /*
1025 * Print version from the dev starter version (0x29). The 1025 * Print version from the dev starter version (0x29). The
 1026 * location is the same as newer devices' IXGBE_NVM_MAP_VER. 1026 * location is the same as newer devices' IXGBE_NVM_MAP_VER.
1027 */ 1027 */
1028 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); 1028 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1029 if (nvmreg == 0xffff) 1029 if (nvmreg == 0xffff)
1030 break; 1030 break;
1031 high = (nvmreg >> 12) & 0x0f; 1031 high = (nvmreg >> 12) & 0x0f;
1032 low = (nvmreg >> 4) & 0xff; 1032 low = (nvmreg >> 4) & 0xff;
1033 id = nvmreg & 0x0f; 1033 id = nvmreg & 0x0f;
1034 /* 1034 /*
1035 * The following output might not be correct. Some 82598 cards 1035 * The following output might not be correct. Some 82598 cards
 1036 * have 0x1070 or 0x2090. The 82598 spec update mentions 2.9.0. 1036 * have 0x1070 or 0x2090. The 82598 spec update mentions 2.9.0.
1037 */ 1037 */
1038 aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id); 1038 aprint_normal(" NVM Image Version %u.%u.%u,", high, low, id);
1039 break; 1039 break;
1040 case ixgbe_mac_X540: 1040 case ixgbe_mac_X540:
1041 case ixgbe_mac_X550EM_a: 1041 case ixgbe_mac_X550EM_a:
1042 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 1042 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1043 if (nvmreg == 0xffff) 1043 if (nvmreg == 0xffff)
1044 break; 1044 break;
1045 high = (nvmreg >> 12) & 0x0f; 1045 high = (nvmreg >> 12) & 0x0f;
1046 low = (nvmreg >> 4) & 0xff; 1046 low = (nvmreg >> 4) & 0xff;
1047 id = nvmreg & 0x0f; 1047 id = nvmreg & 0x0f;
1048 aprint_normal(" NVM Image Version %u.", high); 1048 aprint_normal(" NVM Image Version %u.", high);
1049 if (hw->mac.type == ixgbe_mac_X540) 1049 if (hw->mac.type == ixgbe_mac_X540)
1050 str = "%x"; 1050 str = "%x";
1051 else 1051 else
1052 str = "%02x"; 1052 str = "%02x";
1053 aprint_normal(str, low); 1053 aprint_normal(str, low);
1054 aprint_normal(" ID 0x%x,", id); 1054 aprint_normal(" ID 0x%x,", id);
1055 break; 1055 break;
1056 case ixgbe_mac_X550EM_x: 1056 case ixgbe_mac_X550EM_x:
1057 case ixgbe_mac_X550: 1057 case ixgbe_mac_X550:
1058 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 1058 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1059 if (nvmreg == 0xffff) 1059 if (nvmreg == 0xffff)
1060 break; 1060 break;
1061 high = (nvmreg >> 12) & 0x0f; 1061 high = (nvmreg >> 12) & 0x0f;
1062 low = nvmreg & 0xff; 1062 low = nvmreg & 0xff;
1063 aprint_normal(" NVM Image Version %u.%02x,", high, low); 1063 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1064 break; 1064 break;
1065 default: 1065 default:
1066 break; 1066 break;
1067 } 1067 }
1068 hw->eeprom.nvm_image_ver_high = high; 1068 hw->eeprom.nvm_image_ver_high = high;
1069 hw->eeprom.nvm_image_ver_low = low; 1069 hw->eeprom.nvm_image_ver_low = low;
1070 1070
1071 /* PHY firmware revision */ 1071 /* PHY firmware revision */
1072 switch (hw->mac.type) { 1072 switch (hw->mac.type) {
1073 case ixgbe_mac_X540: 1073 case ixgbe_mac_X540:
1074 case ixgbe_mac_X550: 1074 case ixgbe_mac_X550:
1075 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg); 1075 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1076 if (nvmreg == 0xffff) 1076 if (nvmreg == 0xffff)
1077 break; 1077 break;
1078 high = (nvmreg >> 12) & 0x0f; 1078 high = (nvmreg >> 12) & 0x0f;
1079 low = (nvmreg >> 4) & 0xff; 1079 low = (nvmreg >> 4) & 0xff;
1080 id = nvmreg & 0x000f; 1080 id = nvmreg & 0x000f;
1081 aprint_normal(" PHY FW Revision %u.", high); 1081 aprint_normal(" PHY FW Revision %u.", high);
1082 if (hw->mac.type == ixgbe_mac_X540) 1082 if (hw->mac.type == ixgbe_mac_X540)
1083 str = "%x"; 1083 str = "%x";
1084 else 1084 else
1085 str = "%02x"; 1085 str = "%02x";
1086 aprint_normal(str, low); 1086 aprint_normal(str, low);
1087 aprint_normal(" ID 0x%x,", id); 1087 aprint_normal(" ID 0x%x,", id);
1088 break; 1088 break;
1089 default: 1089 default:
1090 break; 1090 break;
1091 } 1091 }
1092 1092
1093 /* NVM Map version & OEM NVM Image version */ 1093 /* NVM Map version & OEM NVM Image version */
1094 switch (hw->mac.type) { 1094 switch (hw->mac.type) {
1095 case ixgbe_mac_X550: 1095 case ixgbe_mac_X550:
1096 case ixgbe_mac_X550EM_x: 1096 case ixgbe_mac_X550EM_x:
1097 case ixgbe_mac_X550EM_a: 1097 case ixgbe_mac_X550EM_a:
1098 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); 1098 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1099 if (nvmreg != 0xffff) { 1099 if (nvmreg != 0xffff) {
1100 high = (nvmreg >> 12) & 0x0f; 1100 high = (nvmreg >> 12) & 0x0f;
1101 low = nvmreg & 0x00ff; 1101 low = nvmreg & 0x00ff;
1102 aprint_normal(" NVM Map version %u.%02x,", high, low); 1102 aprint_normal(" NVM Map version %u.%02x,", high, low);
1103 } 1103 }
1104 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg); 1104 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1105 if (nvmreg != 0xffff) { 1105 if (nvmreg != 0xffff) {
1106 high = (nvmreg >> 12) & 0x0f; 1106 high = (nvmreg >> 12) & 0x0f;
1107 low = nvmreg & 0x00ff; 1107 low = nvmreg & 0x00ff;
1108 aprint_verbose(" OEM NVM Image version %u.%02x,", high, 1108 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1109 low); 1109 low);
1110 } 1110 }
1111 break; 1111 break;
1112 default: 1112 default:
1113 break; 1113 break;
1114 } 1114 }
1115 1115
1116 /* Print the ETrackID */ 1116 /* Print the ETrackID */
1117 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high); 1117 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1118 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low); 1118 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1119 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low); 1119 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1120 1120
 1121 /* Printed Board Assembly number */
 1122 error = ixgbe_read_pba_string(hw, buf, IXGBE_PBANUM_LENGTH);
 1123 aprint_normal_dev(dev, "PBA number %s\n", error ? "unknown" : buf);
 1124
1121 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 1125 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1122 error = ixgbe_allocate_msix(adapter, pa); 1126 error = ixgbe_allocate_msix(adapter, pa);
1123 if (error) { 1127 if (error) {
1124 /* Free allocated queue structures first */ 1128 /* Free allocated queue structures first */
1125 ixgbe_free_queues(adapter); 1129 ixgbe_free_queues(adapter);
1126 1130
1127 /* Fallback to legacy interrupt */ 1131 /* Fallback to legacy interrupt */
1128 adapter->feat_en &= ~IXGBE_FEATURE_MSIX; 1132 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1129 if (adapter->feat_cap & IXGBE_FEATURE_MSI) 1133 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1130 adapter->feat_en |= IXGBE_FEATURE_MSI; 1134 adapter->feat_en |= IXGBE_FEATURE_MSI;
1131 adapter->num_queues = 1; 1135 adapter->num_queues = 1;
1132 1136
1133 /* Allocate our TX/RX Queues again */ 1137 /* Allocate our TX/RX Queues again */
1134 if (ixgbe_allocate_queues(adapter)) { 1138 if (ixgbe_allocate_queues(adapter)) {
1135 error = ENOMEM; 1139 error = ENOMEM;
1136 goto err_out; 1140 goto err_out;
1137 } 1141 }
1138 } 1142 }
1139 } 1143 }
 1144
1140 /* Recovery mode */ 1145 /* Recovery mode */
1141 switch (adapter->hw.mac.type) { 1146 switch (adapter->hw.mac.type) {
1142 case ixgbe_mac_X550: 1147 case ixgbe_mac_X550:
1143 case ixgbe_mac_X550EM_x: 1148 case ixgbe_mac_X550EM_x:
1144 case ixgbe_mac_X550EM_a: 1149 case ixgbe_mac_X550EM_a:
1145 /* >= 2.00 */ 1150 /* >= 2.00 */
1146 if (hw->eeprom.nvm_image_ver_high >= 2) { 1151 if (hw->eeprom.nvm_image_ver_high >= 2) {
1147 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; 1152 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1148 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE; 1153 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1149 } 1154 }
1150 break; 1155 break;
1151 default: 1156 default:
1152 break; 1157 break;
1153 } 1158 }
1154 1159
1155 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0) 1160 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1156 error = ixgbe_allocate_legacy(adapter, pa); 1161 error = ixgbe_allocate_legacy(adapter, pa);
1157 if (error) 1162 if (error)
1158 goto err_late; 1163 goto err_late;
1159 1164
1160 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */ 1165 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1161 mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET); 1166 mutex_init(&(adapter)->admin_mtx, MUTEX_DEFAULT, IPL_NET);
1162 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev)); 1167 snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev));
1163 error = workqueue_create(&adapter->admin_wq, wqname, 1168 error = workqueue_create(&adapter->admin_wq, wqname,
1164 ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 1169 ixgbe_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
1165 IXGBE_TASKLET_WQ_FLAGS); 1170 IXGBE_TASKLET_WQ_FLAGS);
1166 if (error) { 1171 if (error) {
1167 aprint_error_dev(dev, 1172 aprint_error_dev(dev,
1168 "could not create admin workqueue (%d)\n", error); 1173 "could not create admin workqueue (%d)\n", error);
1169 goto err_out; 1174 goto err_out;
1170 } 1175 }
1171 1176
1172 error = ixgbe_start_hw(hw); 1177 error = ixgbe_start_hw(hw);
1173 switch (error) { 1178 switch (error) {
1174 case IXGBE_ERR_EEPROM_VERSION: 1179 case IXGBE_ERR_EEPROM_VERSION:
1175 aprint_error_dev(dev, "This device is a pre-production adapter/" 1180 aprint_error_dev(dev, "This device is a pre-production adapter/"
1176 "LOM. Please be aware there may be issues associated " 1181 "LOM. Please be aware there may be issues associated "
1177 "with your hardware.\nIf you are experiencing problems " 1182 "with your hardware.\nIf you are experiencing problems "
1178 "please contact your Intel or hardware representative " 1183 "please contact your Intel or hardware representative "
1179 "who provided you with this hardware.\n"); 1184 "who provided you with this hardware.\n");
1180 break; 1185 break;
1181 default: 1186 default:
1182 break; 1187 break;
1183 } 1188 }
1184 1189
1185 /* Setup OS specific network interface */ 1190 /* Setup OS specific network interface */
1186 if (ixgbe_setup_interface(dev, adapter) != 0) 1191 if (ixgbe_setup_interface(dev, adapter) != 0)
1187 goto err_late; 1192 goto err_late;
1188 1193
1189 /* 1194 /*
 1190 * Print PHY ID only for copper PHYs. On devices which have an SFP(+) cage 1195 * Print PHY ID only for copper PHYs. On devices which have an SFP(+) cage
 1191 * and a module inserted, phy.id is not an MII PHY ID but an SFF 8024 ID. 1196 * and a module inserted, phy.id is not an MII PHY ID but an SFF 8024 ID.
1192 */ 1197 */
1193 if (hw->phy.media_type == ixgbe_media_type_copper) { 1198 if (hw->phy.media_type == ixgbe_media_type_copper) {
1194 uint16_t id1, id2; 1199 uint16_t id1, id2;
1195 int oui, model, rev; 1200 int oui, model, rev;
1196 char descr[MII_MAX_DESCR_LEN]; 1201 char descr[MII_MAX_DESCR_LEN];
1197 1202
1198 id1 = hw->phy.id >> 16; 1203 id1 = hw->phy.id >> 16;
1199 id2 = hw->phy.id & 0xffff; 1204 id2 = hw->phy.id & 0xffff;
1200 oui = MII_OUI(id1, id2); 1205 oui = MII_OUI(id1, id2);
1201 model = MII_MODEL(id2); 1206 model = MII_MODEL(id2);
1202 rev = MII_REV(id2); 1207 rev = MII_REV(id2);
1203 mii_get_descr(descr, sizeof(descr), oui, model); 1208 mii_get_descr(descr, sizeof(descr), oui, model);
1204 if (descr[0]) 1209 if (descr[0])
1205 aprint_normal_dev(dev, "PHY: %s, rev. %d\n", 1210 aprint_normal_dev(dev, "PHY: %s, rev. %d\n",
1206 descr, rev); 1211 descr, rev);
1207 else 1212 else
1208 aprint_normal_dev(dev, 1213 aprint_normal_dev(dev,
1209 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n", 1214 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1210 oui, model, rev); 1215 oui, model, rev);
1211 } 1216 }
1212 1217
1213 /* Enable EEE power saving */ 1218 /* Enable EEE power saving */
1214 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 1219 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1215 hw->mac.ops.setup_eee(hw, 1220 hw->mac.ops.setup_eee(hw,
1216 adapter->feat_en & IXGBE_FEATURE_EEE); 1221 adapter->feat_en & IXGBE_FEATURE_EEE);
1217 1222
1218 /* Enable power to the phy. */ 1223 /* Enable power to the phy. */
1219 if (!unsupported_sfp) { 1224 if (!unsupported_sfp) {
1220 /* Enable the optics for 82599 SFP+ fiber */ 1225 /* Enable the optics for 82599 SFP+ fiber */
1221 ixgbe_enable_tx_laser(hw); 1226 ixgbe_enable_tx_laser(hw);
1222 1227
1223 /* 1228 /*
1224 * XXX Currently, ixgbe_set_phy_power() supports only copper 1229 * XXX Currently, ixgbe_set_phy_power() supports only copper
1225 * PHY, so it's not required to test with !unsupported_sfp. 1230 * PHY, so it's not required to test with !unsupported_sfp.
1226 */ 1231 */
1227 ixgbe_set_phy_power(hw, TRUE); 1232 ixgbe_set_phy_power(hw, TRUE);
1228 } 1233 }
1229 1234
1230 /* Initialize statistics */ 1235 /* Initialize statistics */
1231 ixgbe_update_stats_counters(adapter); 1236 ixgbe_update_stats_counters(adapter);
1232 1237
1233 /* Check PCIE slot type/speed/width */ 1238 /* Check PCIE slot type/speed/width */
1234 ixgbe_get_slot_info(adapter); 1239 ixgbe_get_slot_info(adapter);
1235 1240
1236 /* 1241 /*
1237 * Do time init and sysctl init here, but 1242 * Do time init and sysctl init here, but
1238 * only on the first port of a bypass adapter. 1243 * only on the first port of a bypass adapter.
1239 */ 1244 */
1240 ixgbe_bypass_init(adapter); 1245 ixgbe_bypass_init(adapter);
1241 1246
1242 /* Set an initial dmac value */ 1247 /* Set an initial dmac value */
1243 adapter->dmac = 0; 1248 adapter->dmac = 0;
1244 /* Set initial advertised speeds (if applicable) */ 1249 /* Set initial advertised speeds (if applicable) */
1245 adapter->advertise = ixgbe_get_default_advertise(adapter); 1250 adapter->advertise = ixgbe_get_default_advertise(adapter);
1246 1251
1247 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 1252 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1248 ixgbe_define_iov_schemas(dev, &error); 1253 ixgbe_define_iov_schemas(dev, &error);
1249 1254
1250 /* Add sysctls */ 1255 /* Add sysctls */
1251 ixgbe_add_device_sysctls(adapter); 1256 ixgbe_add_device_sysctls(adapter);
1252 ixgbe_add_hw_stats(adapter); 1257 ixgbe_add_hw_stats(adapter);
1253 1258
1254 /* For Netmap */ 1259 /* For Netmap */
1255 adapter->init_locked = ixgbe_init_locked; 1260 adapter->init_locked = ixgbe_init_locked;
1256 adapter->stop_locked = ixgbe_stop_locked; 1261 adapter->stop_locked = ixgbe_stop_locked;
1257 1262
1258 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 1263 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1259 ixgbe_netmap_attach(adapter); 1264 ixgbe_netmap_attach(adapter);
1260 1265
1261 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); 1266 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1262 aprint_verbose_dev(dev, "feature cap %s\n", buf); 1267 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1263 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); 1268 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1264 aprint_verbose_dev(dev, "feature ena %s\n", buf); 1269 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1265 1270
1266 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume)) 1271 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1267 pmf_class_network_register(dev, adapter->ifp); 1272 pmf_class_network_register(dev, adapter->ifp);
1268 else 1273 else
1269 aprint_error_dev(dev, "couldn't establish power handler\n"); 1274 aprint_error_dev(dev, "couldn't establish power handler\n");
1270 1275
1271 /* Init recovery mode timer and state variable */ 1276 /* Init recovery mode timer and state variable */
1272 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { 1277 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1273 adapter->recovery_mode = 0; 1278 adapter->recovery_mode = 0;
1274 1279
1275 /* Set up the timer callout */ 1280 /* Set up the timer callout */
1276 callout_init(&adapter->recovery_mode_timer, 1281 callout_init(&adapter->recovery_mode_timer,
1277 IXGBE_CALLOUT_FLAGS); 1282 IXGBE_CALLOUT_FLAGS);
1278 snprintf(wqname, sizeof(wqname), "%s-recovery", 1283 snprintf(wqname, sizeof(wqname), "%s-recovery",
1279 device_xname(dev)); 1284 device_xname(dev));
1280 error = workqueue_create(&adapter->recovery_mode_timer_wq, 1285 error = workqueue_create(&adapter->recovery_mode_timer_wq,
1281 wqname, ixgbe_handle_recovery_mode_timer, adapter, 1286 wqname, ixgbe_handle_recovery_mode_timer, adapter,
1282 IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS); 1287 IXGBE_WORKQUEUE_PRI, IPL_NET, IXGBE_TASKLET_WQ_FLAGS);
1283 if (error) { 1288 if (error) {
1284 aprint_error_dev(dev, "could not create " 1289 aprint_error_dev(dev, "could not create "
1285 "recovery_mode_timer workqueue (%d)\n", error); 1290 "recovery_mode_timer workqueue (%d)\n", error);
1286 goto err_out; 1291 goto err_out;
1287 } 1292 }
1288 1293
1289 /* Start the task */ 1294 /* Start the task */
1290 callout_reset(&adapter->recovery_mode_timer, hz, 1295 callout_reset(&adapter->recovery_mode_timer, hz,
1291 ixgbe_recovery_mode_timer, adapter); 1296 ixgbe_recovery_mode_timer, adapter);
1292 } 1297 }
1293 1298
1294 INIT_DEBUGOUT("ixgbe_attach: end"); 1299 INIT_DEBUGOUT("ixgbe_attach: end");
1295 adapter->osdep.attached = true; 1300 adapter->osdep.attached = true;
1296 1301
1297 return; 1302 return;
1298 1303
1299err_late: 1304err_late:
1300 ixgbe_free_queues(adapter); 1305 ixgbe_free_queues(adapter);
1301err_out: 1306err_out:
1302 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 1307 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1303 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1308 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1304 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 1309 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1305 ixgbe_free_deferred_handlers(adapter); 1310 ixgbe_free_deferred_handlers(adapter);
1306 ixgbe_free_pci_resources(adapter); 1311 ixgbe_free_pci_resources(adapter);
1307 if (adapter->mta != NULL) 1312 if (adapter->mta != NULL)
1308 free(adapter->mta, M_DEVBUF); 1313 free(adapter->mta, M_DEVBUF);
1309 mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */ 1314 mutex_destroy(&(adapter)->admin_mtx); /* XXX appropriate order? */
1310 IXGBE_CORE_LOCK_DESTROY(adapter); 1315 IXGBE_CORE_LOCK_DESTROY(adapter);
1311 1316
1312 return; 1317 return;
1313} /* ixgbe_attach */ 1318} /* ixgbe_attach */
1314 1319
1315/************************************************************************ 1320/************************************************************************
1316 * ixgbe_check_wol_support 1321 * ixgbe_check_wol_support
1317 * 1322 *
1318 * Checks whether the adapter's ports are capable of 1323 * Checks whether the adapter's ports are capable of
1319 * Wake On LAN by reading the adapter's NVM. 1324 * Wake On LAN by reading the adapter's NVM.
1320 * 1325 *
1321 * Sets each port's hw->wol_enabled value depending 1326 * Sets each port's hw->wol_enabled value depending
1322 * on the value read here. 1327 * on the value read here.
1323 ************************************************************************/ 1328 ************************************************************************/
1324static void 1329static void
1325ixgbe_check_wol_support(struct adapter *adapter) 1330ixgbe_check_wol_support(struct adapter *adapter)
1326{ 1331{
1327 struct ixgbe_hw *hw = &adapter->hw; 1332 struct ixgbe_hw *hw = &adapter->hw;
1328 u16 dev_caps = 0; 1333 u16 dev_caps = 0;
1329 1334
1330 /* Find out WoL support for port */ 1335 /* Find out WoL support for port */
1331 adapter->wol_support = hw->wol_enabled = 0; 1336 adapter->wol_support = hw->wol_enabled = 0;
1332 ixgbe_get_device_caps(hw, &dev_caps); 1337 ixgbe_get_device_caps(hw, &dev_caps);
1333 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 1338 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1334 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 1339 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1335 hw->bus.func == 0)) 1340 hw->bus.func == 0))
1336 adapter->wol_support = hw->wol_enabled = 1; 1341 adapter->wol_support = hw->wol_enabled = 1;
1337 1342
1338 /* Save initial wake up filter configuration */ 1343 /* Save initial wake up filter configuration */
1339 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 1344 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1340 1345
1341 return; 1346 return;
1342} /* ixgbe_check_wol_support */ 1347} /* ixgbe_check_wol_support */
1343 1348
1344/************************************************************************ 1349/************************************************************************
1345 * ixgbe_setup_interface 1350 * ixgbe_setup_interface
1346 * 1351 *
1347 * Setup networking device structure and register an interface. 1352 * Setup networking device structure and register an interface.
1348 ************************************************************************/ 1353 ************************************************************************/
1349static int 1354static int
1350ixgbe_setup_interface(device_t dev, struct adapter *adapter) 1355ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1351{ 1356{
1352 struct ethercom *ec = &adapter->osdep.ec; 1357 struct ethercom *ec = &adapter->osdep.ec;
1353 struct ifnet *ifp; 1358 struct ifnet *ifp;
1354 1359
1355 INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 1360 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1356 1361
1357 ifp = adapter->ifp = &ec->ec_if; 1362 ifp = adapter->ifp = &ec->ec_if;
1358 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); 1363 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1359 ifp->if_baudrate = IF_Gbps(10); 1364 ifp->if_baudrate = IF_Gbps(10);
1360 ifp->if_init = ixgbe_init; 1365 ifp->if_init = ixgbe_init;
1361 ifp->if_stop = ixgbe_ifstop; 1366 ifp->if_stop = ixgbe_ifstop;
1362 ifp->if_softc = adapter; 1367 ifp->if_softc = adapter;
1363 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1368 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1364#ifdef IXGBE_MPSAFE 1369#ifdef IXGBE_MPSAFE
1365 ifp->if_extflags = IFEF_MPSAFE; 1370 ifp->if_extflags = IFEF_MPSAFE;
1366#endif 1371#endif
1367 ifp->if_ioctl = ixgbe_ioctl; 1372 ifp->if_ioctl = ixgbe_ioctl;
1368#if __FreeBSD_version >= 1100045 1373#if __FreeBSD_version >= 1100045
1369 /* TSO parameters */ 1374 /* TSO parameters */
1370 ifp->if_hw_tsomax = 65518; 1375 ifp->if_hw_tsomax = 65518;
1371 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; 1376 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1372 ifp->if_hw_tsomaxsegsize = 2048; 1377 ifp->if_hw_tsomaxsegsize = 2048;
1373#endif 1378#endif
1374 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { 1379 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1375#if 0 1380#if 0
1376 ixgbe_start_locked = ixgbe_legacy_start_locked; 1381 ixgbe_start_locked = ixgbe_legacy_start_locked;
1377#endif 1382#endif
1378 } else { 1383 } else {
1379 ifp->if_transmit = ixgbe_mq_start; 1384 ifp->if_transmit = ixgbe_mq_start;
1380#if 0 1385#if 0
1381 ixgbe_start_locked = ixgbe_mq_start_locked; 1386 ixgbe_start_locked = ixgbe_mq_start_locked;
1382#endif 1387#endif
1383 } 1388 }
1384 ifp->if_start = ixgbe_legacy_start; 1389 ifp->if_start = ixgbe_legacy_start;
1385 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 1390 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1386 IFQ_SET_READY(&ifp->if_snd); 1391 IFQ_SET_READY(&ifp->if_snd);
1387 1392
1388 if_initialize(ifp); 1393 if_initialize(ifp);
1389 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); 1394 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1390 ether_ifattach(ifp, adapter->hw.mac.addr); 1395 ether_ifattach(ifp, adapter->hw.mac.addr);
1391 aprint_normal_dev(dev, "Ethernet address %s\n", 1396 aprint_normal_dev(dev, "Ethernet address %s\n",
1392 ether_sprintf(adapter->hw.mac.addr)); 1397 ether_sprintf(adapter->hw.mac.addr));
1393 /* 1398 /*
1394 * We use per TX queue softint, so if_deferred_start_init() isn't 1399 * We use per TX queue softint, so if_deferred_start_init() isn't
1395 * used. 1400 * used.
1396 */ 1401 */
1397 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb); 1402 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1398 1403
1399 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1404 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1400 1405
1401 /* 1406 /*
1402 * Tell the upper layer(s) we support long frames. 1407 * Tell the upper layer(s) we support long frames.
1403 */ 1408 */
1404 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1409 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1405 1410
1406 /* Set capability flags */ 1411 /* Set capability flags */
1407 ifp->if_capabilities |= IFCAP_RXCSUM 1412 ifp->if_capabilities |= IFCAP_RXCSUM
1408 | IFCAP_TXCSUM 1413 | IFCAP_TXCSUM
1409 | IFCAP_TSOv4 1414 | IFCAP_TSOv4
1410 | IFCAP_TSOv6; 1415 | IFCAP_TSOv6;
1411 ifp->if_capenable = 0; 1416 ifp->if_capenable = 0;
1412 1417
1413 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING 1418 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1414 | ETHERCAP_VLAN_HWCSUM 1419 | ETHERCAP_VLAN_HWCSUM
1415 | ETHERCAP_JUMBO_MTU 1420 | ETHERCAP_JUMBO_MTU
1416 | ETHERCAP_VLAN_MTU; 1421 | ETHERCAP_VLAN_MTU;
1417 1422
1418 /* Enable the above capabilities by default */ 1423 /* Enable the above capabilities by default */
1419 ec->ec_capenable = ec->ec_capabilities; 1424 ec->ec_capenable = ec->ec_capabilities;
1420 1425
1421 /* 1426 /*
 1422 * Don't turn this on by default. If vlans are 1427 * Don't turn this on by default. If vlans are
 1423 * created on another pseudo device (e.g. lagg), 1428 * created on another pseudo device (e.g. lagg),
 1424 * then vlan events are not passed through, which 1429 * then vlan events are not passed through, which
 1425 * breaks operation; with HW FILTER off it works. If 1430 * breaks operation; with HW FILTER off it works. If
 1426 * you use vlans directly on the ixgbe driver, you can 1431 * you use vlans directly on the ixgbe driver, you can
1427 * enable this and get full hardware tag filtering. 1432 * enable this and get full hardware tag filtering.
1428 */ 1433 */
1429 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1434 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1430 1435
1431 /* 1436 /*
1432 * Specify the media types supported by this adapter and register 1437 * Specify the media types supported by this adapter and register
1433 * callbacks to update media and link information 1438 * callbacks to update media and link information
1434 */ 1439 */
1435 ec->ec_ifmedia = &adapter->media; 1440 ec->ec_ifmedia = &adapter->media;
1436 ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change, 1441 ifmedia_init_with_lock(&adapter->media, IFM_IMASK, ixgbe_media_change,
1437 ixgbe_media_status, &adapter->core_mtx); 1442 ixgbe_media_status, &adapter->core_mtx);
1438 1443
1439 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); 1444 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1440 ixgbe_add_media_types(adapter); 1445 ixgbe_add_media_types(adapter);
1441 1446
1442 /* Set autoselect media by default */ 1447 /* Set autoselect media by default */
1443 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1448 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1444 1449
1445 if_register(ifp); 1450 if_register(ifp);
1446 1451
1447 return (0); 1452 return (0);
1448} /* ixgbe_setup_interface */ 1453} /* ixgbe_setup_interface */
1449 1454
1450/************************************************************************ 1455/************************************************************************
1451 * ixgbe_add_media_types 1456 * ixgbe_add_media_types
1452 ************************************************************************/ 1457 ************************************************************************/
1453static void 1458static void
1454ixgbe_add_media_types(struct adapter *adapter) 1459ixgbe_add_media_types(struct adapter *adapter)
1455{ 1460{
1456 struct ixgbe_hw *hw = &adapter->hw; 1461 struct ixgbe_hw *hw = &adapter->hw;
1457 u64 layer; 1462 u64 layer;
1458 1463
1459 layer = adapter->phy_layer; 1464 layer = adapter->phy_layer;
1460 1465
1461#define ADD(mm, dd) \ 1466#define ADD(mm, dd) \
1462 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL); 1467 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1463 1468
1464 ADD(IFM_NONE, 0); 1469 ADD(IFM_NONE, 0);
1465 1470
1466 /* Media types with matching NetBSD media defines */ 1471 /* Media types with matching NetBSD media defines */
1467 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { 1472 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1468 ADD(IFM_10G_T | IFM_FDX, 0); 1473 ADD(IFM_10G_T | IFM_FDX, 0);
1469 } 1474 }
1470 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { 1475 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1471 ADD(IFM_1000_T | IFM_FDX, 0); 1476 ADD(IFM_1000_T | IFM_FDX, 0);
1472 } 1477 }
1473 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) { 1478 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1474 ADD(IFM_100_TX | IFM_FDX, 0); 1479 ADD(IFM_100_TX | IFM_FDX, 0);
1475 } 1480 }
1476 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) { 1481 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1477 ADD(IFM_10_T | IFM_FDX, 0); 1482 ADD(IFM_10_T | IFM_FDX, 0);
1478 } 1483 }
1479 1484
1480 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1485 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1481 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) { 1486 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1482 ADD(IFM_10G_TWINAX | IFM_FDX, 0); 1487 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1483 } 1488 }
1484 1489
1485 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1490 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1486 ADD(IFM_10G_LR | IFM_FDX, 0); 1491 ADD(IFM_10G_LR | IFM_FDX, 0);
1487 if (hw->phy.multispeed_fiber) { 1492 if (hw->phy.multispeed_fiber) {
1488 ADD(IFM_1000_LX | IFM_FDX, 0); 1493 ADD(IFM_1000_LX | IFM_FDX, 0);
1489 } 1494 }
1490 } 1495 }
1491 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1496 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1492 ADD(IFM_10G_SR | IFM_FDX, 0); 1497 ADD(IFM_10G_SR | IFM_FDX, 0);
1493 if (hw->phy.multispeed_fiber) { 1498 if (hw->phy.multispeed_fiber) {
1494 ADD(IFM_1000_SX | IFM_FDX, 0); 1499 ADD(IFM_1000_SX | IFM_FDX, 0);
1495 } 1500 }
1496 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { 1501 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1497 ADD(IFM_1000_SX | IFM_FDX, 0); 1502 ADD(IFM_1000_SX | IFM_FDX, 0);
1498 } 1503 }
1499 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) { 1504 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1500 ADD(IFM_10G_CX4 | IFM_FDX, 0); 1505 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1501 } 1506 }
1502 1507
1503 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 1508 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1504 ADD(IFM_10G_KR | IFM_FDX, 0); 1509 ADD(IFM_10G_KR | IFM_FDX, 0);
1505 } 1510 }
1506 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 1511 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1507 ADD(IFM_10G_KX4 | IFM_FDX, 0); 1512 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1508 } 1513 }
1509 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 1514 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1510 ADD(IFM_1000_KX | IFM_FDX, 0); 1515 ADD(IFM_1000_KX | IFM_FDX, 0);
1511 } 1516 }
1512 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { 1517 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1513 ADD(IFM_2500_KX | IFM_FDX, 0); 1518 ADD(IFM_2500_KX | IFM_FDX, 0);
1514 } 1519 }
1515 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) { 1520 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1516 ADD(IFM_2500_T | IFM_FDX, 0); 1521 ADD(IFM_2500_T | IFM_FDX, 0);
1517 } 1522 }
1518 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) { 1523 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1519 ADD(IFM_5000_T | IFM_FDX, 0); 1524 ADD(IFM_5000_T | IFM_FDX, 0);
1520 } 1525 }
1521 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1526 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1522 ADD(IFM_1000_BX10 | IFM_FDX, 0); 1527 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1523 /* XXX no ifmedia_set? */ 1528 /* XXX no ifmedia_set? */
1524 1529
1525 ADD(IFM_AUTO, 0); 1530 ADD(IFM_AUTO, 0);
1526 1531
1527#undef ADD 1532#undef ADD
1528} /* ixgbe_add_media_types */ 1533} /* ixgbe_add_media_types */
1529 1534
1530/************************************************************************ 1535/************************************************************************
1531 * ixgbe_is_sfp 1536 * ixgbe_is_sfp
1532 ************************************************************************/ 1537 ************************************************************************/
1533static inline bool 1538static inline bool
1534ixgbe_is_sfp(struct ixgbe_hw *hw) 1539ixgbe_is_sfp(struct ixgbe_hw *hw)
1535{ 1540{
1536 switch (hw->mac.type) { 1541 switch (hw->mac.type) {
1537 case ixgbe_mac_82598EB: 1542 case ixgbe_mac_82598EB:
1538 if (hw->phy.type == ixgbe_phy_nl) 1543 if (hw->phy.type == ixgbe_phy_nl)
1539 return (TRUE); 1544 return (TRUE);
1540 return (FALSE); 1545 return (FALSE);
1541 case ixgbe_mac_82599EB: 1546 case ixgbe_mac_82599EB:
1542 case ixgbe_mac_X550EM_x: 1547 case ixgbe_mac_X550EM_x:
1543 case ixgbe_mac_X550EM_a: 1548 case ixgbe_mac_X550EM_a:
1544 switch (hw->mac.ops.get_media_type(hw)) { 1549 switch (hw->mac.ops.get_media_type(hw)) {
1545 case ixgbe_media_type_fiber: 1550 case ixgbe_media_type_fiber:
1546 case ixgbe_media_type_fiber_qsfp: 1551 case ixgbe_media_type_fiber_qsfp:
1547 return (TRUE); 1552 return (TRUE);
1548 default: 1553 default:
1549 return (FALSE); 1554 return (FALSE);
1550 } 1555 }
1551 default: 1556 default:
1552 return (FALSE); 1557 return (FALSE);
1553 } 1558 }
1554} /* ixgbe_is_sfp */ 1559} /* ixgbe_is_sfp */
1555 1560
1556static void 1561static void
1557ixgbe_schedule_admin_tasklet(struct adapter *adapter) 1562ixgbe_schedule_admin_tasklet(struct adapter *adapter)
1558{ 1563{
1559 1564
1560 KASSERT(mutex_owned(&adapter->admin_mtx)); 1565 KASSERT(mutex_owned(&adapter->admin_mtx));
1561 1566
1562 if (__predict_true(adapter->osdep.detaching == false)) { 1567 if (__predict_true(adapter->osdep.detaching == false)) {
1563 if (adapter->admin_pending == 0) 1568 if (adapter->admin_pending == 0)
1564 workqueue_enqueue(adapter->admin_wq, 1569 workqueue_enqueue(adapter->admin_wq,
1565 &adapter->admin_wc, NULL); 1570 &adapter->admin_wc, NULL);
1566 adapter->admin_pending = 1; 1571 adapter->admin_pending = 1;
1567 } 1572 }
1568} 1573}
1569 1574
1570/************************************************************************ 1575/************************************************************************
1571 * ixgbe_config_link 1576 * ixgbe_config_link
1572 ************************************************************************/ 1577 ************************************************************************/
1573static void 1578static void
1574ixgbe_config_link(struct adapter *adapter) 1579ixgbe_config_link(struct adapter *adapter)
1575{ 1580{
1576 struct ixgbe_hw *hw = &adapter->hw; 1581 struct ixgbe_hw *hw = &adapter->hw;
1577 u32 autoneg, err = 0; 1582 u32 autoneg, err = 0;
1578 u32 task_requests = 0; 1583 u32 task_requests = 0;
1579 bool sfp, negotiate = false; 1584 bool sfp, negotiate = false;
1580 1585
1581 sfp = ixgbe_is_sfp(hw); 1586 sfp = ixgbe_is_sfp(hw);
1582 1587
1583 if (sfp) { 1588 if (sfp) {
1584 if (hw->phy.multispeed_fiber) { 1589 if (hw->phy.multispeed_fiber) {
1585 ixgbe_enable_tx_laser(hw); 1590 ixgbe_enable_tx_laser(hw);
1586 task_requests |= IXGBE_REQUEST_TASK_MSF_WOI; 1591 task_requests |= IXGBE_REQUEST_TASK_MSF_WOI;
1587 } 1592 }
1588 task_requests |= IXGBE_REQUEST_TASK_MOD_WOI; 1593 task_requests |= IXGBE_REQUEST_TASK_MOD_WOI;
1589 1594
1590 mutex_enter(&adapter->admin_mtx); 1595 mutex_enter(&adapter->admin_mtx);
1591 adapter->task_requests |= task_requests; 1596 adapter->task_requests |= task_requests;
1592 ixgbe_schedule_admin_tasklet(adapter); 1597 ixgbe_schedule_admin_tasklet(adapter);
1593 mutex_exit(&adapter->admin_mtx); 1598 mutex_exit(&adapter->admin_mtx);
1594 } else { 1599 } else {
1595 struct ifmedia *ifm = &adapter->media; 1600 struct ifmedia *ifm = &adapter->media;
1596 1601
1597 if (hw->mac.ops.check_link) 1602 if (hw->mac.ops.check_link)
1598 err = ixgbe_check_link(hw, &adapter->link_speed, 1603 err = ixgbe_check_link(hw, &adapter->link_speed,
1599 &adapter->link_up, FALSE); 1604 &adapter->link_up, FALSE);
1600 if (err) 1605 if (err)
1601 return; 1606 return;
1602 1607
1603 /* 1608 /*
1604 * Check if it's the first call. If it's the first call, 1609 * Check if it's the first call. If it's the first call,
1605 * get value for auto negotiation. 1610 * get value for auto negotiation.
1606 */ 1611 */
1607 autoneg = hw->phy.autoneg_advertised; 1612 autoneg = hw->phy.autoneg_advertised;
1608 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE) 1613 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1609 && ((!autoneg) && (hw->mac.ops.get_link_capabilities))) 1614 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1610 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1615 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1611 &negotiate); 1616 &negotiate);
1612 if (err) 1617 if (err)
1613 return; 1618 return;
1614 if (hw->mac.ops.setup_link) 1619 if (hw->mac.ops.setup_link)
1615 err = hw->mac.ops.setup_link(hw, autoneg, 1620 err = hw->mac.ops.setup_link(hw, autoneg,
1616 adapter->link_up); 1621 adapter->link_up);
1617 } 1622 }
1618 1623
1619} /* ixgbe_config_link */ 1624} /* ixgbe_config_link */
1620 1625
1621/************************************************************************ 1626/************************************************************************
1622 * ixgbe_update_stats_counters - Update board statistics counters. 1627 * ixgbe_update_stats_counters - Update board statistics counters.
1623 ************************************************************************/ 1628 ************************************************************************/
1624static void 1629static void
1625ixgbe_update_stats_counters(struct adapter *adapter) 1630ixgbe_update_stats_counters(struct adapter *adapter)
1626{ 1631{
1627 struct ifnet *ifp = adapter->ifp; 1632 struct ifnet *ifp = adapter->ifp;
1628 struct ixgbe_hw *hw = &adapter->hw; 1633 struct ixgbe_hw *hw = &adapter->hw;
1629 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1634 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1630 u32 missed_rx = 0, bprc, lxontxc, lxofftxc; 1635 u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
1631 u64 total, total_missed_rx = 0; 1636 u64 total, total_missed_rx = 0;
1632 uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc; 1637 uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
1633 unsigned int queue_counters; 1638 unsigned int queue_counters;
1634 int i; 1639 int i;
1635 1640
1636 IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs); 1641 IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
1637 IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc); 1642 IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);
1638 1643
1639 IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc); 1644 IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
1640 IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc); 1645 IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
1641 if (hw->mac.type >= ixgbe_mac_X550) 1646 if (hw->mac.type >= ixgbe_mac_X550)
1642 IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc); 1647 IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);
1643 1648
1644 /* 16 registers exist */ 1649 /* 16 registers exist */
1645 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues); 1650 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1646 for (i = 0; i < queue_counters; i++) { 1651 for (i = 0; i < queue_counters; i++) {
1647 IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]); 1652 IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
1648 IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]); 1653 IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
1649 if (hw->mac.type >= ixgbe_mac_82599EB) 1654 if (hw->mac.type >= ixgbe_mac_82599EB)
1650 IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]); 1655 IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
1651 } 1656 }
1652 1657
1653 /* 8 registers exist */ 1658 /* 8 registers exist */
1654 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1659 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1655 uint32_t mp; 1660 uint32_t mp;
1656 1661
1657 /* MPC */ 1662 /* MPC */
1658 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 1663 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1659 /* global total per queue */ 1664 /* global total per queue */
1660 IXGBE_EVC_ADD(&stats->mpc[i], mp); 1665 IXGBE_EVC_ADD(&stats->mpc[i], mp);
1661 /* running comprehensive total for stats display */ 1666 /* running comprehensive total for stats display */
1662 total_missed_rx += mp; 1667 total_missed_rx += mp;
1663 1668
1664 if (hw->mac.type == ixgbe_mac_82598EB) 1669 if (hw->mac.type == ixgbe_mac_82598EB)
1665 IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]); 1670 IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);
1666 1671
1667 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]); 1672 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
1668 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]); 1673 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
1669 if (hw->mac.type >= ixgbe_mac_82599EB) { 1674 if (hw->mac.type >= ixgbe_mac_82599EB) {
1670 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONRXCNT(i), pxonrxc[i]); 1675 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONRXCNT(i), pxonrxc[i]);
1671 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFRXCNT(i), pxoffrxc[i]); 1676 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
1672 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXON2OFFCNT(i), 1677 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXON2OFFCNT(i),
1673 pxon2offc[i]); 1678 pxon2offc[i]);
1674 } else { 1679 } else {
1675 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONRXC(i), pxonrxc[i]); 1680 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONRXC(i), pxonrxc[i]);
1676 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFRXC(i), pxoffrxc[i]); 1681 IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFRXC(i), pxoffrxc[i]);
1677 } 1682 }
1678 } 1683 }
1679 IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx); 1684 IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);
1680 1685
1681	/* The datasheet says M[LR]FC are valid only when the link is up at 10Gbps */ 1686	/* The datasheet says M[LR]FC are valid only when the link is up at 10Gbps */
1682 if ((adapter->link_active == LINK_STATE_UP) 1687 if ((adapter->link_active == LINK_STATE_UP)
1683 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) { 1688 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1684 IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc); 1689 IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
1685 IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc); 1690 IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
1686 } 1691 }
1687 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec); 1692 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);
1688 1693
1689 /* Hardware workaround, gprc counts missed packets */ 1694 /* Hardware workaround, gprc counts missed packets */
1690 IXGBE_EVC_ADD(&stats->gprc, 1695 IXGBE_EVC_ADD(&stats->gprc,
1691 IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx); 1696 IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);
1692 1697
1693 IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc); 1698 IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
1694 IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc); 1699 IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
1695 total = lxontxc + lxofftxc; 1700 total = lxontxc + lxofftxc;
1696 1701
1697 if (hw->mac.type != ixgbe_mac_82598EB) { 1702 if (hw->mac.type != ixgbe_mac_82598EB) {
1698 IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) + 1703 IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
1699 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32)); 1704 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
1700 IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1705 IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1701 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) 1706 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
1702 - total * ETHER_MIN_LEN); 1707 - total * ETHER_MIN_LEN);
1703 IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) + 1708 IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
1704 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32)); 1709 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
1705 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc); 1710 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
1706 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc); 1711 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
1707 } else { 1712 } else {
1708 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc); 1713 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
1709 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc); 1714 IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
1710 /* 82598 only has a counter in the high register */ 1715 /* 82598 only has a counter in the high register */
1711 IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc); 1716 IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
1712 IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH) 1717 IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
1713 - total * ETHER_MIN_LEN); 1718 - total * ETHER_MIN_LEN);
1714 IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor); 1719 IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
1715 } 1720 }
1716 1721
1717 /* 1722 /*
1718 * Workaround: mprc hardware is incorrectly counting 1723 * Workaround: mprc hardware is incorrectly counting
1719 * broadcasts, so for now we subtract those. 1724 * broadcasts, so for now we subtract those.
1720 */ 1725 */
1721 IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc); 1726 IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
1722 IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC) 1727 IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
1723 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0)); 1728 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));
1724 1729
1725 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64); 1730 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
1726 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127); 1731 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
1727 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255); 1732 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
1728 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511); 1733 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
1729 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023); 1734 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
1730 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522); 1735 IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);
1731 1736
1732 IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total); 1737 IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
1733 IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total); 1738 IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
1734 IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total); 1739 IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);
1735 1740
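
The subtractions of "total" above compensate for the MAC folding transmitted link XON/XOFF pause frames into its good-transmit counters; each pause frame is a minimum-size frame sent to a multicast address, so it inflates the octet, packet, multicast and 64-byte buckets. A comment-style sketch of the assumed arithmetic, with total = lxontxc + lxofftxc:

	/*
	 * Sketch of the adjustment (assumed rationale):
	 *
	 *	gotc  += GOTCL + (GOTCH << 32) - total * ETHER_MIN_LEN;
	 *	gptc  += GPTC  - total;
	 *	mptc  += MPTC  - total;
	 *	ptc64 += PTC64 - total;
	 */
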
1736 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc); 1741 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
1737 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc); 1742 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
1738 IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc); 1743 IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
1739 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc); 1744 IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);
1740 1745
1741 IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc); 1746 IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
1742 IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc); 1747 IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
1743 IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc); 1748 IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
1744 IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr); 1749 IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
1745 IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt); 1750 IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
1746 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127); 1751 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
1747 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255); 1752 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
1748 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511); 1753 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
1749 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023); 1754 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
1750 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522); 1755 IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
1751 IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc); 1756 IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
1752 IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec); 1757 IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
1753 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc); 1758 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
1754 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast); 1759 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
1755	/* FCoE counters exist only on 82599 and newer, so skip them on 82598 */ 1760	/* FCoE counters exist only on 82599 and newer, so skip them on 82598 */
1756 if (hw->mac.type != ixgbe_mac_82598EB) { 1761 if (hw->mac.type != ixgbe_mac_82598EB) {
1757 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc); 1762 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
1758 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc); 1763 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
1759 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc); 1764 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
1760 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc); 1765 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
1761 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc); 1766 IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
1762 } 1767 }
1763 1768
1764 /* 1769 /*
1765 * Fill out the OS statistics structure. Only RX errors are required 1770 * Fill out the OS statistics structure. Only RX errors are required
1766 * here because all TX counters are incremented in the TX path and 1771 * here because all TX counters are incremented in the TX path and
1767 * normal RX counters are prepared in ether_input(). 1772 * normal RX counters are prepared in ether_input().
1768 */ 1773 */
1769 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 1774 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1770 if_statadd_ref(nsr, if_iqdrops, total_missed_rx); 1775 if_statadd_ref(nsr, if_iqdrops, total_missed_rx);
1771 1776
1772 /* 1777 /*
1773	 * Aggregate the following error types as RX errors: 1778	 * Aggregate the following error types as RX errors:
1774 * - CRC error count, 1779 * - CRC error count,
1775 * - illegal byte error count, 1780 * - illegal byte error count,
1776 * - length error count, 1781 * - length error count,
1777 * - undersized packets count, 1782 * - undersized packets count,
1778 * - fragmented packets count, 1783 * - fragmented packets count,
1779 * - oversized packets count, 1784 * - oversized packets count,
1780 * - jabber count. 1785 * - jabber count.
1781 */ 1786 */
1782 if_statadd_ref(nsr, if_ierrors, 1787 if_statadd_ref(nsr, if_ierrors,
1783 crcerrs + illerrc + rlec + ruc + rfc + roc + rjc); 1788 crcerrs + illerrc + rlec + ruc + rfc + roc + rjc);
1784 1789
1785 IF_STAT_PUTREF(ifp); 1790 IF_STAT_PUTREF(ifp);
1786} /* ixgbe_update_stats_counters */ 1791} /* ixgbe_update_stats_counters */
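
The function above leans on a small family of counter helpers whose definitions live in ixgbe.h and are not part of this diff. The sketch below only captures their assumed shape: IXGBE_EVC_ADD and IXGBE_EVC_STORE bump or reset a NetBSD evcnt, IXGBE_EVC_REGADD reads a register and accumulates it into the matching evcnt, and IXGBE_EVC_REGADD2 additionally leaves the raw value in a same-named local variable (crcerrs, rlec, ruc, ... above) so it can feed if_ierrors.

	/* Assumed shapes only; see ixgbe.h for the real definitions. */
	#define IXGBE_EVC_ADD(evp, val)   ((evp)->ev_count += (val))
	#define IXGBE_EVC_STORE(evp, val) ((evp)->ev_count = (val))
	#define IXGBE_EVC_REGADD(hw, stats, reg, name)			\
		IXGBE_EVC_ADD(&(stats)->name, IXGBE_READ_REG((hw), (reg)))
	#define IXGBE_EVC_REGADD2(hw, stats, reg, name)			\
	do {								\
		(name) = IXGBE_READ_REG((hw), (reg));			\
		IXGBE_EVC_ADD(&(stats)->name, (name));			\
	} while (0)
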
1787 1792
1788/************************************************************************ 1793/************************************************************************
1789 * ixgbe_add_hw_stats 1794 * ixgbe_add_hw_stats
1790 * 1795 *
1791 * Add sysctl variables, one per statistic, to the system. 1796 * Add sysctl variables, one per statistic, to the system.
1792 ************************************************************************/ 1797 ************************************************************************/
1793static void 1798static void
1794ixgbe_add_hw_stats(struct adapter *adapter) 1799ixgbe_add_hw_stats(struct adapter *adapter)
1795{ 1800{
1796 device_t dev = adapter->dev; 1801 device_t dev = adapter->dev;
1797 const struct sysctlnode *rnode, *cnode; 1802 const struct sysctlnode *rnode, *cnode;
1798 struct sysctllog **log = &adapter->sysctllog; 1803 struct sysctllog **log = &adapter->sysctllog;
1799 struct tx_ring *txr = adapter->tx_rings; 1804 struct tx_ring *txr = adapter->tx_rings;
1800 struct rx_ring *rxr = adapter->rx_rings; 1805 struct rx_ring *rxr = adapter->rx_rings;
1801 struct ixgbe_hw *hw = &adapter->hw; 1806 struct ixgbe_hw *hw = &adapter->hw;
1802 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1807 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1803 const char *xname = device_xname(dev); 1808 const char *xname = device_xname(dev);
1804 int i; 1809 int i;
1805 1810
1806 /* Driver Statistics */ 1811 /* Driver Statistics */
1807 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 1812 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1808 NULL, xname, "Driver tx dma soft fail EFBIG"); 1813 NULL, xname, "Driver tx dma soft fail EFBIG");
1809 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, 1814 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1810 NULL, xname, "m_defrag() failed"); 1815 NULL, xname, "m_defrag() failed");
1811 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 1816 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1812 NULL, xname, "Driver tx dma hard fail EFBIG"); 1817 NULL, xname, "Driver tx dma hard fail EFBIG");
1813 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, 1818 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1814 NULL, xname, "Driver tx dma hard fail EINVAL"); 1819 NULL, xname, "Driver tx dma hard fail EINVAL");
1815 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, 1820 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1816 NULL, xname, "Driver tx dma hard fail other"); 1821 NULL, xname, "Driver tx dma hard fail other");
1817 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 1822 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1818 NULL, xname, "Driver tx dma soft fail EAGAIN"); 1823 NULL, xname, "Driver tx dma soft fail EAGAIN");
1819 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 1824 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1820 NULL, xname, "Driver tx dma soft fail ENOMEM"); 1825 NULL, xname, "Driver tx dma soft fail ENOMEM");
1821 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, 1826 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1822 NULL, xname, "Watchdog timeouts"); 1827 NULL, xname, "Watchdog timeouts");
1823 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, 1828 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1824 NULL, xname, "TSO errors"); 1829 NULL, xname, "TSO errors");
1825 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR, 1830 evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
1826 NULL, xname, "Admin MSI-X IRQ Handled"); 1831 NULL, xname, "Admin MSI-X IRQ Handled");
1827 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR, 1832 evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
1828 NULL, xname, "Link event"); 1833 NULL, xname, "Link event");
1829 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR, 1834 evcnt_attach_dynamic(&adapter->mod_workev, EVCNT_TYPE_INTR,
1830 NULL, xname, "SFP+ module event"); 1835 NULL, xname, "SFP+ module event");
1831 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR, 1836 evcnt_attach_dynamic(&adapter->msf_workev, EVCNT_TYPE_INTR,
1832 NULL, xname, "Multispeed event"); 1837 NULL, xname, "Multispeed event");
1833 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR, 1838 evcnt_attach_dynamic(&adapter->phy_workev, EVCNT_TYPE_INTR,
1834 NULL, xname, "External PHY event"); 1839 NULL, xname, "External PHY event");
1835 1840
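
Each attachment above uses the standard NetBSD event-counter API: evcnt_attach_dynamic(ev, type, parent, group, name) registers a counter under a group string (here the device name, later a per-queue or per-TC name buffer) with a description, after which it is visible in vmstat -e style listings. A minimal usage sketch with hypothetical names:

	#include <sys/evcnt.h>

	static struct evcnt example_ev;		/* hypothetical counter */

	/* group = "ixg0", name = "Example events" */
	evcnt_attach_dynamic(&example_ev, EVCNT_TYPE_MISC, NULL,
	    "ixg0", "Example events");
	example_ev.ev_count++;			/* or IXGBE_EVC_ADD(&example_ev, 1) */
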
1836 /* Max number of traffic class is 8 */ 1841 /* Max number of traffic class is 8 */
1837 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8); 1842 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1838 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1843 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1839 snprintf(adapter->tcs[i].evnamebuf, 1844 snprintf(adapter->tcs[i].evnamebuf,
1840 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", 1845 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1841 xname, i); 1846 xname, i);
1842 if (i < __arraycount(stats->mpc)) { 1847 if (i < __arraycount(stats->mpc)) {
1843 evcnt_attach_dynamic(&stats->mpc[i], 1848 evcnt_attach_dynamic(&stats->mpc[i],
1844 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1849 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1845 "RX Missed Packet Count"); 1850 "RX Missed Packet Count");
1846 if (hw->mac.type == ixgbe_mac_82598EB) 1851 if (hw->mac.type == ixgbe_mac_82598EB)
1847 evcnt_attach_dynamic(&stats->rnbc[i], 1852 evcnt_attach_dynamic(&stats->rnbc[i],
1848 EVCNT_TYPE_MISC, NULL, 1853 EVCNT_TYPE_MISC, NULL,
1849 adapter->tcs[i].evnamebuf, 1854 adapter->tcs[i].evnamebuf,
1850 "Receive No Buffers"); 1855 "Receive No Buffers");
1851 } 1856 }
1852 if (i < __arraycount(stats->pxontxc)) { 1857 if (i < __arraycount(stats->pxontxc)) {
1853 evcnt_attach_dynamic(&stats->pxontxc[i], 1858 evcnt_attach_dynamic(&stats->pxontxc[i],
1854 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1859 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1855 "pxontxc"); 1860 "pxontxc");
1856 evcnt_attach_dynamic(&stats->pxonrxc[i], 1861 evcnt_attach_dynamic(&stats->pxonrxc[i],
1857 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1862 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1858 "pxonrxc"); 1863 "pxonrxc");
1859 evcnt_attach_dynamic(&stats->pxofftxc[i], 1864 evcnt_attach_dynamic(&stats->pxofftxc[i],
1860 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1865 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1861 "pxofftxc"); 1866 "pxofftxc");
1862 evcnt_attach_dynamic(&stats->pxoffrxc[i], 1867 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1863 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1868 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1864 "pxoffrxc"); 1869 "pxoffrxc");
1865 if (hw->mac.type >= ixgbe_mac_82599EB) 1870 if (hw->mac.type >= ixgbe_mac_82599EB)
1866 evcnt_attach_dynamic(&stats->pxon2offc[i], 1871 evcnt_attach_dynamic(&stats->pxon2offc[i],
1867 EVCNT_TYPE_MISC, NULL, 1872 EVCNT_TYPE_MISC, NULL,
1868 adapter->tcs[i].evnamebuf, 1873 adapter->tcs[i].evnamebuf,
1869 "pxon2offc"); 1874 "pxon2offc");
1870 } 1875 }
1871 } 1876 }
1872 1877
1873 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 1878 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1874#ifdef LRO 1879#ifdef LRO
1875 struct lro_ctrl *lro = &rxr->lro; 1880 struct lro_ctrl *lro = &rxr->lro;
1876#endif /* LRO */ 1881#endif /* LRO */
1877 1882
1878 snprintf(adapter->queues[i].evnamebuf, 1883 snprintf(adapter->queues[i].evnamebuf,
1879 sizeof(adapter->queues[i].evnamebuf), "%s q%d", 1884 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1880 xname, i); 1885 xname, i);
1881 snprintf(adapter->queues[i].namebuf, 1886 snprintf(adapter->queues[i].namebuf,
1882 sizeof(adapter->queues[i].namebuf), "q%d", i); 1887 sizeof(adapter->queues[i].namebuf), "q%d", i);
1883 1888
1884 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 1889 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1885 aprint_error_dev(dev, "could not create sysctl root\n"); 1890 aprint_error_dev(dev, "could not create sysctl root\n");
1886 break; 1891 break;
1887 } 1892 }
1888 1893
1889 if (sysctl_createv(log, 0, &rnode, &rnode, 1894 if (sysctl_createv(log, 0, &rnode, &rnode,
1890 0, CTLTYPE_NODE, 1895 0, CTLTYPE_NODE,
1891 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 1896 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1892 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 1897 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1893 break; 1898 break;
1894 1899
1895 if (sysctl_createv(log, 0, &rnode, &cnode, 1900 if (sysctl_createv(log, 0, &rnode, &cnode,
1896 CTLFLAG_READWRITE, CTLTYPE_INT, 1901 CTLFLAG_READWRITE, CTLTYPE_INT,
1897 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 1902 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1898 ixgbe_sysctl_interrupt_rate_handler, 0, 1903 ixgbe_sysctl_interrupt_rate_handler, 0,
1899 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 1904 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1900 break; 1905 break;
1901 1906
1902 if (sysctl_createv(log, 0, &rnode, &cnode, 1907 if (sysctl_createv(log, 0, &rnode, &cnode,
1903 CTLFLAG_READONLY, CTLTYPE_INT, 1908 CTLFLAG_READONLY, CTLTYPE_INT,
1904 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 1909 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1905 ixgbe_sysctl_tdh_handler, 0, (void *)txr, 1910 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1906 0, CTL_CREATE, CTL_EOL) != 0) 1911 0, CTL_CREATE, CTL_EOL) != 0)
1907 break; 1912 break;
1908 1913
1909 if (sysctl_createv(log, 0, &rnode, &cnode, 1914 if (sysctl_createv(log, 0, &rnode, &cnode,
1910 CTLFLAG_READONLY, CTLTYPE_INT, 1915 CTLFLAG_READONLY, CTLTYPE_INT,
1911 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 1916 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1912 ixgbe_sysctl_tdt_handler, 0, (void *)txr, 1917 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1913 0, CTL_CREATE, CTL_EOL) != 0) 1918 0, CTL_CREATE, CTL_EOL) != 0)
1914 break; 1919 break;
1915 1920
1916 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, 1921 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1917 NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); 1922 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1918 evcnt_attach_dynamic(&adapter->queues[i].handleq, 1923 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1919 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1924 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1920 "Handled queue in softint"); 1925 "Handled queue in softint");
1921 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, 1926 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1922 NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); 1927 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1923 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 1928 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1924 NULL, adapter->queues[i].evnamebuf, "TSO"); 1929 NULL, adapter->queues[i].evnamebuf, "TSO");
1925 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 1930 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1926 NULL, adapter->queues[i].evnamebuf, 1931 NULL, adapter->queues[i].evnamebuf,
1927 "TX Queue No Descriptor Available"); 1932 "TX Queue No Descriptor Available");
1928 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 1933 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1929 NULL, adapter->queues[i].evnamebuf, 1934 NULL, adapter->queues[i].evnamebuf,
1930 "Queue Packets Transmitted"); 1935 "Queue Packets Transmitted");
1931#ifndef IXGBE_LEGACY_TX 1936#ifndef IXGBE_LEGACY_TX
1932 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 1937 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1933 NULL, adapter->queues[i].evnamebuf, 1938 NULL, adapter->queues[i].evnamebuf,
1934 "Packets dropped in pcq"); 1939 "Packets dropped in pcq");
1935#endif 1940#endif
1936 1941
1937 if (sysctl_createv(log, 0, &rnode, &cnode, 1942 if (sysctl_createv(log, 0, &rnode, &cnode,
1938 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck", 1943 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxck",
1939 SYSCTL_DESCR("Receive Descriptor next to check"), 1944 SYSCTL_DESCR("Receive Descriptor next to check"),
1940 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 1945 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1941 CTL_CREATE, CTL_EOL) != 0) 1946 CTL_CREATE, CTL_EOL) != 0)
1942 break; 1947 break;
1943 1948
1944 if (sysctl_createv(log, 0, &rnode, &cnode, 1949 if (sysctl_createv(log, 0, &rnode, &cnode,
1945 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf", 1950 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_nxrf",
1946 SYSCTL_DESCR("Receive Descriptor next to refresh"), 1951 SYSCTL_DESCR("Receive Descriptor next to refresh"),
1947 ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0, 1952 ixgbe_sysctl_next_to_refresh_handler, 0, (void *)rxr, 0,
1948 CTL_CREATE, CTL_EOL) != 0) 1953 CTL_CREATE, CTL_EOL) != 0)
1949 break; 1954 break;
1950 1955
1951 if (sysctl_createv(log, 0, &rnode, &cnode, 1956 if (sysctl_createv(log, 0, &rnode, &cnode,
1952 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head", 1957 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_head",
1953 SYSCTL_DESCR("Receive Descriptor Head"), 1958 SYSCTL_DESCR("Receive Descriptor Head"),
1954 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0, 1959 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1955 CTL_CREATE, CTL_EOL) != 0) 1960 CTL_CREATE, CTL_EOL) != 0)
1956 break; 1961 break;
1957 1962
1958 if (sysctl_createv(log, 0, &rnode, &cnode, 1963 if (sysctl_createv(log, 0, &rnode, &cnode,
1959 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail", 1964 CTLFLAG_READONLY, CTLTYPE_INT, "rxd_tail",
1960 SYSCTL_DESCR("Receive Descriptor Tail"), 1965 SYSCTL_DESCR("Receive Descriptor Tail"),
1961 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0, 1966 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1962 CTL_CREATE, CTL_EOL) != 0) 1967 CTL_CREATE, CTL_EOL) != 0)
1963 break; 1968 break;
1964 1969
1965 if (i < __arraycount(stats->qprc)) { 1970 if (i < __arraycount(stats->qprc)) {
1966 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC, 1971 evcnt_attach_dynamic(&stats->qprc[i], EVCNT_TYPE_MISC,
1967 NULL, adapter->queues[i].evnamebuf, "qprc"); 1972 NULL, adapter->queues[i].evnamebuf, "qprc");
1968 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC, 1973 evcnt_attach_dynamic(&stats->qptc[i], EVCNT_TYPE_MISC,
1969 NULL, adapter->queues[i].evnamebuf, "qptc"); 1974 NULL, adapter->queues[i].evnamebuf, "qptc");
1970 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC, 1975 evcnt_attach_dynamic(&stats->qbrc[i], EVCNT_TYPE_MISC,
1971 NULL, adapter->queues[i].evnamebuf, "qbrc"); 1976 NULL, adapter->queues[i].evnamebuf, "qbrc");
1972 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC, 1977 evcnt_attach_dynamic(&stats->qbtc[i], EVCNT_TYPE_MISC,
1973 NULL, adapter->queues[i].evnamebuf, "qbtc"); 1978 NULL, adapter->queues[i].evnamebuf, "qbtc");
1974 if (hw->mac.type >= ixgbe_mac_82599EB) 1979 if (hw->mac.type >= ixgbe_mac_82599EB)
1975 evcnt_attach_dynamic(&stats->qprdc[i], 1980 evcnt_attach_dynamic(&stats->qprdc[i],
1976 EVCNT_TYPE_MISC, NULL, 1981 EVCNT_TYPE_MISC, NULL,
1977 adapter->queues[i].evnamebuf, "qprdc"); 1982 adapter->queues[i].evnamebuf, "qprdc");
1978 } 1983 }
1979 1984
1980 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 1985 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1981 NULL, adapter->queues[i].evnamebuf, 1986 NULL, adapter->queues[i].evnamebuf,
1982 "Queue Packets Received"); 1987 "Queue Packets Received");
1983 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 1988 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1984 NULL, adapter->queues[i].evnamebuf, 1989 NULL, adapter->queues[i].evnamebuf,
1985 "Queue Bytes Received"); 1990 "Queue Bytes Received");
1986 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 1991 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1987 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); 1992 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1988 evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC, 1993 evcnt_attach_dynamic(&rxr->no_mbuf, EVCNT_TYPE_MISC,
1989 NULL, adapter->queues[i].evnamebuf, "Rx no mbuf"); 1994 NULL, adapter->queues[i].evnamebuf, "Rx no mbuf");
1990 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 1995 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1991 NULL, adapter->queues[i].evnamebuf, "Rx discarded"); 1996 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1992#ifdef LRO 1997#ifdef LRO
1993 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 1998 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1994 CTLFLAG_RD, &lro->lro_queued, 0, 1999 CTLFLAG_RD, &lro->lro_queued, 0,
1995 "LRO Queued"); 2000 "LRO Queued");
1996 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 2001 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1997 CTLFLAG_RD, &lro->lro_flushed, 0, 2002 CTLFLAG_RD, &lro->lro_flushed, 0,
1998 "LRO Flushed"); 2003 "LRO Flushed");
1999#endif /* LRO */ 2004#endif /* LRO */
2000 } 2005 }
2001 2006
2002 /* MAC stats get their own sub node */ 2007 /* MAC stats get their own sub node */
2003 2008
2004 snprintf(stats->namebuf, 2009 snprintf(stats->namebuf,
2005 sizeof(stats->namebuf), "%s MAC Statistics", xname); 2010 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2006 2011
2007 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 2012 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "rx csum offload - IP"); 2013 stats->namebuf, "rx csum offload - IP");
2009 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 2014 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "rx csum offload - L4"); 2015 stats->namebuf, "rx csum offload - L4");
2011 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 2016 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "rx csum offload - IP bad"); 2017 stats->namebuf, "rx csum offload - IP bad");
2013 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 2018 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "rx csum offload - L4 bad"); 2019 stats->namebuf, "rx csum offload - L4 bad");
2015 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL, 2020 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Interrupt conditions zero"); 2021 stats->namebuf, "Interrupt conditions zero");
2017 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL, 2022 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Legacy interrupts"); 2023 stats->namebuf, "Legacy interrupts");
2019 2024
2020 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL, 2025 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
2021 stats->namebuf, "CRC Errors"); 2026 stats->namebuf, "CRC Errors");
2022 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL, 2027 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Illegal Byte Errors"); 2028 stats->namebuf, "Illegal Byte Errors");
2024 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL, 2029 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Byte Errors"); 2030 stats->namebuf, "Byte Errors");
2026 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL, 2031 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "MAC Short Packets Discarded"); 2032 stats->namebuf, "MAC Short Packets Discarded");
2028 if (hw->mac.type >= ixgbe_mac_X550) 2033 if (hw->mac.type >= ixgbe_mac_X550)
2029 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL, 2034 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Bad SFD"); 2035 stats->namebuf, "Bad SFD");
2031 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL, 2036 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Total Packets Missed"); 2037 stats->namebuf, "Total Packets Missed");
2033 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL, 2038 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "MAC Local Faults"); 2039 stats->namebuf, "MAC Local Faults");
2035 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL, 2040 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "MAC Remote Faults"); 2041 stats->namebuf, "MAC Remote Faults");
2037 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL, 2042 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "Receive Length Errors"); 2043 stats->namebuf, "Receive Length Errors");
2039 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL, 2044 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "Link XON Transmitted"); 2045 stats->namebuf, "Link XON Transmitted");
2041 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL, 2046 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "Link XON Received"); 2047 stats->namebuf, "Link XON Received");
2043 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL, 2048 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "Link XOFF Transmitted"); 2049 stats->namebuf, "Link XOFF Transmitted");
2045 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL, 2050 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
2046 stats->namebuf, "Link XOFF Received"); 2051 stats->namebuf, "Link XOFF Received");
2047 2052
2048 /* Packet Reception Stats */ 2053 /* Packet Reception Stats */
2049 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL, 2054 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
2050 stats->namebuf, "Total Octets Received"); 2055 stats->namebuf, "Total Octets Received");
2051 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL, 2056 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
2052 stats->namebuf, "Good Octets Received"); 2057 stats->namebuf, "Good Octets Received");
2053 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL, 2058 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
2054 stats->namebuf, "Total Packets Received"); 2059 stats->namebuf, "Total Packets Received");
2055 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL, 2060 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
2056 stats->namebuf, "Good Packets Received"); 2061 stats->namebuf, "Good Packets Received");
2057 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL, 2062 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
2058 stats->namebuf, "Multicast Packets Received"); 2063 stats->namebuf, "Multicast Packets Received");
2059 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL, 2064 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
2060 stats->namebuf, "Broadcast Packets Received"); 2065 stats->namebuf, "Broadcast Packets Received");
2061 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL, 2066 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
2062 stats->namebuf, "64 byte frames received "); 2067 stats->namebuf, "64 byte frames received ");
2063 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL, 2068 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
2064 stats->namebuf, "65-127 byte frames received"); 2069 stats->namebuf, "65-127 byte frames received");
2065 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL, 2070 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
2066 stats->namebuf, "128-255 byte frames received"); 2071 stats->namebuf, "128-255 byte frames received");
2067 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL, 2072 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2068 stats->namebuf, "256-511 byte frames received"); 2073 stats->namebuf, "256-511 byte frames received");
2069 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL, 2074 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2070 stats->namebuf, "512-1023 byte frames received"); 2075 stats->namebuf, "512-1023 byte frames received");
2071 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL, 2076 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2072	    stats->namebuf, "1024-1522 byte frames received"); 2077	    stats->namebuf, "1024-1522 byte frames received");
2073 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL, 2078 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2074 stats->namebuf, "Receive Undersized"); 2079 stats->namebuf, "Receive Undersized");
2075 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL, 2080 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2076 stats->namebuf, "Fragmented Packets Received "); 2081 stats->namebuf, "Fragmented Packets Received ");
2077 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL, 2082 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2078 stats->namebuf, "Oversized Packets Received"); 2083 stats->namebuf, "Oversized Packets Received");
2079 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL, 2084 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2080 stats->namebuf, "Received Jabber"); 2085 stats->namebuf, "Received Jabber");
2081 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL, 2086 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2082 stats->namebuf, "Management Packets Received"); 2087 stats->namebuf, "Management Packets Received");
2083 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL, 2088 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2084 stats->namebuf, "Management Packets Dropped"); 2089 stats->namebuf, "Management Packets Dropped");
2085 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL, 2090 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2086 stats->namebuf, "Checksum Errors"); 2091 stats->namebuf, "Checksum Errors");
2087 2092
2088 /* Packet Transmission Stats */ 2093 /* Packet Transmission Stats */
2089 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL, 2094 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2090 stats->namebuf, "Good Octets Transmitted"); 2095 stats->namebuf, "Good Octets Transmitted");
2091 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL, 2096 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2092 stats->namebuf, "Total Packets Transmitted"); 2097 stats->namebuf, "Total Packets Transmitted");
2093 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL, 2098 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2094 stats->namebuf, "Good Packets Transmitted"); 2099 stats->namebuf, "Good Packets Transmitted");
2095 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL, 2100 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2096 stats->namebuf, "Broadcast Packets Transmitted"); 2101 stats->namebuf, "Broadcast Packets Transmitted");
2097 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL, 2102 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2098 stats->namebuf, "Multicast Packets Transmitted"); 2103 stats->namebuf, "Multicast Packets Transmitted");
2099 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL, 2104 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2100 stats->namebuf, "Management Packets Transmitted"); 2105 stats->namebuf, "Management Packets Transmitted");
2101 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL, 2106 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2102 stats->namebuf, "64 byte frames transmitted "); 2107 stats->namebuf, "64 byte frames transmitted ");
2103 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL, 2108 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2104 stats->namebuf, "65-127 byte frames transmitted"); 2109 stats->namebuf, "65-127 byte frames transmitted");
2105 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL, 2110 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2106 stats->namebuf, "128-255 byte frames transmitted"); 2111 stats->namebuf, "128-255 byte frames transmitted");
2107 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL, 2112 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2108 stats->namebuf, "256-511 byte frames transmitted"); 2113 stats->namebuf, "256-511 byte frames transmitted");
2109 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL, 2114 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2110 stats->namebuf, "512-1023 byte frames transmitted"); 2115 stats->namebuf, "512-1023 byte frames transmitted");
2111 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL, 2116 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2112 stats->namebuf, "1024-1522 byte frames transmitted"); 2117 stats->namebuf, "1024-1522 byte frames transmitted");
2113} /* ixgbe_add_hw_stats */ 2118} /* ixgbe_add_hw_stats */
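
Every counter attached in this function needs a matching evcnt_detach(), and the sysctl nodes created against adapter->sysctllog need sysctl_teardown(), when the device detaches. A sketch of the pairing; this is not the driver's actual detach path, which cleans up elsewhere:

	/* Illustrative only: undo ixgbe_add_hw_stats() on detach. */
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&stats->crcerrs);
	/* ... one evcnt_detach() per evcnt_attach_dynamic() above ... */
	sysctl_teardown(&adapter->sysctllog);
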
2114 2119
2115static void 2120static void
2116ixgbe_clear_evcnt(struct adapter *adapter) 2121ixgbe_clear_evcnt(struct adapter *adapter)
2117{ 2122{
2118 struct tx_ring *txr = adapter->tx_rings; 2123 struct tx_ring *txr = adapter->tx_rings;
2119 struct rx_ring *rxr = adapter->rx_rings; 2124 struct rx_ring *rxr = adapter->rx_rings;
2120 struct ixgbe_hw *hw = &adapter->hw; 2125 struct ixgbe_hw *hw = &adapter->hw;
2121 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 2126 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2122 int i; 2127 int i;
2123 2128
2124 IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, 0); 2129 IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, 0);
2125 IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, 0); 2130 IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, 0);
2126 IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, 0); 2131 IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, 0);
2127 IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, 0); 2132 IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, 0);
2128 IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, 0); 2133 IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, 0);
2129 IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, 0); 2134 IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, 0);
2130 IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, 0); 2135 IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, 0);
2131 IXGBE_EVC_STORE(&adapter->tso_err, 0); 2136 IXGBE_EVC_STORE(&adapter->tso_err, 0);
2132 IXGBE_EVC_STORE(&adapter->watchdog_events, 0); 2137 IXGBE_EVC_STORE(&adapter->watchdog_events, 0);
2133 IXGBE_EVC_STORE(&adapter->admin_irqev, 0); 2138 IXGBE_EVC_STORE(&adapter->admin_irqev, 0);
2134 IXGBE_EVC_STORE(&adapter->link_workev, 0); 2139 IXGBE_EVC_STORE(&adapter->link_workev, 0);
2135 IXGBE_EVC_STORE(&adapter->mod_workev, 0); 2140 IXGBE_EVC_STORE(&adapter->mod_workev, 0);
2136 IXGBE_EVC_STORE(&adapter->msf_workev, 0); 2141 IXGBE_EVC_STORE(&adapter->msf_workev, 0);
2137 IXGBE_EVC_STORE(&adapter->phy_workev, 0); 2142 IXGBE_EVC_STORE(&adapter->phy_workev, 0);
2138 2143