Sun Sep 1 11:12:45 2019 UTC ()
Pull up following revision(s) (requested by msaitoh in ticket #134):

	sys/dev/pci/ixgbe/ixgbe.c: revision 1.202
	sys/dev/pci/ixgbe/ixgbe.c: revision 1.203
	sys/dev/pci/ixgbe/ixgbe.c: revision 1.204
	sys/dev/pci/ixgbe/ixv.c: revision 1.128

Simplify ix{gbe,v}_[un]register_vlan() API suggested by knakahara.

 The API was the same as FreeBSD's pre-iflib API. FreeBSD uses iflib now, so
it's not required for us to keep the old API.
X550EM supports QSFP, so check ixgbe_media_type_fiber_qsfp too.

 An interrupt might not arrive when a module is inserted. When a link status
change interrupt occurs and the driver still regards the SFP as unplugged, the
link becomes up and the real media type is unknown, e.g.:

 % ifconfig -m ixg0
 (snip)
         media: Ethernet autoselect (autoselect rxpause,txpause)
         status: active
         supported Ethernet media:
                 media none
                 media autoselect
 (snip)

To resolve this problem, when a link status change interrupt occurs and the
driver still regards the SFP as unplugged, issue the module softint before
issuing the LSC interrupt.


(martin)
diff -r1.199.2.1 -r1.199.2.2 src/sys/dev/pci/ixgbe/ixgbe.c
diff -r1.125.2.1 -r1.125.2.2 src/sys/dev/pci/ixgbe/ixv.c

cvs diff -r1.199.2.1 -r1.199.2.2 src/sys/dev/pci/ixgbe/ixgbe.c (switch to unified diff)

--- src/sys/dev/pci/ixgbe/ixgbe.c 2019/09/01 11:07:05 1.199.2.1
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2019/09/01 11:12:45 1.199.2.2
@@ -1,4203 +1,4200 @@ @@ -1,4203 +1,4200 @@
1/* $NetBSD: ixgbe.c,v 1.199.2.1 2019/09/01 11:07:05 martin Exp $ */ 1/* $NetBSD: ixgbe.c,v 1.199.2.2 2019/09/01 11:12:45 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37/* 37/*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc. 38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved. 39 * All rights reserved.
40 * 40 *
41 * This code is derived from software contributed to The NetBSD Foundation 41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc. 42 * by Coyote Point Systems, Inc.
43 * 43 *
44 * Redistribution and use in source and binary forms, with or without 44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions 45 * modification, are permitted provided that the following conditions
46 * are met: 46 * are met:
47 * 1. Redistributions of source code must retain the above copyright 47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer. 48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright 49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the 50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution. 51 * documentation and/or other materials provided with the distribution.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#ifdef _KERNEL_OPT 66#ifdef _KERNEL_OPT
67#include "opt_inet.h" 67#include "opt_inet.h"
68#include "opt_inet6.h" 68#include "opt_inet6.h"
69#include "opt_net_mpsafe.h" 69#include "opt_net_mpsafe.h"
70#endif 70#endif
71 71
72#include "ixgbe.h" 72#include "ixgbe.h"
73#include "ixgbe_sriov.h" 73#include "ixgbe_sriov.h"
74#include "vlan.h" 74#include "vlan.h"
75 75
76#include <sys/cprng.h> 76#include <sys/cprng.h>
77#include <dev/mii/mii.h> 77#include <dev/mii/mii.h>
78#include <dev/mii/miivar.h> 78#include <dev/mii/miivar.h>
79 79
80/************************************************************************ 80/************************************************************************
81 * Driver version 81 * Driver version
82 ************************************************************************/ 82 ************************************************************************/
83static const char ixgbe_driver_version[] = "4.0.1-k"; 83static const char ixgbe_driver_version[] = "4.0.1-k";
84/* XXX NetBSD: + 3.3.10 */ 84/* XXX NetBSD: + 3.3.10 */
85 85
86/************************************************************************ 86/************************************************************************
87 * PCI Device ID Table 87 * PCI Device ID Table
88 * 88 *
89 * Used by probe to select devices to load on 89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings 90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s 91 * Last entry must be all 0s
92 * 92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/ 94 ************************************************************************/
95static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] = 95static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96{ 96{
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, 97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, 98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, 99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, 100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, 101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, 102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0}, 103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, 104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, 105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, 106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, 107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, 108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0}, 109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, 110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, 111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, 112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0}, 113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, 114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, 115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, 116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, 117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, 118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, 119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, 120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, 121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, 122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, 123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, 124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, 125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, 126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0}, 127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, 128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, 129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, 130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0}, 131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0}, 132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0}, 133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0}, 134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0}, 135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0}, 136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0}, 137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0}, 138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0}, 139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0}, 140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0}, 141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0}, 142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0}, 143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0}, 144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0}, 145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0}, 146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
147 /* required last entry */ 147 /* required last entry */
148 {0, 0, 0, 0, 0} 148 {0, 0, 0, 0, 0}
149}; 149};
150 150
151/************************************************************************ 151/************************************************************************
152 * Table of branding strings 152 * Table of branding strings
153 ************************************************************************/ 153 ************************************************************************/
154static const char *ixgbe_strings[] = { 154static const char *ixgbe_strings[] = {
155 "Intel(R) PRO/10GbE PCI-Express Network Driver" 155 "Intel(R) PRO/10GbE PCI-Express Network Driver"
156}; 156};
157 157
158/************************************************************************ 158/************************************************************************
159 * Function prototypes 159 * Function prototypes
160 ************************************************************************/ 160 ************************************************************************/
161static int ixgbe_probe(device_t, cfdata_t, void *); 161static int ixgbe_probe(device_t, cfdata_t, void *);
162static void ixgbe_attach(device_t, device_t, void *); 162static void ixgbe_attach(device_t, device_t, void *);
163static int ixgbe_detach(device_t, int); 163static int ixgbe_detach(device_t, int);
164#if 0 164#if 0
165static int ixgbe_shutdown(device_t); 165static int ixgbe_shutdown(device_t);
166#endif 166#endif
167static bool ixgbe_suspend(device_t, const pmf_qual_t *); 167static bool ixgbe_suspend(device_t, const pmf_qual_t *);
168static bool ixgbe_resume(device_t, const pmf_qual_t *); 168static bool ixgbe_resume(device_t, const pmf_qual_t *);
169static int ixgbe_ifflags_cb(struct ethercom *); 169static int ixgbe_ifflags_cb(struct ethercom *);
170static int ixgbe_ioctl(struct ifnet *, u_long, void *); 170static int ixgbe_ioctl(struct ifnet *, u_long, void *);
171static void ixgbe_ifstop(struct ifnet *, int); 171static void ixgbe_ifstop(struct ifnet *, int);
172static int ixgbe_init(struct ifnet *); 172static int ixgbe_init(struct ifnet *);
173static void ixgbe_init_locked(struct adapter *); 173static void ixgbe_init_locked(struct adapter *);
174static void ixgbe_stop(void *); 174static void ixgbe_stop(void *);
175static void ixgbe_init_device_features(struct adapter *); 175static void ixgbe_init_device_features(struct adapter *);
176static void ixgbe_check_fan_failure(struct adapter *, u32, bool); 176static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
177static void ixgbe_add_media_types(struct adapter *); 177static void ixgbe_add_media_types(struct adapter *);
178static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); 178static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
179static int ixgbe_media_change(struct ifnet *); 179static int ixgbe_media_change(struct ifnet *);
180static int ixgbe_allocate_pci_resources(struct adapter *, 180static int ixgbe_allocate_pci_resources(struct adapter *,
181 const struct pci_attach_args *); 181 const struct pci_attach_args *);
182static void ixgbe_free_softint(struct adapter *); 182static void ixgbe_free_softint(struct adapter *);
183static void ixgbe_get_slot_info(struct adapter *); 183static void ixgbe_get_slot_info(struct adapter *);
184static int ixgbe_allocate_msix(struct adapter *, 184static int ixgbe_allocate_msix(struct adapter *,
185 const struct pci_attach_args *); 185 const struct pci_attach_args *);
186static int ixgbe_allocate_legacy(struct adapter *, 186static int ixgbe_allocate_legacy(struct adapter *,
187 const struct pci_attach_args *); 187 const struct pci_attach_args *);
188static int ixgbe_configure_interrupts(struct adapter *); 188static int ixgbe_configure_interrupts(struct adapter *);
189static void ixgbe_free_pciintr_resources(struct adapter *); 189static void ixgbe_free_pciintr_resources(struct adapter *);
190static void ixgbe_free_pci_resources(struct adapter *); 190static void ixgbe_free_pci_resources(struct adapter *);
191static void ixgbe_local_timer(void *); 191static void ixgbe_local_timer(void *);
192static void ixgbe_local_timer1(void *); 192static void ixgbe_local_timer1(void *);
193static void ixgbe_recovery_mode_timer(void *); 193static void ixgbe_recovery_mode_timer(void *);
194static int ixgbe_setup_interface(device_t, struct adapter *); 194static int ixgbe_setup_interface(device_t, struct adapter *);
195static void ixgbe_config_gpie(struct adapter *); 195static void ixgbe_config_gpie(struct adapter *);
196static void ixgbe_config_dmac(struct adapter *); 196static void ixgbe_config_dmac(struct adapter *);
197static void ixgbe_config_delay_values(struct adapter *); 197static void ixgbe_config_delay_values(struct adapter *);
198static void ixgbe_config_link(struct adapter *); 198static void ixgbe_config_link(struct adapter *);
199static void ixgbe_check_wol_support(struct adapter *); 199static void ixgbe_check_wol_support(struct adapter *);
200static int ixgbe_setup_low_power_mode(struct adapter *); 200static int ixgbe_setup_low_power_mode(struct adapter *);
201#if 0 201#if 0
202static void ixgbe_rearm_queues(struct adapter *, u64); 202static void ixgbe_rearm_queues(struct adapter *, u64);
203#endif 203#endif
204 204
205static void ixgbe_initialize_transmit_units(struct adapter *); 205static void ixgbe_initialize_transmit_units(struct adapter *);
206static void ixgbe_initialize_receive_units(struct adapter *); 206static void ixgbe_initialize_receive_units(struct adapter *);
207static void ixgbe_enable_rx_drop(struct adapter *); 207static void ixgbe_enable_rx_drop(struct adapter *);
208static void ixgbe_disable_rx_drop(struct adapter *); 208static void ixgbe_disable_rx_drop(struct adapter *);
209static void ixgbe_initialize_rss_mapping(struct adapter *); 209static void ixgbe_initialize_rss_mapping(struct adapter *);
210 210
211static void ixgbe_enable_intr(struct adapter *); 211static void ixgbe_enable_intr(struct adapter *);
212static void ixgbe_disable_intr(struct adapter *); 212static void ixgbe_disable_intr(struct adapter *);
213static void ixgbe_update_stats_counters(struct adapter *); 213static void ixgbe_update_stats_counters(struct adapter *);
214static void ixgbe_set_promisc(struct adapter *); 214static void ixgbe_set_promisc(struct adapter *);
215static void ixgbe_set_multi(struct adapter *); 215static void ixgbe_set_multi(struct adapter *);
216static void ixgbe_update_link_status(struct adapter *); 216static void ixgbe_update_link_status(struct adapter *);
217static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); 217static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
218static void ixgbe_configure_ivars(struct adapter *); 218static void ixgbe_configure_ivars(struct adapter *);
219static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 219static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
220static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t); 220static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
221 221
222static void ixgbe_setup_vlan_hw_tagging(struct adapter *); 222static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
223static void ixgbe_setup_vlan_hw_support(struct adapter *); 223static void ixgbe_setup_vlan_hw_support(struct adapter *);
224static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool); 224static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
225static int ixgbe_register_vlan(void *, struct ifnet *, u16); 225static int ixgbe_register_vlan(struct adapter *, u16);
226static int ixgbe_unregister_vlan(void *, struct ifnet *, u16); 226static int ixgbe_unregister_vlan(struct adapter *, u16);
227 227
228static void ixgbe_add_device_sysctls(struct adapter *); 228static void ixgbe_add_device_sysctls(struct adapter *);
229static void ixgbe_add_hw_stats(struct adapter *); 229static void ixgbe_add_hw_stats(struct adapter *);
230static void ixgbe_clear_evcnt(struct adapter *); 230static void ixgbe_clear_evcnt(struct adapter *);
231static int ixgbe_set_flowcntl(struct adapter *, int); 231static int ixgbe_set_flowcntl(struct adapter *, int);
232static int ixgbe_set_advertise(struct adapter *, int); 232static int ixgbe_set_advertise(struct adapter *, int);
233static int ixgbe_get_advertise(struct adapter *); 233static int ixgbe_get_advertise(struct adapter *);
234 234
235/* Sysctl handlers */ 235/* Sysctl handlers */
236static void ixgbe_set_sysctl_value(struct adapter *, const char *, 236static void ixgbe_set_sysctl_value(struct adapter *, const char *,
237 const char *, int *, int); 237 const char *, int *, int);
238static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO); 238static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
239static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO); 239static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
240static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); 240static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
241static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO); 241static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
242static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO); 242static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
243static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO); 243static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
244#ifdef IXGBE_DEBUG 244#ifdef IXGBE_DEBUG
245static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO); 245static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
246static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO); 246static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
247#endif 247#endif
248static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO); 248static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
249static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO); 249static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
250static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO); 250static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
251static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO); 251static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
252static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO); 252static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
253static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO); 253static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
254static int ixgbe_sysctl_debug(SYSCTLFN_PROTO); 254static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
255static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO); 255static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
256static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO); 256static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
257 257
258/* Support for pluggable optic modules */ 258/* Support for pluggable optic modules */
259static bool ixgbe_sfp_probe(struct adapter *); 259static bool ixgbe_sfp_probe(struct adapter *);
260 260
261/* Legacy (single vector) interrupt handler */ 261/* Legacy (single vector) interrupt handler */
262static int ixgbe_legacy_irq(void *); 262static int ixgbe_legacy_irq(void *);
263 263
264/* The MSI/MSI-X Interrupt handlers */ 264/* The MSI/MSI-X Interrupt handlers */
265static int ixgbe_msix_que(void *); 265static int ixgbe_msix_que(void *);
266static int ixgbe_msix_link(void *); 266static int ixgbe_msix_link(void *);
267 267
268/* Software interrupts for deferred work */ 268/* Software interrupts for deferred work */
269static void ixgbe_handle_que(void *); 269static void ixgbe_handle_que(void *);
270static void ixgbe_handle_link(void *); 270static void ixgbe_handle_link(void *);
271static void ixgbe_handle_msf(void *); 271static void ixgbe_handle_msf(void *);
272static void ixgbe_handle_mod(void *); 272static void ixgbe_handle_mod(void *);
273static void ixgbe_handle_phy(void *); 273static void ixgbe_handle_phy(void *);
274 274
275/* Workqueue handler for deferred work */ 275/* Workqueue handler for deferred work */
276static void ixgbe_handle_que_work(struct work *, void *); 276static void ixgbe_handle_que_work(struct work *, void *);
277 277
278static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *); 278static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
279 279
280/************************************************************************ 280/************************************************************************
281 * NetBSD Device Interface Entry Points 281 * NetBSD Device Interface Entry Points
282 ************************************************************************/ 282 ************************************************************************/
283CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter), 283CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
284 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL, 284 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
285 DVF_DETACH_SHUTDOWN); 285 DVF_DETACH_SHUTDOWN);
286 286
287#if 0 287#if 0
288devclass_t ix_devclass; 288devclass_t ix_devclass;
289DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); 289DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
290 290
291MODULE_DEPEND(ix, pci, 1, 1, 1); 291MODULE_DEPEND(ix, pci, 1, 1, 1);
292MODULE_DEPEND(ix, ether, 1, 1, 1); 292MODULE_DEPEND(ix, ether, 1, 1, 1);
293#ifdef DEV_NETMAP 293#ifdef DEV_NETMAP
294MODULE_DEPEND(ix, netmap, 1, 1, 1); 294MODULE_DEPEND(ix, netmap, 1, 1, 1);
295#endif 295#endif
296#endif 296#endif
297 297
298/* 298/*
299 * TUNEABLE PARAMETERS: 299 * TUNEABLE PARAMETERS:
300 */ 300 */
301 301
302/* 302/*
303 * AIM: Adaptive Interrupt Moderation 303 * AIM: Adaptive Interrupt Moderation
304 * which means that the interrupt rate 304 * which means that the interrupt rate
305 * is varied over time based on the 305 * is varied over time based on the
306 * traffic for that interrupt vector 306 * traffic for that interrupt vector
307 */ 307 */
308static bool ixgbe_enable_aim = true; 308static bool ixgbe_enable_aim = true;
309#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7) 309#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
310SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0, 310SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
311 "Enable adaptive interrupt moderation"); 311 "Enable adaptive interrupt moderation");
312 312
313static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 313static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
314SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 314SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
315 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); 315 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
316 316
317/* How many packets rxeof tries to clean at a time */ 317/* How many packets rxeof tries to clean at a time */
318static int ixgbe_rx_process_limit = 256; 318static int ixgbe_rx_process_limit = 256;
319SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, 319SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
320 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); 320 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
321 321
322/* How many packets txeof tries to clean at a time */ 322/* How many packets txeof tries to clean at a time */
323static int ixgbe_tx_process_limit = 256; 323static int ixgbe_tx_process_limit = 256;
324SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, 324SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
325 &ixgbe_tx_process_limit, 0, 325 &ixgbe_tx_process_limit, 0,
326 "Maximum number of sent packets to process at a time, -1 means unlimited"); 326 "Maximum number of sent packets to process at a time, -1 means unlimited");
327 327
328/* Flow control setting, default to full */ 328/* Flow control setting, default to full */
329static int ixgbe_flow_control = ixgbe_fc_full; 329static int ixgbe_flow_control = ixgbe_fc_full;
330SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, 330SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
331 &ixgbe_flow_control, 0, "Default flow control used for all adapters"); 331 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
332 332
333/* Which packet processing uses workqueue or softint */ 333/* Which packet processing uses workqueue or softint */
334static bool ixgbe_txrx_workqueue = false; 334static bool ixgbe_txrx_workqueue = false;
335 335
336/* 336/*
337 * Smart speed setting, default to on 337 * Smart speed setting, default to on
338 * this only works as a compile option 338 * this only works as a compile option
339 * right now as its during attach, set 339 * right now as its during attach, set
340 * this to 'ixgbe_smart_speed_off' to 340 * this to 'ixgbe_smart_speed_off' to
341 * disable. 341 * disable.
342 */ 342 */
343static int ixgbe_smart_speed = ixgbe_smart_speed_on; 343static int ixgbe_smart_speed = ixgbe_smart_speed_on;
344 344
345/* 345/*
346 * MSI-X should be the default for best performance, 346 * MSI-X should be the default for best performance,
347 * but this allows it to be forced off for testing. 347 * but this allows it to be forced off for testing.
348 */ 348 */
349static int ixgbe_enable_msix = 1; 349static int ixgbe_enable_msix = 1;
350SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, 350SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
351 "Enable MSI-X interrupts"); 351 "Enable MSI-X interrupts");
352 352
353/* 353/*
354 * Number of Queues, can be set to 0, 354 * Number of Queues, can be set to 0,
355 * it then autoconfigures based on the 355 * it then autoconfigures based on the
356 * number of cpus with a max of 8. This 356 * number of cpus with a max of 8. This
357 * can be overriden manually here. 357 * can be overriden manually here.
358 */ 358 */
359static int ixgbe_num_queues = 0; 359static int ixgbe_num_queues = 0;
360SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, 360SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
361 "Number of queues to configure, 0 indicates autoconfigure"); 361 "Number of queues to configure, 0 indicates autoconfigure");
362 362
363/* 363/*
364 * Number of TX descriptors per ring, 364 * Number of TX descriptors per ring,
365 * setting higher than RX as this seems 365 * setting higher than RX as this seems
366 * the better performing choice. 366 * the better performing choice.
367 */ 367 */
368static int ixgbe_txd = PERFORM_TXD; 368static int ixgbe_txd = PERFORM_TXD;
369SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0, 369SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
370 "Number of transmit descriptors per queue"); 370 "Number of transmit descriptors per queue");
371 371
372/* Number of RX descriptors per ring */ 372/* Number of RX descriptors per ring */
373static int ixgbe_rxd = PERFORM_RXD; 373static int ixgbe_rxd = PERFORM_RXD;
374SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, 374SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
375 "Number of receive descriptors per queue"); 375 "Number of receive descriptors per queue");
376 376
377/* 377/*
378 * Defining this on will allow the use 378 * Defining this on will allow the use
379 * of unsupported SFP+ modules, note that 379 * of unsupported SFP+ modules, note that
380 * doing so you are on your own :) 380 * doing so you are on your own :)
381 */ 381 */
382static int allow_unsupported_sfp = false; 382static int allow_unsupported_sfp = false;
383#define TUNABLE_INT(__x, __y) 383#define TUNABLE_INT(__x, __y)
384TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp); 384TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
385 385
386/* 386/*
387 * Not sure if Flow Director is fully baked, 387 * Not sure if Flow Director is fully baked,
388 * so we'll default to turning it off. 388 * so we'll default to turning it off.
389 */ 389 */
390static int ixgbe_enable_fdir = 0; 390static int ixgbe_enable_fdir = 0;
391SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, 391SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
392 "Enable Flow Director"); 392 "Enable Flow Director");
393 393
394/* Legacy Transmit (single queue) */ 394/* Legacy Transmit (single queue) */
395static int ixgbe_enable_legacy_tx = 0; 395static int ixgbe_enable_legacy_tx = 0;
396SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN, 396SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
397 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow"); 397 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
398 398
399/* Receive-Side Scaling */ 399/* Receive-Side Scaling */
400static int ixgbe_enable_rss = 1; 400static int ixgbe_enable_rss = 1;
401SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, 401SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
402 "Enable Receive-Side Scaling (RSS)"); 402 "Enable Receive-Side Scaling (RSS)");
403 403
404#if 0 404#if 0
405static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *); 405static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
406static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *); 406static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
407#endif 407#endif
408 408
409#ifdef NET_MPSAFE 409#ifdef NET_MPSAFE
410#define IXGBE_MPSAFE 1 410#define IXGBE_MPSAFE 1
411#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 411#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
412#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE 412#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
413#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 413#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
414#else 414#else
415#define IXGBE_CALLOUT_FLAGS 0 415#define IXGBE_CALLOUT_FLAGS 0
416#define IXGBE_SOFTINFT_FLAGS 0 416#define IXGBE_SOFTINFT_FLAGS 0
417#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 417#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
418#endif 418#endif
419#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 419#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
420 420
421/************************************************************************ 421/************************************************************************
422 * ixgbe_initialize_rss_mapping 422 * ixgbe_initialize_rss_mapping
423 ************************************************************************/ 423 ************************************************************************/
424static void 424static void
425ixgbe_initialize_rss_mapping(struct adapter *adapter) 425ixgbe_initialize_rss_mapping(struct adapter *adapter)
426{ 426{
427 struct ixgbe_hw *hw = &adapter->hw; 427 struct ixgbe_hw *hw = &adapter->hw;
428 u32 reta = 0, mrqc, rss_key[10]; 428 u32 reta = 0, mrqc, rss_key[10];
429 int queue_id, table_size, index_mult; 429 int queue_id, table_size, index_mult;
430 int i, j; 430 int i, j;
431 u32 rss_hash_config; 431 u32 rss_hash_config;
432 432
433 /* force use default RSS key. */ 433 /* force use default RSS key. */
434#ifdef __NetBSD__ 434#ifdef __NetBSD__
435 rss_getkey((uint8_t *) &rss_key); 435 rss_getkey((uint8_t *) &rss_key);
436#else 436#else
437 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 437 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
438 /* Fetch the configured RSS key */ 438 /* Fetch the configured RSS key */
439 rss_getkey((uint8_t *) &rss_key); 439 rss_getkey((uint8_t *) &rss_key);
440 } else { 440 } else {
441 /* set up random bits */ 441 /* set up random bits */
442 cprng_fast(&rss_key, sizeof(rss_key)); 442 cprng_fast(&rss_key, sizeof(rss_key));
443 } 443 }
444#endif 444#endif
445 445
446 /* Set multiplier for RETA setup and table size based on MAC */ 446 /* Set multiplier for RETA setup and table size based on MAC */
447 index_mult = 0x1; 447 index_mult = 0x1;
448 table_size = 128; 448 table_size = 128;
449 switch (adapter->hw.mac.type) { 449 switch (adapter->hw.mac.type) {
450 case ixgbe_mac_82598EB: 450 case ixgbe_mac_82598EB:
451 index_mult = 0x11; 451 index_mult = 0x11;
452 break; 452 break;
453 case ixgbe_mac_X550: 453 case ixgbe_mac_X550:
454 case ixgbe_mac_X550EM_x: 454 case ixgbe_mac_X550EM_x:
455 case ixgbe_mac_X550EM_a: 455 case ixgbe_mac_X550EM_a:
456 table_size = 512; 456 table_size = 512;
457 break; 457 break;
458 default: 458 default:
459 break; 459 break;
460 } 460 }
461 461
462 /* Set up the redirection table */ 462 /* Set up the redirection table */
463 for (i = 0, j = 0; i < table_size; i++, j++) { 463 for (i = 0, j = 0; i < table_size; i++, j++) {
464 if (j == adapter->num_queues) 464 if (j == adapter->num_queues)
465 j = 0; 465 j = 0;
466 466
467 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 467 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
468 /* 468 /*
469 * Fetch the RSS bucket id for the given indirection 469 * Fetch the RSS bucket id for the given indirection
470 * entry. Cap it at the number of configured buckets 470 * entry. Cap it at the number of configured buckets
471 * (which is num_queues.) 471 * (which is num_queues.)
472 */ 472 */
473 queue_id = rss_get_indirection_to_bucket(i); 473 queue_id = rss_get_indirection_to_bucket(i);
474 queue_id = queue_id % adapter->num_queues; 474 queue_id = queue_id % adapter->num_queues;
475 } else 475 } else
476 queue_id = (j * index_mult); 476 queue_id = (j * index_mult);
477 477
478 /* 478 /*
479 * The low 8 bits are for hash value (n+0); 479 * The low 8 bits are for hash value (n+0);
480 * The next 8 bits are for hash value (n+1), etc. 480 * The next 8 bits are for hash value (n+1), etc.
481 */ 481 */
482 reta = reta >> 8; 482 reta = reta >> 8;
483 reta = reta | (((uint32_t) queue_id) << 24); 483 reta = reta | (((uint32_t) queue_id) << 24);
484 if ((i & 3) == 3) { 484 if ((i & 3) == 3) {
485 if (i < 128) 485 if (i < 128)
486 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 486 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
487 else 487 else
488 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 488 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
489 reta); 489 reta);
490 reta = 0; 490 reta = 0;
491 } 491 }
492 } 492 }
493 493
494 /* Now fill our hash function seeds */ 494 /* Now fill our hash function seeds */
495 for (i = 0; i < 10; i++) 495 for (i = 0; i < 10; i++)
496 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 496 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
497 497
498 /* Perform hash on these packet types */ 498 /* Perform hash on these packet types */
499 if (adapter->feat_en & IXGBE_FEATURE_RSS) 499 if (adapter->feat_en & IXGBE_FEATURE_RSS)
500 rss_hash_config = rss_gethashconfig(); 500 rss_hash_config = rss_gethashconfig();
501 else { 501 else {
502 /* 502 /*
503 * Disable UDP - IP fragments aren't currently being handled 503 * Disable UDP - IP fragments aren't currently being handled
504 * and so we end up with a mix of 2-tuple and 4-tuple 504 * and so we end up with a mix of 2-tuple and 4-tuple
505 * traffic. 505 * traffic.
506 */ 506 */
507 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 507 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
508 | RSS_HASHTYPE_RSS_TCP_IPV4 508 | RSS_HASHTYPE_RSS_TCP_IPV4
509 | RSS_HASHTYPE_RSS_IPV6 509 | RSS_HASHTYPE_RSS_IPV6
510 | RSS_HASHTYPE_RSS_TCP_IPV6 510 | RSS_HASHTYPE_RSS_TCP_IPV6
511 | RSS_HASHTYPE_RSS_IPV6_EX 511 | RSS_HASHTYPE_RSS_IPV6_EX
512 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 512 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
513 } 513 }
514 514
515 mrqc = IXGBE_MRQC_RSSEN; 515 mrqc = IXGBE_MRQC_RSSEN;
516 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 516 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
517 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 517 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
518 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 518 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
519 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 519 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
520 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 520 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
522 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 522 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
524 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 524 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
526 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 526 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
528 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 528 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
530 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 530 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
532 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 532 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
534 mrqc |= ixgbe_get_mrqc(adapter->iov_mode); 534 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
535 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 535 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
536} /* ixgbe_initialize_rss_mapping */ 536} /* ixgbe_initialize_rss_mapping */
537 537
538/************************************************************************ 538/************************************************************************
539 * ixgbe_initialize_receive_units - Setup receive registers and features. 539 * ixgbe_initialize_receive_units - Setup receive registers and features.
540 ************************************************************************/ 540 ************************************************************************/
541#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 541#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
542 542
543static void 543static void
544ixgbe_initialize_receive_units(struct adapter *adapter) 544ixgbe_initialize_receive_units(struct adapter *adapter)
545{ 545{
546 struct rx_ring *rxr = adapter->rx_rings; 546 struct rx_ring *rxr = adapter->rx_rings;
547 struct ixgbe_hw *hw = &adapter->hw; 547 struct ixgbe_hw *hw = &adapter->hw;
548 struct ifnet *ifp = adapter->ifp; 548 struct ifnet *ifp = adapter->ifp;
549 int i, j; 549 int i, j;
550 u32 bufsz, fctrl, srrctl, rxcsum; 550 u32 bufsz, fctrl, srrctl, rxcsum;
551 u32 hlreg; 551 u32 hlreg;
552 552
553 /* 553 /*
554 * Make sure receives are disabled while 554 * Make sure receives are disabled while
555 * setting up the descriptor ring 555 * setting up the descriptor ring
556 */ 556 */
557 ixgbe_disable_rx(hw); 557 ixgbe_disable_rx(hw);
558 558
559 /* Enable broadcasts */ 559 /* Enable broadcasts */
560 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 560 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
561 fctrl |= IXGBE_FCTRL_BAM; 561 fctrl |= IXGBE_FCTRL_BAM;
562 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 562 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
563 fctrl |= IXGBE_FCTRL_DPF; 563 fctrl |= IXGBE_FCTRL_DPF;
564 fctrl |= IXGBE_FCTRL_PMCF; 564 fctrl |= IXGBE_FCTRL_PMCF;
565 } 565 }
566 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 566 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
567 567
568 /* Set for Jumbo Frames? */ 568 /* Set for Jumbo Frames? */
569 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 569 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
570 if (ifp->if_mtu > ETHERMTU) 570 if (ifp->if_mtu > ETHERMTU)
571 hlreg |= IXGBE_HLREG0_JUMBOEN; 571 hlreg |= IXGBE_HLREG0_JUMBOEN;
572 else 572 else
573 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 573 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
574 574
575#ifdef DEV_NETMAP 575#ifdef DEV_NETMAP
576 /* CRC stripping is conditional in Netmap */ 576 /* CRC stripping is conditional in Netmap */
577 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 577 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
578 (ifp->if_capenable & IFCAP_NETMAP) && 578 (ifp->if_capenable & IFCAP_NETMAP) &&
579 !ix_crcstrip) 579 !ix_crcstrip)
580 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP; 580 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
581 else 581 else
582#endif /* DEV_NETMAP */ 582#endif /* DEV_NETMAP */
583 hlreg |= IXGBE_HLREG0_RXCRCSTRP; 583 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
584 584
585 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 585 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
586 586
587 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 587 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
588 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 588 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
589 589
590 for (i = 0; i < adapter->num_queues; i++, rxr++) { 590 for (i = 0; i < adapter->num_queues; i++, rxr++) {
591 u64 rdba = rxr->rxdma.dma_paddr; 591 u64 rdba = rxr->rxdma.dma_paddr;
592 u32 reg; 592 u32 reg;
593 int regnum = i / 4; /* 1 register per 4 queues */ 593 int regnum = i / 4; /* 1 register per 4 queues */
594 int regshift = i % 4; /* 4 bits per 1 queue */ 594 int regshift = i % 4; /* 4 bits per 1 queue */
595 j = rxr->me; 595 j = rxr->me;
596 596
597 /* Setup the Base and Length of the Rx Descriptor Ring */ 597 /* Setup the Base and Length of the Rx Descriptor Ring */
598 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 598 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
599 (rdba & 0x00000000ffffffffULL)); 599 (rdba & 0x00000000ffffffffULL));
600 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 600 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
601 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 601 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
602 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 602 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
603 603
604 /* Set up the SRRCTL register */ 604 /* Set up the SRRCTL register */
605 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 605 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
606 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 606 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
607 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 607 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
608 srrctl |= bufsz; 608 srrctl |= bufsz;
609 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 609 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
610 610
611 /* Set RQSMR (Receive Queue Statistic Mapping) register */ 611 /* Set RQSMR (Receive Queue Statistic Mapping) register */
612 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum)); 612 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
613 reg &= ~(0x000000ffUL << (regshift * 8)); 613 reg &= ~(0x000000ffUL << (regshift * 8));
614 reg |= i << (regshift * 8); 614 reg |= i << (regshift * 8);
615 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg); 615 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
616 616
617 /* 617 /*
618 * Set DROP_EN iff we have no flow control and >1 queue. 618 * Set DROP_EN iff we have no flow control and >1 queue.
619 * Note that srrctl was cleared shortly before during reset, 619 * Note that srrctl was cleared shortly before during reset,
620 * so we do not need to clear the bit, but do it just in case 620 * so we do not need to clear the bit, but do it just in case
621 * this code is moved elsewhere. 621 * this code is moved elsewhere.
622 */ 622 */
623 if (adapter->num_queues > 1 && 623 if (adapter->num_queues > 1 &&
624 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 624 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
625 srrctl |= IXGBE_SRRCTL_DROP_EN; 625 srrctl |= IXGBE_SRRCTL_DROP_EN;
626 } else { 626 } else {
627 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 627 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
628 } 628 }
629 629
630 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 630 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
631 631
632 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 632 /* Setup the HW Rx Head and Tail Descriptor Pointers */
633 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 633 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
634 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 634 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
635 635
636 /* Set the driver rx tail address */ 636 /* Set the driver rx tail address */
637 rxr->tail = IXGBE_RDT(rxr->me); 637 rxr->tail = IXGBE_RDT(rxr->me);
638 } 638 }
639 639
640 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 640 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
641 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 641 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
642 | IXGBE_PSRTYPE_UDPHDR 642 | IXGBE_PSRTYPE_UDPHDR
643 | IXGBE_PSRTYPE_IPV4HDR 643 | IXGBE_PSRTYPE_IPV4HDR
644 | IXGBE_PSRTYPE_IPV6HDR; 644 | IXGBE_PSRTYPE_IPV6HDR;
645 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 645 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
646 } 646 }
647 647
648 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 648 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
649 649
650 ixgbe_initialize_rss_mapping(adapter); 650 ixgbe_initialize_rss_mapping(adapter);
651 651
652 if (adapter->num_queues > 1) { 652 if (adapter->num_queues > 1) {
653 /* RSS and RX IPP Checksum are mutually exclusive */ 653 /* RSS and RX IPP Checksum are mutually exclusive */
654 rxcsum |= IXGBE_RXCSUM_PCSD; 654 rxcsum |= IXGBE_RXCSUM_PCSD;
655 } 655 }
656 656
657 if (ifp->if_capenable & IFCAP_RXCSUM) 657 if (ifp->if_capenable & IFCAP_RXCSUM)
658 rxcsum |= IXGBE_RXCSUM_PCSD; 658 rxcsum |= IXGBE_RXCSUM_PCSD;
659 659
660 /* This is useful for calculating UDP/IP fragment checksums */ 660 /* This is useful for calculating UDP/IP fragment checksums */
661 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 661 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
662 rxcsum |= IXGBE_RXCSUM_IPPCSE; 662 rxcsum |= IXGBE_RXCSUM_IPPCSE;
663 663
664 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 664 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
665 665
666} /* ixgbe_initialize_receive_units */ 666} /* ixgbe_initialize_receive_units */
667 667
668/************************************************************************ 668/************************************************************************
669 * ixgbe_initialize_transmit_units - Enable transmit units. 669 * ixgbe_initialize_transmit_units - Enable transmit units.
670 ************************************************************************/ 670 ************************************************************************/
671static void 671static void
672ixgbe_initialize_transmit_units(struct adapter *adapter) 672ixgbe_initialize_transmit_units(struct adapter *adapter)
673{ 673{
674 struct tx_ring *txr = adapter->tx_rings; 674 struct tx_ring *txr = adapter->tx_rings;
675 struct ixgbe_hw *hw = &adapter->hw; 675 struct ixgbe_hw *hw = &adapter->hw;
676 int i; 676 int i;
677 677
678 /* Setup the Base and Length of the Tx Descriptor Ring */ 678 /* Setup the Base and Length of the Tx Descriptor Ring */
679 for (i = 0; i < adapter->num_queues; i++, txr++) { 679 for (i = 0; i < adapter->num_queues; i++, txr++) {
680 u64 tdba = txr->txdma.dma_paddr; 680 u64 tdba = txr->txdma.dma_paddr;
681 u32 txctrl = 0; 681 u32 txctrl = 0;
682 u32 tqsmreg, reg; 682 u32 tqsmreg, reg;
683 int regnum = i / 4; /* 1 register per 4 queues */ 683 int regnum = i / 4; /* 1 register per 4 queues */
684 int regshift = i % 4; /* 4 bits per 1 queue */ 684 int regshift = i % 4; /* 4 bits per 1 queue */
685 int j = txr->me; 685 int j = txr->me;
686 686
687 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 687 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
688 (tdba & 0x00000000ffffffffULL)); 688 (tdba & 0x00000000ffffffffULL));
689 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 689 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
690 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 690 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
691 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 691 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
692 692
693 /* 693 /*
694 * Set TQSMR (Transmit Queue Statistic Mapping) register. 694 * Set TQSMR (Transmit Queue Statistic Mapping) register.
695 * Register location is different between 82598 and others. 695 * Register location is different between 82598 and others.
696 */ 696 */
697 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 697 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
698 tqsmreg = IXGBE_TQSMR(regnum); 698 tqsmreg = IXGBE_TQSMR(regnum);
699 else 699 else
700 tqsmreg = IXGBE_TQSM(regnum); 700 tqsmreg = IXGBE_TQSM(regnum);
701 reg = IXGBE_READ_REG(hw, tqsmreg); 701 reg = IXGBE_READ_REG(hw, tqsmreg);
702 reg &= ~(0x000000ffUL << (regshift * 8)); 702 reg &= ~(0x000000ffUL << (regshift * 8));
703 reg |= i << (regshift * 8); 703 reg |= i << (regshift * 8);
704 IXGBE_WRITE_REG(hw, tqsmreg, reg); 704 IXGBE_WRITE_REG(hw, tqsmreg, reg);
705 705
706 /* Setup the HW Tx Head and Tail descriptor pointers */ 706 /* Setup the HW Tx Head and Tail descriptor pointers */
707 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 707 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
708 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 708 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
709 709
710 /* Cache the tail address */ 710 /* Cache the tail address */
711 txr->tail = IXGBE_TDT(j); 711 txr->tail = IXGBE_TDT(j);
712 712
713 txr->txr_no_space = false; 713 txr->txr_no_space = false;
714 714
715 /* Disable Head Writeback */ 715 /* Disable Head Writeback */
716 /* 716 /*
717 * Note: for X550 series devices, these registers are actually 717 * Note: for X550 series devices, these registers are actually
718 * prefixed with TPH_ isntead of DCA_, but the addresses and 718 * prefixed with TPH_ isntead of DCA_, but the addresses and
719 * fields remain the same. 719 * fields remain the same.
720 */ 720 */
721 switch (hw->mac.type) { 721 switch (hw->mac.type) {
722 case ixgbe_mac_82598EB: 722 case ixgbe_mac_82598EB:
723 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 723 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
724 break; 724 break;
725 default: 725 default:
726 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 726 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
727 break; 727 break;
728 } 728 }
729 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 729 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
730 switch (hw->mac.type) { 730 switch (hw->mac.type) {
731 case ixgbe_mac_82598EB: 731 case ixgbe_mac_82598EB:
732 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 732 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
733 break; 733 break;
734 default: 734 default:
735 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 735 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
736 break; 736 break;
737 } 737 }
738 738
739 } 739 }
740 740
741 if (hw->mac.type != ixgbe_mac_82598EB) { 741 if (hw->mac.type != ixgbe_mac_82598EB) {
742 u32 dmatxctl, rttdcs; 742 u32 dmatxctl, rttdcs;
743 743
744 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 744 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
745 dmatxctl |= IXGBE_DMATXCTL_TE; 745 dmatxctl |= IXGBE_DMATXCTL_TE;
746 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 746 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
747 /* Disable arbiter to set MTQC */ 747 /* Disable arbiter to set MTQC */
748 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 748 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
749 rttdcs |= IXGBE_RTTDCS_ARBDIS; 749 rttdcs |= IXGBE_RTTDCS_ARBDIS;
750 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 750 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
751 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 751 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
752 ixgbe_get_mtqc(adapter->iov_mode)); 752 ixgbe_get_mtqc(adapter->iov_mode));
753 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 753 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
754 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 754 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
755 } 755 }
756 756
757 return; 757 return;
758} /* ixgbe_initialize_transmit_units */ 758} /* ixgbe_initialize_transmit_units */
759 759
760/************************************************************************ 760/************************************************************************
761 * ixgbe_attach - Device initialization routine 761 * ixgbe_attach - Device initialization routine
762 * 762 *
763 * Called when the driver is being loaded. 763 * Called when the driver is being loaded.
764 * Identifies the type of hardware, allocates all resources 764 * Identifies the type of hardware, allocates all resources
765 * and initializes the hardware. 765 * and initializes the hardware.
766 * 766 *
767 * return 0 on success, positive on failure 767 * return 0 on success, positive on failure
768 ************************************************************************/ 768 ************************************************************************/
769static void 769static void
770ixgbe_attach(device_t parent, device_t dev, void *aux) 770ixgbe_attach(device_t parent, device_t dev, void *aux)
771{ 771{
772 struct adapter *adapter; 772 struct adapter *adapter;
773 struct ixgbe_hw *hw; 773 struct ixgbe_hw *hw;
774 int error = -1; 774 int error = -1;
775 u32 ctrl_ext; 775 u32 ctrl_ext;
776 u16 high, low, nvmreg; 776 u16 high, low, nvmreg;
777 pcireg_t id, subid; 777 pcireg_t id, subid;
778 const ixgbe_vendor_info_t *ent; 778 const ixgbe_vendor_info_t *ent;
779 struct pci_attach_args *pa = aux; 779 struct pci_attach_args *pa = aux;
780 const char *str; 780 const char *str;
781 char buf[256]; 781 char buf[256];
782 782
783 INIT_DEBUGOUT("ixgbe_attach: begin"); 783 INIT_DEBUGOUT("ixgbe_attach: begin");
784 784
785 /* Allocate, clear, and link in our adapter structure */ 785 /* Allocate, clear, and link in our adapter structure */
786 adapter = device_private(dev); 786 adapter = device_private(dev);
787 adapter->hw.back = adapter; 787 adapter->hw.back = adapter;
788 adapter->dev = dev; 788 adapter->dev = dev;
789 hw = &adapter->hw; 789 hw = &adapter->hw;
790 adapter->osdep.pc = pa->pa_pc; 790 adapter->osdep.pc = pa->pa_pc;
791 adapter->osdep.tag = pa->pa_tag; 791 adapter->osdep.tag = pa->pa_tag;
792 if (pci_dma64_available(pa)) 792 if (pci_dma64_available(pa))
793 adapter->osdep.dmat = pa->pa_dmat64; 793 adapter->osdep.dmat = pa->pa_dmat64;
794 else 794 else
795 adapter->osdep.dmat = pa->pa_dmat; 795 adapter->osdep.dmat = pa->pa_dmat;
796 adapter->osdep.attached = false; 796 adapter->osdep.attached = false;
797 797
798 ent = ixgbe_lookup(pa); 798 ent = ixgbe_lookup(pa);
799 799
800 KASSERT(ent != NULL); 800 KASSERT(ent != NULL);
801 801
802 aprint_normal(": %s, Version - %s\n", 802 aprint_normal(": %s, Version - %s\n",
803 ixgbe_strings[ent->index], ixgbe_driver_version); 803 ixgbe_strings[ent->index], ixgbe_driver_version);
804 804
805 /* Core Lock Init*/ 805 /* Core Lock Init*/
806 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 806 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
807 807
808 /* Set up the timer callout */ 808 /* Set up the timer callout */
809 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 809 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
810 810
811 /* Determine hardware revision */ 811 /* Determine hardware revision */
812 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 812 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
813 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 813 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
814 814
815 hw->vendor_id = PCI_VENDOR(id); 815 hw->vendor_id = PCI_VENDOR(id);
816 hw->device_id = PCI_PRODUCT(id); 816 hw->device_id = PCI_PRODUCT(id);
817 hw->revision_id = 817 hw->revision_id =
818 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 818 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
819 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 819 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
820 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 820 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
821 821
822 /* 822 /*
823 * Make sure BUSMASTER is set 823 * Make sure BUSMASTER is set
824 */ 824 */
825 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 825 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
826 826
827 /* Do base PCI setup - map BAR0 */ 827 /* Do base PCI setup - map BAR0 */
828 if (ixgbe_allocate_pci_resources(adapter, pa)) { 828 if (ixgbe_allocate_pci_resources(adapter, pa)) {
829 aprint_error_dev(dev, "Allocation of PCI resources failed\n"); 829 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
830 error = ENXIO; 830 error = ENXIO;
831 goto err_out; 831 goto err_out;
832 } 832 }
833 833
834 /* let hardware know driver is loaded */ 834 /* let hardware know driver is loaded */
835 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 835 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
836 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 836 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
837 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 837 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
838 838
839 /* 839 /*
840 * Initialize the shared code 840 * Initialize the shared code
841 */ 841 */
842 if (ixgbe_init_shared_code(hw) != 0) { 842 if (ixgbe_init_shared_code(hw) != 0) {
843 aprint_error_dev(dev, "Unable to initialize the shared code\n"); 843 aprint_error_dev(dev, "Unable to initialize the shared code\n");
844 error = ENXIO; 844 error = ENXIO;
845 goto err_out; 845 goto err_out;
846 } 846 }
847 847
848 switch (hw->mac.type) { 848 switch (hw->mac.type) {
849 case ixgbe_mac_82598EB: 849 case ixgbe_mac_82598EB:
850 str = "82598EB"; 850 str = "82598EB";
851 break; 851 break;
852 case ixgbe_mac_82599EB: 852 case ixgbe_mac_82599EB:
853 str = "82599EB"; 853 str = "82599EB";
854 break; 854 break;
855 case ixgbe_mac_X540: 855 case ixgbe_mac_X540:
856 str = "X540"; 856 str = "X540";
857 break; 857 break;
858 case ixgbe_mac_X550: 858 case ixgbe_mac_X550:
859 str = "X550"; 859 str = "X550";
860 break; 860 break;
861 case ixgbe_mac_X550EM_x: 861 case ixgbe_mac_X550EM_x:
862 str = "X550EM"; 862 str = "X550EM";
863 break; 863 break;
864 case ixgbe_mac_X550EM_a: 864 case ixgbe_mac_X550EM_a:
865 str = "X550EM A"; 865 str = "X550EM A";
866 break; 866 break;
867 default: 867 default:
868 str = "Unknown"; 868 str = "Unknown";
869 break; 869 break;
870 } 870 }
871 aprint_normal_dev(dev, "device %s\n", str); 871 aprint_normal_dev(dev, "device %s\n", str);
872 872
873 if (hw->mbx.ops.init_params) 873 if (hw->mbx.ops.init_params)
874 hw->mbx.ops.init_params(hw); 874 hw->mbx.ops.init_params(hw);
875 875
876 hw->allow_unsupported_sfp = allow_unsupported_sfp; 876 hw->allow_unsupported_sfp = allow_unsupported_sfp;
877 877
878 /* Pick up the 82599 settings */ 878 /* Pick up the 82599 settings */
879 if (hw->mac.type != ixgbe_mac_82598EB) { 879 if (hw->mac.type != ixgbe_mac_82598EB) {
880 hw->phy.smart_speed = ixgbe_smart_speed; 880 hw->phy.smart_speed = ixgbe_smart_speed;
881 adapter->num_segs = IXGBE_82599_SCATTER; 881 adapter->num_segs = IXGBE_82599_SCATTER;
882 } else 882 } else
883 adapter->num_segs = IXGBE_82598_SCATTER; 883 adapter->num_segs = IXGBE_82598_SCATTER;
884 884
885 /* Ensure SW/FW semaphore is free */ 885 /* Ensure SW/FW semaphore is free */
886 ixgbe_init_swfw_semaphore(hw); 886 ixgbe_init_swfw_semaphore(hw);
887 887
888 hw->mac.ops.set_lan_id(hw); 888 hw->mac.ops.set_lan_id(hw);
889 ixgbe_init_device_features(adapter); 889 ixgbe_init_device_features(adapter);
890 890
891 if (ixgbe_configure_interrupts(adapter)) { 891 if (ixgbe_configure_interrupts(adapter)) {
892 error = ENXIO; 892 error = ENXIO;
893 goto err_out; 893 goto err_out;
894 } 894 }
895 895
896 /* Allocate multicast array memory. */ 896 /* Allocate multicast array memory. */
897 adapter->mta = malloc(sizeof(*adapter->mta) * 897 adapter->mta = malloc(sizeof(*adapter->mta) *
898 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); 898 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
899 if (adapter->mta == NULL) { 899 if (adapter->mta == NULL) {
900 aprint_error_dev(dev, "Cannot allocate multicast setup array\n"); 900 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
901 error = ENOMEM; 901 error = ENOMEM;
902 goto err_out; 902 goto err_out;
903 } 903 }
904 904
905 /* Enable WoL (if supported) */ 905 /* Enable WoL (if supported) */
906 ixgbe_check_wol_support(adapter); 906 ixgbe_check_wol_support(adapter);
907 907
908 /* Register for VLAN events */ 908 /* Register for VLAN events */
909 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); 909 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
910 910
911 /* Verify adapter fan is still functional (if applicable) */ 911 /* Verify adapter fan is still functional (if applicable) */
912 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 912 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
913 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 913 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
914 ixgbe_check_fan_failure(adapter, esdp, FALSE); 914 ixgbe_check_fan_failure(adapter, esdp, FALSE);
915 } 915 }
916 916
917 /* Set an initial default flow control value */ 917 /* Set an initial default flow control value */
918 hw->fc.requested_mode = ixgbe_flow_control; 918 hw->fc.requested_mode = ixgbe_flow_control;
919 919
920 /* Sysctls for limiting the amount of work done in the taskqueues */ 920 /* Sysctls for limiting the amount of work done in the taskqueues */
921 ixgbe_set_sysctl_value(adapter, "rx_processing_limit", 921 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
922 "max number of rx packets to process", 922 "max number of rx packets to process",
923 &adapter->rx_process_limit, ixgbe_rx_process_limit); 923 &adapter->rx_process_limit, ixgbe_rx_process_limit);
924 924
925 ixgbe_set_sysctl_value(adapter, "tx_processing_limit", 925 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
926 "max number of tx packets to process", 926 "max number of tx packets to process",
927 &adapter->tx_process_limit, ixgbe_tx_process_limit); 927 &adapter->tx_process_limit, ixgbe_tx_process_limit);
928 928
929 /* Do descriptor calc and sanity checks */ 929 /* Do descriptor calc and sanity checks */
930 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 930 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
931 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { 931 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
932 aprint_error_dev(dev, "TXD config issue, using default!\n"); 932 aprint_error_dev(dev, "TXD config issue, using default!\n");
933 adapter->num_tx_desc = DEFAULT_TXD; 933 adapter->num_tx_desc = DEFAULT_TXD;
934 } else 934 } else
935 adapter->num_tx_desc = ixgbe_txd; 935 adapter->num_tx_desc = ixgbe_txd;
936 936
937 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 937 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
938 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { 938 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
939 aprint_error_dev(dev, "RXD config issue, using default!\n"); 939 aprint_error_dev(dev, "RXD config issue, using default!\n");
940 adapter->num_rx_desc = DEFAULT_RXD; 940 adapter->num_rx_desc = DEFAULT_RXD;
941 } else 941 } else
942 adapter->num_rx_desc = ixgbe_rxd; 942 adapter->num_rx_desc = ixgbe_rxd;
943 943
944 /* Allocate our TX/RX Queues */ 944 /* Allocate our TX/RX Queues */
945 if (ixgbe_allocate_queues(adapter)) { 945 if (ixgbe_allocate_queues(adapter)) {
946 error = ENOMEM; 946 error = ENOMEM;
947 goto err_out; 947 goto err_out;
948 } 948 }
949 949
950 hw->phy.reset_if_overtemp = TRUE; 950 hw->phy.reset_if_overtemp = TRUE;
951 error = ixgbe_reset_hw(hw); 951 error = ixgbe_reset_hw(hw);
952 hw->phy.reset_if_overtemp = FALSE; 952 hw->phy.reset_if_overtemp = FALSE;
953 if (error == IXGBE_ERR_SFP_NOT_PRESENT) { 953 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
954 /* 954 /*
955 * No optics in this port, set up 955 * No optics in this port, set up
956 * so the timer routine will probe 956 * so the timer routine will probe
957 * for later insertion. 957 * for later insertion.
958 */ 958 */
959 adapter->sfp_probe = TRUE; 959 adapter->sfp_probe = TRUE;
960 error = IXGBE_SUCCESS; 960 error = IXGBE_SUCCESS;
961 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 961 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
962 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); 962 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
963 error = EIO; 963 error = EIO;
964 goto err_late; 964 goto err_late;
965 } else if (error) { 965 } else if (error) {
966 aprint_error_dev(dev, "Hardware initialization failed\n"); 966 aprint_error_dev(dev, "Hardware initialization failed\n");
967 error = EIO; 967 error = EIO;
968 goto err_late; 968 goto err_late;
969 } 969 }
970 970
971 /* Make sure we have a good EEPROM before we read from it */ 971 /* Make sure we have a good EEPROM before we read from it */
972 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { 972 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
973 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n"); 973 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
974 error = EIO; 974 error = EIO;
975 goto err_late; 975 goto err_late;
976 } 976 }
977 977
978 aprint_normal("%s:", device_xname(dev)); 978 aprint_normal("%s:", device_xname(dev));
979 /* NVM Image Version */ 979 /* NVM Image Version */
980 high = low = 0; 980 high = low = 0;
981 switch (hw->mac.type) { 981 switch (hw->mac.type) {
982 case ixgbe_mac_X540: 982 case ixgbe_mac_X540:
983 case ixgbe_mac_X550EM_a: 983 case ixgbe_mac_X550EM_a:
984 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 984 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
985 if (nvmreg == 0xffff) 985 if (nvmreg == 0xffff)
986 break; 986 break;
987 high = (nvmreg >> 12) & 0x0f; 987 high = (nvmreg >> 12) & 0x0f;
988 low = (nvmreg >> 4) & 0xff; 988 low = (nvmreg >> 4) & 0xff;
989 id = nvmreg & 0x0f; 989 id = nvmreg & 0x0f;
990 aprint_normal(" NVM Image Version %u.", high); 990 aprint_normal(" NVM Image Version %u.", high);
991 if (hw->mac.type == ixgbe_mac_X540) 991 if (hw->mac.type == ixgbe_mac_X540)
992 str = "%x"; 992 str = "%x";
993 else 993 else
994 str = "%02x"; 994 str = "%02x";
995 aprint_normal(str, low); 995 aprint_normal(str, low);
996 aprint_normal(" ID 0x%x,", id); 996 aprint_normal(" ID 0x%x,", id);
997 break; 997 break;
998 case ixgbe_mac_X550EM_x: 998 case ixgbe_mac_X550EM_x:
999 case ixgbe_mac_X550: 999 case ixgbe_mac_X550:
1000 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 1000 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1001 if (nvmreg == 0xffff) 1001 if (nvmreg == 0xffff)
1002 break; 1002 break;
1003 high = (nvmreg >> 12) & 0x0f; 1003 high = (nvmreg >> 12) & 0x0f;
1004 low = nvmreg & 0xff; 1004 low = nvmreg & 0xff;
1005 aprint_normal(" NVM Image Version %u.%02x,", high, low); 1005 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1006 break; 1006 break;
1007 default: 1007 default:
1008 break; 1008 break;
1009 } 1009 }
1010 hw->eeprom.nvm_image_ver_high = high; 1010 hw->eeprom.nvm_image_ver_high = high;
1011 hw->eeprom.nvm_image_ver_low = low; 1011 hw->eeprom.nvm_image_ver_low = low;
1012 1012
1013 /* PHY firmware revision */ 1013 /* PHY firmware revision */
1014 switch (hw->mac.type) { 1014 switch (hw->mac.type) {
1015 case ixgbe_mac_X540: 1015 case ixgbe_mac_X540:
1016 case ixgbe_mac_X550: 1016 case ixgbe_mac_X550:
1017 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg); 1017 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1018 if (nvmreg == 0xffff) 1018 if (nvmreg == 0xffff)
1019 break; 1019 break;
1020 high = (nvmreg >> 12) & 0x0f; 1020 high = (nvmreg >> 12) & 0x0f;
1021 low = (nvmreg >> 4) & 0xff; 1021 low = (nvmreg >> 4) & 0xff;
1022 id = nvmreg & 0x000f; 1022 id = nvmreg & 0x000f;
1023 aprint_normal(" PHY FW Revision %u.", high); 1023 aprint_normal(" PHY FW Revision %u.", high);
1024 if (hw->mac.type == ixgbe_mac_X540) 1024 if (hw->mac.type == ixgbe_mac_X540)
1025 str = "%x"; 1025 str = "%x";
1026 else 1026 else
1027 str = "%02x"; 1027 str = "%02x";
1028 aprint_normal(str, low); 1028 aprint_normal(str, low);
1029 aprint_normal(" ID 0x%x,", id); 1029 aprint_normal(" ID 0x%x,", id);
1030 break; 1030 break;
1031 default: 1031 default:
1032 break; 1032 break;
1033 } 1033 }
1034 1034
1035 /* NVM Map version & OEM NVM Image version */ 1035 /* NVM Map version & OEM NVM Image version */
1036 switch (hw->mac.type) { 1036 switch (hw->mac.type) {
1037 case ixgbe_mac_X550: 1037 case ixgbe_mac_X550:
1038 case ixgbe_mac_X550EM_x: 1038 case ixgbe_mac_X550EM_x:
1039 case ixgbe_mac_X550EM_a: 1039 case ixgbe_mac_X550EM_a:
1040 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); 1040 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1041 if (nvmreg != 0xffff) { 1041 if (nvmreg != 0xffff) {
1042 high = (nvmreg >> 12) & 0x0f; 1042 high = (nvmreg >> 12) & 0x0f;
1043 low = nvmreg & 0x00ff; 1043 low = nvmreg & 0x00ff;
1044 aprint_normal(" NVM Map version %u.%02x,", high, low); 1044 aprint_normal(" NVM Map version %u.%02x,", high, low);
1045 } 1045 }
1046 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg); 1046 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1047 if (nvmreg != 0xffff) { 1047 if (nvmreg != 0xffff) {
1048 high = (nvmreg >> 12) & 0x0f; 1048 high = (nvmreg >> 12) & 0x0f;
1049 low = nvmreg & 0x00ff; 1049 low = nvmreg & 0x00ff;
1050 aprint_verbose(" OEM NVM Image version %u.%02x,", high, 1050 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1051 low); 1051 low);
1052 } 1052 }
1053 break; 1053 break;
1054 default: 1054 default:
1055 break; 1055 break;
1056 } 1056 }
1057 1057
1058 /* Print the ETrackID */ 1058 /* Print the ETrackID */
1059 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high); 1059 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1060 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low); 1060 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1061 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low); 1061 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1062 1062
1063 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 1063 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1064 error = ixgbe_allocate_msix(adapter, pa); 1064 error = ixgbe_allocate_msix(adapter, pa);
1065 if (error) { 1065 if (error) {
1066 /* Free allocated queue structures first */ 1066 /* Free allocated queue structures first */
1067 ixgbe_free_transmit_structures(adapter); 1067 ixgbe_free_transmit_structures(adapter);
1068 ixgbe_free_receive_structures(adapter); 1068 ixgbe_free_receive_structures(adapter);
1069 free(adapter->queues, M_DEVBUF); 1069 free(adapter->queues, M_DEVBUF);
1070 1070
1071 /* Fallback to legacy interrupt */ 1071 /* Fallback to legacy interrupt */
1072 adapter->feat_en &= ~IXGBE_FEATURE_MSIX; 1072 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1073 if (adapter->feat_cap & IXGBE_FEATURE_MSI) 1073 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1074 adapter->feat_en |= IXGBE_FEATURE_MSI; 1074 adapter->feat_en |= IXGBE_FEATURE_MSI;
1075 adapter->num_queues = 1; 1075 adapter->num_queues = 1;
1076 1076
1077 /* Allocate our TX/RX Queues again */ 1077 /* Allocate our TX/RX Queues again */
1078 if (ixgbe_allocate_queues(adapter)) { 1078 if (ixgbe_allocate_queues(adapter)) {
1079 error = ENOMEM; 1079 error = ENOMEM;
1080 goto err_out; 1080 goto err_out;
1081 } 1081 }
1082 } 1082 }
1083 } 1083 }
1084 /* Recovery mode */ 1084 /* Recovery mode */
1085 switch (adapter->hw.mac.type) { 1085 switch (adapter->hw.mac.type) {
1086 case ixgbe_mac_X550: 1086 case ixgbe_mac_X550:
1087 case ixgbe_mac_X550EM_x: 1087 case ixgbe_mac_X550EM_x:
1088 case ixgbe_mac_X550EM_a: 1088 case ixgbe_mac_X550EM_a:
1089 /* >= 2.00 */ 1089 /* >= 2.00 */
1090 if (hw->eeprom.nvm_image_ver_high >= 2) { 1090 if (hw->eeprom.nvm_image_ver_high >= 2) {
1091 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; 1091 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1092 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE; 1092 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1093 } 1093 }
1094 break; 1094 break;
1095 default: 1095 default:
1096 break; 1096 break;
1097 } 1097 }
1098 1098
1099 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0) 1099 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1100 error = ixgbe_allocate_legacy(adapter, pa); 1100 error = ixgbe_allocate_legacy(adapter, pa);
1101 if (error) 1101 if (error)
1102 goto err_late; 1102 goto err_late;
1103 1103
1104 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */ 1104 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1105 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS, 1105 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1106 ixgbe_handle_link, adapter); 1106 ixgbe_handle_link, adapter);
1107 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1107 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1108 ixgbe_handle_mod, adapter); 1108 ixgbe_handle_mod, adapter);
1109 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1109 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1110 ixgbe_handle_msf, adapter); 1110 ixgbe_handle_msf, adapter);
1111 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1111 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1112 ixgbe_handle_phy, adapter); 1112 ixgbe_handle_phy, adapter);
1113 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 1113 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1114 adapter->fdir_si = 1114 adapter->fdir_si =
1115 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1115 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1116 ixgbe_reinit_fdir, adapter); 1116 ixgbe_reinit_fdir, adapter);
1117 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL) 1117 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1118 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL) 1118 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1119 || ((adapter->feat_en & IXGBE_FEATURE_FDIR) 1119 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1120 && (adapter->fdir_si == NULL))) { 1120 && (adapter->fdir_si == NULL))) {
1121 aprint_error_dev(dev, 1121 aprint_error_dev(dev,
1122 "could not establish software interrupts ()\n"); 1122 "could not establish software interrupts ()\n");
1123 goto err_out; 1123 goto err_out;
1124 } 1124 }
1125 1125
1126 error = ixgbe_start_hw(hw); 1126 error = ixgbe_start_hw(hw);
1127 switch (error) { 1127 switch (error) {
1128 case IXGBE_ERR_EEPROM_VERSION: 1128 case IXGBE_ERR_EEPROM_VERSION:
1129 aprint_error_dev(dev, "This device is a pre-production adapter/" 1129 aprint_error_dev(dev, "This device is a pre-production adapter/"
1130 "LOM. Please be aware there may be issues associated " 1130 "LOM. Please be aware there may be issues associated "
1131 "with your hardware.\nIf you are experiencing problems " 1131 "with your hardware.\nIf you are experiencing problems "
1132 "please contact your Intel or hardware representative " 1132 "please contact your Intel or hardware representative "
1133 "who provided you with this hardware.\n"); 1133 "who provided you with this hardware.\n");
1134 break; 1134 break;
1135 case IXGBE_ERR_SFP_NOT_SUPPORTED: 1135 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1136 aprint_error_dev(dev, "Unsupported SFP+ Module\n"); 1136 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1137 error = EIO; 1137 error = EIO;
1138 goto err_late; 1138 goto err_late;
1139 case IXGBE_ERR_SFP_NOT_PRESENT: 1139 case IXGBE_ERR_SFP_NOT_PRESENT:
1140 aprint_error_dev(dev, "No SFP+ Module found\n"); 1140 aprint_error_dev(dev, "No SFP+ Module found\n");
1141 /* falls thru */ 1141 /* falls thru */
1142 default: 1142 default:
1143 break; 1143 break;
1144 } 1144 }
1145 1145
1146 /* Setup OS specific network interface */ 1146 /* Setup OS specific network interface */
1147 if (ixgbe_setup_interface(dev, adapter) != 0) 1147 if (ixgbe_setup_interface(dev, adapter) != 0)
1148 goto err_late; 1148 goto err_late;
1149 1149
1150 /* 1150 /*
1151 * Print PHY ID only for copper PHY. On device which has SFP(+) cage 1151 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1152 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID. 1152 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1153 */ 1153 */
1154 if (hw->phy.media_type == ixgbe_media_type_copper) { 1154 if (hw->phy.media_type == ixgbe_media_type_copper) {
1155 uint16_t id1, id2; 1155 uint16_t id1, id2;
1156 int oui, model, rev; 1156 int oui, model, rev;
1157 const char *descr; 1157 const char *descr;
1158 1158
1159 id1 = hw->phy.id >> 16; 1159 id1 = hw->phy.id >> 16;
1160 id2 = hw->phy.id & 0xffff; 1160 id2 = hw->phy.id & 0xffff;
1161 oui = MII_OUI(id1, id2); 1161 oui = MII_OUI(id1, id2);
1162 model = MII_MODEL(id2); 1162 model = MII_MODEL(id2);
1163 rev = MII_REV(id2); 1163 rev = MII_REV(id2);
1164 if ((descr = mii_get_descr(oui, model)) != NULL) 1164 if ((descr = mii_get_descr(oui, model)) != NULL)
1165 aprint_normal_dev(dev, 1165 aprint_normal_dev(dev,
1166 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n", 1166 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1167 descr, oui, model, rev); 1167 descr, oui, model, rev);
1168 else 1168 else
1169 aprint_normal_dev(dev, 1169 aprint_normal_dev(dev,
1170 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n", 1170 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1171 oui, model, rev); 1171 oui, model, rev);
1172 } 1172 }
1173 1173
1174 /* Enable the optics for 82599 SFP+ fiber */ 1174 /* Enable the optics for 82599 SFP+ fiber */
1175 ixgbe_enable_tx_laser(hw); 1175 ixgbe_enable_tx_laser(hw);
1176 1176
1177 /* Enable EEE power saving */ 1177 /* Enable EEE power saving */
1178 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 1178 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1179 hw->mac.ops.setup_eee(hw, 1179 hw->mac.ops.setup_eee(hw,
1180 adapter->feat_en & IXGBE_FEATURE_EEE); 1180 adapter->feat_en & IXGBE_FEATURE_EEE);
1181 1181
1182 /* Enable power to the phy. */ 1182 /* Enable power to the phy. */
1183 ixgbe_set_phy_power(hw, TRUE); 1183 ixgbe_set_phy_power(hw, TRUE);
1184 1184
1185 /* Initialize statistics */ 1185 /* Initialize statistics */
1186 ixgbe_update_stats_counters(adapter); 1186 ixgbe_update_stats_counters(adapter);
1187 1187
1188 /* Check PCIE slot type/speed/width */ 1188 /* Check PCIE slot type/speed/width */
1189 ixgbe_get_slot_info(adapter); 1189 ixgbe_get_slot_info(adapter);
1190 1190
1191 /* 1191 /*
1192 * Do time init and sysctl init here, but 1192 * Do time init and sysctl init here, but
1193 * only on the first port of a bypass adapter. 1193 * only on the first port of a bypass adapter.
1194 */ 1194 */
1195 ixgbe_bypass_init(adapter); 1195 ixgbe_bypass_init(adapter);
1196 1196
1197 /* Set an initial dmac value */ 1197 /* Set an initial dmac value */
1198 adapter->dmac = 0; 1198 adapter->dmac = 0;
1199 /* Set initial advertised speeds (if applicable) */ 1199 /* Set initial advertised speeds (if applicable) */
1200 adapter->advertise = ixgbe_get_advertise(adapter); 1200 adapter->advertise = ixgbe_get_advertise(adapter);
1201 1201
1202 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 1202 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1203 ixgbe_define_iov_schemas(dev, &error); 1203 ixgbe_define_iov_schemas(dev, &error);
1204 1204
1205 /* Add sysctls */ 1205 /* Add sysctls */
1206 ixgbe_add_device_sysctls(adapter); 1206 ixgbe_add_device_sysctls(adapter);
1207 ixgbe_add_hw_stats(adapter); 1207 ixgbe_add_hw_stats(adapter);
1208 1208
1209 /* For Netmap */ 1209 /* For Netmap */
1210 adapter->init_locked = ixgbe_init_locked; 1210 adapter->init_locked = ixgbe_init_locked;
1211 adapter->stop_locked = ixgbe_stop; 1211 adapter->stop_locked = ixgbe_stop;
1212 1212
1213 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 1213 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1214 ixgbe_netmap_attach(adapter); 1214 ixgbe_netmap_attach(adapter);
1215 1215
1216 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); 1216 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1217 aprint_verbose_dev(dev, "feature cap %s\n", buf); 1217 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1218 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); 1218 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1219 aprint_verbose_dev(dev, "feature ena %s\n", buf); 1219 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1220 1220
1221 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume)) 1221 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1222 pmf_class_network_register(dev, adapter->ifp); 1222 pmf_class_network_register(dev, adapter->ifp);
1223 else 1223 else
1224 aprint_error_dev(dev, "couldn't establish power handler\n"); 1224 aprint_error_dev(dev, "couldn't establish power handler\n");
1225 1225
1226 /* Init recovery mode timer and state variable */ 1226 /* Init recovery mode timer and state variable */
1227 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { 1227 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1228 adapter->recovery_mode = 0; 1228 adapter->recovery_mode = 0;
1229 1229
1230 /* Set up the timer callout */ 1230 /* Set up the timer callout */
1231 callout_init(&adapter->recovery_mode_timer, 1231 callout_init(&adapter->recovery_mode_timer,
1232 IXGBE_CALLOUT_FLAGS); 1232 IXGBE_CALLOUT_FLAGS);
1233 1233
1234 /* Start the task */ 1234 /* Start the task */
1235 callout_reset(&adapter->recovery_mode_timer, hz, 1235 callout_reset(&adapter->recovery_mode_timer, hz,
1236 ixgbe_recovery_mode_timer, adapter); 1236 ixgbe_recovery_mode_timer, adapter);
1237 } 1237 }
1238 1238
1239 INIT_DEBUGOUT("ixgbe_attach: end"); 1239 INIT_DEBUGOUT("ixgbe_attach: end");
1240 adapter->osdep.attached = true; 1240 adapter->osdep.attached = true;
1241 1241
1242 return; 1242 return;
1243 1243
1244err_late: 1244err_late:
1245 ixgbe_free_transmit_structures(adapter); 1245 ixgbe_free_transmit_structures(adapter);
1246 ixgbe_free_receive_structures(adapter); 1246 ixgbe_free_receive_structures(adapter);
1247 free(adapter->queues, M_DEVBUF); 1247 free(adapter->queues, M_DEVBUF);
1248err_out: 1248err_out:
1249 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 1249 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1250 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1250 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1251 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 1251 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1252 ixgbe_free_softint(adapter); 1252 ixgbe_free_softint(adapter);
1253 ixgbe_free_pci_resources(adapter); 1253 ixgbe_free_pci_resources(adapter);
1254 if (adapter->mta != NULL) 1254 if (adapter->mta != NULL)
1255 free(adapter->mta, M_DEVBUF); 1255 free(adapter->mta, M_DEVBUF);
1256 IXGBE_CORE_LOCK_DESTROY(adapter); 1256 IXGBE_CORE_LOCK_DESTROY(adapter);
1257 1257
1258 return; 1258 return;
1259} /* ixgbe_attach */ 1259} /* ixgbe_attach */
1260 1260
1261/************************************************************************ 1261/************************************************************************
1262 * ixgbe_check_wol_support 1262 * ixgbe_check_wol_support
1263 * 1263 *
1264 * Checks whether the adapter's ports are capable of 1264 * Checks whether the adapter's ports are capable of
1265 * Wake On LAN by reading the adapter's NVM. 1265 * Wake On LAN by reading the adapter's NVM.
1266 * 1266 *
1267 * Sets each port's hw->wol_enabled value depending 1267 * Sets each port's hw->wol_enabled value depending
1268 * on the value read here. 1268 * on the value read here.
1269 ************************************************************************/ 1269 ************************************************************************/
1270static void 1270static void
1271ixgbe_check_wol_support(struct adapter *adapter) 1271ixgbe_check_wol_support(struct adapter *adapter)
1272{ 1272{
1273 struct ixgbe_hw *hw = &adapter->hw; 1273 struct ixgbe_hw *hw = &adapter->hw;
1274 u16 dev_caps = 0; 1274 u16 dev_caps = 0;
1275 1275
1276 /* Find out WoL support for port */ 1276 /* Find out WoL support for port */
1277 adapter->wol_support = hw->wol_enabled = 0; 1277 adapter->wol_support = hw->wol_enabled = 0;
1278 ixgbe_get_device_caps(hw, &dev_caps); 1278 ixgbe_get_device_caps(hw, &dev_caps);
1279 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 1279 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1280 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 1280 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1281 hw->bus.func == 0)) 1281 hw->bus.func == 0))
1282 adapter->wol_support = hw->wol_enabled = 1; 1282 adapter->wol_support = hw->wol_enabled = 1;
1283 1283
1284 /* Save initial wake up filter configuration */ 1284 /* Save initial wake up filter configuration */
1285 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 1285 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1286 1286
1287 return; 1287 return;
1288} /* ixgbe_check_wol_support */ 1288} /* ixgbe_check_wol_support */
1289 1289
1290/************************************************************************ 1290/************************************************************************
1291 * ixgbe_setup_interface 1291 * ixgbe_setup_interface
1292 * 1292 *
1293 * Setup networking device structure and register an interface. 1293 * Setup networking device structure and register an interface.
1294 ************************************************************************/ 1294 ************************************************************************/
1295static int 1295static int
1296ixgbe_setup_interface(device_t dev, struct adapter *adapter) 1296ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1297{ 1297{
1298 struct ethercom *ec = &adapter->osdep.ec; 1298 struct ethercom *ec = &adapter->osdep.ec;
1299 struct ifnet *ifp; 1299 struct ifnet *ifp;
1300 int rv; 1300 int rv;
1301 1301
1302 INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 1302 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1303 1303
1304 ifp = adapter->ifp = &ec->ec_if; 1304 ifp = adapter->ifp = &ec->ec_if;
1305 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); 1305 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1306 ifp->if_baudrate = IF_Gbps(10); 1306 ifp->if_baudrate = IF_Gbps(10);
1307 ifp->if_init = ixgbe_init; 1307 ifp->if_init = ixgbe_init;
1308 ifp->if_stop = ixgbe_ifstop; 1308 ifp->if_stop = ixgbe_ifstop;
1309 ifp->if_softc = adapter; 1309 ifp->if_softc = adapter;
1310 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1310 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1311#ifdef IXGBE_MPSAFE 1311#ifdef IXGBE_MPSAFE
1312 ifp->if_extflags = IFEF_MPSAFE; 1312 ifp->if_extflags = IFEF_MPSAFE;
1313#endif 1313#endif
1314 ifp->if_ioctl = ixgbe_ioctl; 1314 ifp->if_ioctl = ixgbe_ioctl;
1315#if __FreeBSD_version >= 1100045 1315#if __FreeBSD_version >= 1100045
1316 /* TSO parameters */ 1316 /* TSO parameters */
1317 ifp->if_hw_tsomax = 65518; 1317 ifp->if_hw_tsomax = 65518;
1318 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; 1318 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1319 ifp->if_hw_tsomaxsegsize = 2048; 1319 ifp->if_hw_tsomaxsegsize = 2048;
1320#endif 1320#endif
1321 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { 1321 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1322#if 0 1322#if 0
1323 ixgbe_start_locked = ixgbe_legacy_start_locked; 1323 ixgbe_start_locked = ixgbe_legacy_start_locked;
1324#endif 1324#endif
1325 } else { 1325 } else {
1326 ifp->if_transmit = ixgbe_mq_start; 1326 ifp->if_transmit = ixgbe_mq_start;
1327#if 0 1327#if 0
1328 ixgbe_start_locked = ixgbe_mq_start_locked; 1328 ixgbe_start_locked = ixgbe_mq_start_locked;
1329#endif 1329#endif
1330 } 1330 }
1331 ifp->if_start = ixgbe_legacy_start; 1331 ifp->if_start = ixgbe_legacy_start;
1332 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 1332 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1333 IFQ_SET_READY(&ifp->if_snd); 1333 IFQ_SET_READY(&ifp->if_snd);
1334 1334
1335 rv = if_initialize(ifp); 1335 rv = if_initialize(ifp);
1336 if (rv != 0) { 1336 if (rv != 0) {
1337 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); 1337 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1338 return rv; 1338 return rv;
1339 } 1339 }
1340 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); 1340 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1341 ether_ifattach(ifp, adapter->hw.mac.addr); 1341 ether_ifattach(ifp, adapter->hw.mac.addr);
1342 /* 1342 /*
1343 * We use per TX queue softint, so if_deferred_start_init() isn't 1343 * We use per TX queue softint, so if_deferred_start_init() isn't
1344 * used. 1344 * used.
1345 */ 1345 */
1346 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb); 1346 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1347 1347
1348 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1348 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1349 1349
1350 /* 1350 /*
1351 * Tell the upper layer(s) we support long frames. 1351 * Tell the upper layer(s) we support long frames.
1352 */ 1352 */
1353 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1353 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1354 1354
1355 /* Set capability flags */ 1355 /* Set capability flags */
1356 ifp->if_capabilities |= IFCAP_RXCSUM 1356 ifp->if_capabilities |= IFCAP_RXCSUM
1357 | IFCAP_TXCSUM 1357 | IFCAP_TXCSUM
1358 | IFCAP_TSOv4 1358 | IFCAP_TSOv4
1359 | IFCAP_TSOv6; 1359 | IFCAP_TSOv6;
1360 ifp->if_capenable = 0; 1360 ifp->if_capenable = 0;
1361 1361
1362 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING 1362 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1363 | ETHERCAP_VLAN_HWCSUM 1363 | ETHERCAP_VLAN_HWCSUM
1364 | ETHERCAP_JUMBO_MTU 1364 | ETHERCAP_JUMBO_MTU
1365 | ETHERCAP_VLAN_MTU; 1365 | ETHERCAP_VLAN_MTU;
1366 1366
1367 /* Enable the above capabilities by default */ 1367 /* Enable the above capabilities by default */
1368 ec->ec_capenable = ec->ec_capabilities; 1368 ec->ec_capenable = ec->ec_capabilities;
1369 1369
1370 /* 1370 /*
1371 * Don't turn this on by default, if vlans are 1371 * Don't turn this on by default, if vlans are
1372 * created on another pseudo device (eg. lagg) 1372 * created on another pseudo device (eg. lagg)
1373 * then vlan events are not passed thru, breaking 1373 * then vlan events are not passed thru, breaking
1374 * operation, but with HW FILTER off it works. If 1374 * operation, but with HW FILTER off it works. If
1375 * using vlans directly on the ixgbe driver you can 1375 * using vlans directly on the ixgbe driver you can
1376 * enable this and get full hardware tag filtering. 1376 * enable this and get full hardware tag filtering.
1377 */ 1377 */
1378 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1378 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1379 1379
1380 /* 1380 /*
1381 * Specify the media types supported by this adapter and register 1381 * Specify the media types supported by this adapter and register
1382 * callbacks to update media and link information 1382 * callbacks to update media and link information
1383 */ 1383 */
1384 ec->ec_ifmedia = &adapter->media; 1384 ec->ec_ifmedia = &adapter->media;
1385 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, 1385 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1386 ixgbe_media_status); 1386 ixgbe_media_status);
1387 1387
1388 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); 1388 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1389 ixgbe_add_media_types(adapter); 1389 ixgbe_add_media_types(adapter);
1390 1390
1391 /* Set autoselect media by default */ 1391 /* Set autoselect media by default */
1392 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1392 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1393 1393
1394 if_register(ifp); 1394 if_register(ifp);
1395 1395
1396 return (0); 1396 return (0);
1397} /* ixgbe_setup_interface */ 1397} /* ixgbe_setup_interface */
1398 1398
1399/************************************************************************ 1399/************************************************************************
1400 * ixgbe_add_media_types 1400 * ixgbe_add_media_types
1401 ************************************************************************/ 1401 ************************************************************************/
1402static void 1402static void
1403ixgbe_add_media_types(struct adapter *adapter) 1403ixgbe_add_media_types(struct adapter *adapter)
1404{ 1404{
1405 struct ixgbe_hw *hw = &adapter->hw; 1405 struct ixgbe_hw *hw = &adapter->hw;
1406 device_t dev = adapter->dev; 1406 device_t dev = adapter->dev;
1407 u64 layer; 1407 u64 layer;
1408 1408
1409 layer = adapter->phy_layer; 1409 layer = adapter->phy_layer;
1410 1410
1411#define ADD(mm, dd) \ 1411#define ADD(mm, dd) \
1412 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL); 1412 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1413 1413
1414 ADD(IFM_NONE, 0); 1414 ADD(IFM_NONE, 0);
1415 1415
1416 /* Media types with matching NetBSD media defines */ 1416 /* Media types with matching NetBSD media defines */
1417 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { 1417 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1418 ADD(IFM_10G_T | IFM_FDX, 0); 1418 ADD(IFM_10G_T | IFM_FDX, 0);
1419 } 1419 }
1420 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { 1420 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1421 ADD(IFM_1000_T | IFM_FDX, 0); 1421 ADD(IFM_1000_T | IFM_FDX, 0);
1422 } 1422 }
1423 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) { 1423 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1424 ADD(IFM_100_TX | IFM_FDX, 0); 1424 ADD(IFM_100_TX | IFM_FDX, 0);
1425 } 1425 }
1426 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) { 1426 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1427 ADD(IFM_10_T | IFM_FDX, 0); 1427 ADD(IFM_10_T | IFM_FDX, 0);
1428 } 1428 }
1429 1429
1430 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1430 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1431 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) { 1431 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1432 ADD(IFM_10G_TWINAX | IFM_FDX, 0); 1432 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1433 } 1433 }
1434 1434
1435 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1435 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1436 ADD(IFM_10G_LR | IFM_FDX, 0); 1436 ADD(IFM_10G_LR | IFM_FDX, 0);
1437 if (hw->phy.multispeed_fiber) { 1437 if (hw->phy.multispeed_fiber) {
1438 ADD(IFM_1000_LX | IFM_FDX, 0); 1438 ADD(IFM_1000_LX | IFM_FDX, 0);
1439 } 1439 }
1440 } 1440 }
1441 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1441 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1442 ADD(IFM_10G_SR | IFM_FDX, 0); 1442 ADD(IFM_10G_SR | IFM_FDX, 0);
1443 if (hw->phy.multispeed_fiber) { 1443 if (hw->phy.multispeed_fiber) {
1444 ADD(IFM_1000_SX | IFM_FDX, 0); 1444 ADD(IFM_1000_SX | IFM_FDX, 0);
1445 } 1445 }
1446 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { 1446 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1447 ADD(IFM_1000_SX | IFM_FDX, 0); 1447 ADD(IFM_1000_SX | IFM_FDX, 0);
1448 } 1448 }
1449 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) { 1449 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1450 ADD(IFM_10G_CX4 | IFM_FDX, 0); 1450 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1451 } 1451 }
1452 1452
1453 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 1453 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1454 ADD(IFM_10G_KR | IFM_FDX, 0); 1454 ADD(IFM_10G_KR | IFM_FDX, 0);
1455 } 1455 }
1456 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 1456 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1457 ADD(IFM_10G_KX4 | IFM_FDX, 0); 1457 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1458 } 1458 }
1459 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 1459 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1460 ADD(IFM_1000_KX | IFM_FDX, 0); 1460 ADD(IFM_1000_KX | IFM_FDX, 0);
1461 } 1461 }
1462 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { 1462 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1463 ADD(IFM_2500_KX | IFM_FDX, 0); 1463 ADD(IFM_2500_KX | IFM_FDX, 0);
1464 } 1464 }
1465 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) { 1465 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1466 ADD(IFM_2500_T | IFM_FDX, 0); 1466 ADD(IFM_2500_T | IFM_FDX, 0);
1467 } 1467 }
1468 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) { 1468 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1469 ADD(IFM_5000_T | IFM_FDX, 0); 1469 ADD(IFM_5000_T | IFM_FDX, 0);
1470 } 1470 }
1471 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1471 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1472 device_printf(dev, "Media supported: 1000baseBX\n"); 1472 device_printf(dev, "Media supported: 1000baseBX\n");
1473 /* XXX no ifmedia_set? */ 1473 /* XXX no ifmedia_set? */
1474 1474
1475 ADD(IFM_AUTO, 0); 1475 ADD(IFM_AUTO, 0);
1476 1476
1477#undef ADD 1477#undef ADD
1478} /* ixgbe_add_media_types */ 1478} /* ixgbe_add_media_types */
1479 1479
1480/************************************************************************ 1480/************************************************************************
1481 * ixgbe_is_sfp 1481 * ixgbe_is_sfp
1482 ************************************************************************/ 1482 ************************************************************************/
1483static inline bool 1483static inline bool
1484ixgbe_is_sfp(struct ixgbe_hw *hw) 1484ixgbe_is_sfp(struct ixgbe_hw *hw)
1485{ 1485{
1486 switch (hw->mac.type) { 1486 switch (hw->mac.type) {
1487 case ixgbe_mac_82598EB: 1487 case ixgbe_mac_82598EB:
1488 if (hw->phy.type == ixgbe_phy_nl) 1488 if (hw->phy.type == ixgbe_phy_nl)
1489 return (TRUE); 1489 return (TRUE);
1490 return (FALSE); 1490 return (FALSE);
1491 case ixgbe_mac_82599EB: 1491 case ixgbe_mac_82599EB:
 1492 case ixgbe_mac_X550EM_x:
 1493 case ixgbe_mac_X550EM_a:
1492 switch (hw->mac.ops.get_media_type(hw)) { 1494 switch (hw->mac.ops.get_media_type(hw)) {
1493 case ixgbe_media_type_fiber: 1495 case ixgbe_media_type_fiber:
1494 case ixgbe_media_type_fiber_qsfp: 1496 case ixgbe_media_type_fiber_qsfp:
1495 return (TRUE); 1497 return (TRUE);
1496 default: 1498 default:
1497 return (FALSE); 1499 return (FALSE);
1498 } 1500 }
1499 case ixgbe_mac_X550EM_x: 
1500 case ixgbe_mac_X550EM_a: 
1501 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) 
1502 return (TRUE); 
1503 return (FALSE); 
1504 default: 1501 default:
1505 return (FALSE); 1502 return (FALSE);
1506 } 1503 }
1507} /* ixgbe_is_sfp */ 1504} /* ixgbe_is_sfp */
1508 1505
1509/************************************************************************ 1506/************************************************************************
1510 * ixgbe_config_link 1507 * ixgbe_config_link
1511 ************************************************************************/ 1508 ************************************************************************/
1512static void 1509static void
1513ixgbe_config_link(struct adapter *adapter) 1510ixgbe_config_link(struct adapter *adapter)
1514{ 1511{
1515 struct ixgbe_hw *hw = &adapter->hw; 1512 struct ixgbe_hw *hw = &adapter->hw;
1516 u32 autoneg, err = 0; 1513 u32 autoneg, err = 0;
1517 bool sfp, negotiate = false; 1514 bool sfp, negotiate = false;
1518 1515
1519 sfp = ixgbe_is_sfp(hw); 1516 sfp = ixgbe_is_sfp(hw);
1520 1517
1521 if (sfp) { 1518 if (sfp) {
1522 if (hw->phy.multispeed_fiber) { 1519 if (hw->phy.multispeed_fiber) {
1523 ixgbe_enable_tx_laser(hw); 1520 ixgbe_enable_tx_laser(hw);
1524 kpreempt_disable(); 1521 kpreempt_disable();
1525 softint_schedule(adapter->msf_si); 1522 softint_schedule(adapter->msf_si);
1526 kpreempt_enable(); 1523 kpreempt_enable();
1527 } 1524 }
1528 kpreempt_disable(); 1525 kpreempt_disable();
1529 softint_schedule(adapter->mod_si); 1526 softint_schedule(adapter->mod_si);
1530 kpreempt_enable(); 1527 kpreempt_enable();
1531 } else { 1528 } else {
1532 struct ifmedia *ifm = &adapter->media; 1529 struct ifmedia *ifm = &adapter->media;
1533 1530
1534 if (hw->mac.ops.check_link) 1531 if (hw->mac.ops.check_link)
1535 err = ixgbe_check_link(hw, &adapter->link_speed, 1532 err = ixgbe_check_link(hw, &adapter->link_speed,
1536 &adapter->link_up, FALSE); 1533 &adapter->link_up, FALSE);
1537 if (err) 1534 if (err)
1538 return; 1535 return;
1539 1536
1540 /* 1537 /*
1541 * Check if it's the first call. If it's the first call, 1538 * Check if it's the first call. If it's the first call,
1542 * get value for auto negotiation. 1539 * get value for auto negotiation.
1543 */ 1540 */
1544 autoneg = hw->phy.autoneg_advertised; 1541 autoneg = hw->phy.autoneg_advertised;
1545 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE) 1542 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1546 && ((!autoneg) && (hw->mac.ops.get_link_capabilities))) 1543 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1547 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1544 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1548 &negotiate); 1545 &negotiate);
1549 if (err) 1546 if (err)
1550 return; 1547 return;
1551 if (hw->mac.ops.setup_link) 1548 if (hw->mac.ops.setup_link)
1552 err = hw->mac.ops.setup_link(hw, autoneg, 1549 err = hw->mac.ops.setup_link(hw, autoneg,
1553 adapter->link_up); 1550 adapter->link_up);
1554 } 1551 }
1555 1552
1556} /* ixgbe_config_link */ 1553} /* ixgbe_config_link */
1557 1554
1558/************************************************************************ 1555/************************************************************************
1559 * ixgbe_update_stats_counters - Update board statistics counters. 1556 * ixgbe_update_stats_counters - Update board statistics counters.
1560 ************************************************************************/ 1557 ************************************************************************/
1561static void 1558static void
1562ixgbe_update_stats_counters(struct adapter *adapter) 1559ixgbe_update_stats_counters(struct adapter *adapter)
1563{ 1560{
1564 struct ifnet *ifp = adapter->ifp; 1561 struct ifnet *ifp = adapter->ifp;
1565 struct ixgbe_hw *hw = &adapter->hw; 1562 struct ixgbe_hw *hw = &adapter->hw;
1566 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1563 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1567 u32 missed_rx = 0, bprc, lxon, lxoff, total; 1564 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1568 u64 total_missed_rx = 0; 1565 u64 total_missed_rx = 0;
1569 uint64_t crcerrs, rlec; 1566 uint64_t crcerrs, rlec;
1570 unsigned int queue_counters; 1567 unsigned int queue_counters;
1571 int i; 1568 int i;
1572 1569
1573 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS); 1570 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1574 stats->crcerrs.ev_count += crcerrs; 1571 stats->crcerrs.ev_count += crcerrs;
1575 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 1572 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1576 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC); 1573 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1577 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC); 1574 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1578 if (hw->mac.type == ixgbe_mac_X550) 1575 if (hw->mac.type == ixgbe_mac_X550)
1579 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC); 1576 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1580 1577
1581 /* 16 registers exist */ 1578 /* 16 registers exist */
1582 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues); 1579 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1583 for (i = 0; i < queue_counters; i++) { 1580 for (i = 0; i < queue_counters; i++) {
1584 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 1581 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1585 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 1582 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1586 if (hw->mac.type >= ixgbe_mac_82599EB) { 1583 if (hw->mac.type >= ixgbe_mac_82599EB) {
1587 stats->qprdc[i].ev_count 1584 stats->qprdc[i].ev_count
1588 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1585 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1589 } 1586 }
1590 } 1587 }
1591 1588
1592 /* 8 registers exist */ 1589 /* 8 registers exist */
1593 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1590 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1594 uint32_t mp; 1591 uint32_t mp;
1595 1592
1596 /* MPC */ 1593 /* MPC */
1597 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 1594 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1598 /* global total per queue */ 1595 /* global total per queue */
1599 stats->mpc[i].ev_count += mp; 1596 stats->mpc[i].ev_count += mp;
1600 /* running comprehensive total for stats display */ 1597 /* running comprehensive total for stats display */
1601 total_missed_rx += mp; 1598 total_missed_rx += mp;
1602 1599
1603 if (hw->mac.type == ixgbe_mac_82598EB) 1600 if (hw->mac.type == ixgbe_mac_82598EB)
1604 stats->rnbc[i].ev_count 1601 stats->rnbc[i].ev_count
1605 += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 1602 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1606 1603
1607 stats->pxontxc[i].ev_count 1604 stats->pxontxc[i].ev_count
1608 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 1605 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1609 stats->pxofftxc[i].ev_count 1606 stats->pxofftxc[i].ev_count
1610 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 1607 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1611 if (hw->mac.type >= ixgbe_mac_82599EB) { 1608 if (hw->mac.type >= ixgbe_mac_82599EB) {
1612 stats->pxonrxc[i].ev_count 1609 stats->pxonrxc[i].ev_count
1613 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 1610 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1614 stats->pxoffrxc[i].ev_count 1611 stats->pxoffrxc[i].ev_count
1615 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 1612 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1616 stats->pxon2offc[i].ev_count 1613 stats->pxon2offc[i].ev_count
1617 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 1614 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1618 } else { 1615 } else {
1619 stats->pxonrxc[i].ev_count 1616 stats->pxonrxc[i].ev_count
1620 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 1617 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1621 stats->pxoffrxc[i].ev_count 1618 stats->pxoffrxc[i].ev_count
1622 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 1619 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1623 } 1620 }
1624 } 1621 }
1625 stats->mpctotal.ev_count += total_missed_rx; 1622 stats->mpctotal.ev_count += total_missed_rx;
1626 1623
1627 /* Document says M[LR]FC are valid when link is up and 10Gbps */ 1624 /* Document says M[LR]FC are valid when link is up and 10Gbps */
1628 if ((adapter->link_active == LINK_STATE_UP) 1625 if ((adapter->link_active == LINK_STATE_UP)
1629 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) { 1626 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1630 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC); 1627 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1631 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC); 1628 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1632 } 1629 }
1633 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC); 1630 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1634 stats->rlec.ev_count += rlec; 1631 stats->rlec.ev_count += rlec;
1635 1632
1636 /* Hardware workaround, gprc counts missed packets */ 1633 /* Hardware workaround, gprc counts missed packets */
1637 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx; 1634 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1638 1635
1639 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 1636 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1640 stats->lxontxc.ev_count += lxon; 1637 stats->lxontxc.ev_count += lxon;
1641 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 1638 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1642 stats->lxofftxc.ev_count += lxoff; 1639 stats->lxofftxc.ev_count += lxoff;
1643 total = lxon + lxoff; 1640 total = lxon + lxoff;
1644 1641
1645 if (hw->mac.type != ixgbe_mac_82598EB) { 1642 if (hw->mac.type != ixgbe_mac_82598EB) {
1646 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) + 1643 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1647 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 1644 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1648 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1645 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1649 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN; 1646 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1650 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) + 1647 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1651 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 1648 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1652 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 1649 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1653 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 1650 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1654 } else { 1651 } else {
1655 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 1652 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1656 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 1653 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1657 /* 82598 only has a counter in the high register */ 1654 /* 82598 only has a counter in the high register */
1658 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH); 1655 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1659 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN; 1656 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1660 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH); 1657 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1661 } 1658 }
1662 1659
1663 /* 1660 /*
1664 * Workaround: mprc hardware is incorrectly counting 1661 * Workaround: mprc hardware is incorrectly counting
1665 * broadcasts, so for now we subtract those. 1662 * broadcasts, so for now we subtract those.
1666 */ 1663 */
1667 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 1664 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1668 stats->bprc.ev_count += bprc; 1665 stats->bprc.ev_count += bprc;
1669 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) 1666 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1670 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0); 1667 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1671 1668
1672 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64); 1669 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1673 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127); 1670 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1674 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255); 1671 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1675 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511); 1672 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1676 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023); 1673 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1677 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522); 1674 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1678 1675
1679 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total; 1676 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1680 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total; 1677 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1681 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total; 1678 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1682 1679
1683 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC); 1680 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1684 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC); 1681 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1685 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC); 1682 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1686 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC); 1683 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1687 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 1684 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1688 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 1685 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1689 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 1686 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1690 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR); 1687 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1691 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT); 1688 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1692 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127); 1689 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1693 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255); 1690 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1694 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511); 1691 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1695 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023); 1692 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1696 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522); 1693 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1697 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC); 1694 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1698 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC); 1695 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1699 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC); 1696 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1700 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST); 1697 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1701 /* Only read FCOE on 82599 */ 1698 /* Only read FCOE on 82599 */
1702 if (hw->mac.type != ixgbe_mac_82598EB) { 1699 if (hw->mac.type != ixgbe_mac_82598EB) {
1703 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 1700 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1704 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 1701 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1705 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 1702 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1706 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 1703 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1707 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 1704 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1708 } 1705 }
1709 1706
1710 /* Fill out the OS statistics structure */ 1707 /* Fill out the OS statistics structure */
1711 /* 1708 /*
1712 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with 1709 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
1713 * adapter->stats counters. It's required to make ifconfig -z 1710 * adapter->stats counters. It's required to make ifconfig -z
1714 * (SOICZIFDATA) work. 1711 * (SOICZIFDATA) work.
1715 */ 1712 */
1716 ifp->if_collisions = 0; 1713 ifp->if_collisions = 0;
1717 1714
1718 /* Rx Errors */ 1715 /* Rx Errors */
1719 ifp->if_iqdrops += total_missed_rx; 1716 ifp->if_iqdrops += total_missed_rx;
1720 ifp->if_ierrors += crcerrs + rlec; 1717 ifp->if_ierrors += crcerrs + rlec;
1721} /* ixgbe_update_stats_counters */ 1718} /* ixgbe_update_stats_counters */
1722 1719
1723/************************************************************************ 1720/************************************************************************
1724 * ixgbe_add_hw_stats 1721 * ixgbe_add_hw_stats
1725 * 1722 *
1726 * Add sysctl variables, one per statistic, to the system. 1723 * Add sysctl variables, one per statistic, to the system.
1727 ************************************************************************/ 1724 ************************************************************************/
1728static void 1725static void
1729ixgbe_add_hw_stats(struct adapter *adapter) 1726ixgbe_add_hw_stats(struct adapter *adapter)
1730{ 1727{
1731 device_t dev = adapter->dev; 1728 device_t dev = adapter->dev;
1732 const struct sysctlnode *rnode, *cnode; 1729 const struct sysctlnode *rnode, *cnode;
1733 struct sysctllog **log = &adapter->sysctllog; 1730 struct sysctllog **log = &adapter->sysctllog;
1734 struct tx_ring *txr = adapter->tx_rings; 1731 struct tx_ring *txr = adapter->tx_rings;
1735 struct rx_ring *rxr = adapter->rx_rings; 1732 struct rx_ring *rxr = adapter->rx_rings;
1736 struct ixgbe_hw *hw = &adapter->hw; 1733 struct ixgbe_hw *hw = &adapter->hw;
1737 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1734 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1738 const char *xname = device_xname(dev); 1735 const char *xname = device_xname(dev);
1739 int i; 1736 int i;
1740 1737
1741 /* Driver Statistics */ 1738 /* Driver Statistics */
1742 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 1739 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1743 NULL, xname, "Driver tx dma soft fail EFBIG"); 1740 NULL, xname, "Driver tx dma soft fail EFBIG");
1744 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, 1741 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1745 NULL, xname, "m_defrag() failed"); 1742 NULL, xname, "m_defrag() failed");
1746 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 1743 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1747 NULL, xname, "Driver tx dma hard fail EFBIG"); 1744 NULL, xname, "Driver tx dma hard fail EFBIG");
1748 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, 1745 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1749 NULL, xname, "Driver tx dma hard fail EINVAL"); 1746 NULL, xname, "Driver tx dma hard fail EINVAL");
1750 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, 1747 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1751 NULL, xname, "Driver tx dma hard fail other"); 1748 NULL, xname, "Driver tx dma hard fail other");
1752 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 1749 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1753 NULL, xname, "Driver tx dma soft fail EAGAIN"); 1750 NULL, xname, "Driver tx dma soft fail EAGAIN");
1754 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 1751 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1755 NULL, xname, "Driver tx dma soft fail ENOMEM"); 1752 NULL, xname, "Driver tx dma soft fail ENOMEM");
1756 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, 1753 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1757 NULL, xname, "Watchdog timeouts"); 1754 NULL, xname, "Watchdog timeouts");
1758 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, 1755 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1759 NULL, xname, "TSO errors"); 1756 NULL, xname, "TSO errors");
1760 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR, 1757 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1761 NULL, xname, "Link MSI-X IRQ Handled"); 1758 NULL, xname, "Link MSI-X IRQ Handled");
1762 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR, 1759 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1763 NULL, xname, "Link softint"); 1760 NULL, xname, "Link softint");
1764 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR, 1761 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1765 NULL, xname, "module softint"); 1762 NULL, xname, "module softint");
1766 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR, 1763 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1767 NULL, xname, "multimode softint"); 1764 NULL, xname, "multimode softint");
1768 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR, 1765 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1769 NULL, xname, "external PHY softint"); 1766 NULL, xname, "external PHY softint");
1770 1767
1771 /* Max number of traffic class is 8 */ 1768 /* Max number of traffic class is 8 */
1772 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8); 1769 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1773 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1770 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1774 snprintf(adapter->tcs[i].evnamebuf, 1771 snprintf(adapter->tcs[i].evnamebuf,
1775 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", 1772 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1776 xname, i); 1773 xname, i);
1777 if (i < __arraycount(stats->mpc)) { 1774 if (i < __arraycount(stats->mpc)) {
1778 evcnt_attach_dynamic(&stats->mpc[i], 1775 evcnt_attach_dynamic(&stats->mpc[i],
1779 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1776 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1780 "RX Missed Packet Count"); 1777 "RX Missed Packet Count");
1781 if (hw->mac.type == ixgbe_mac_82598EB) 1778 if (hw->mac.type == ixgbe_mac_82598EB)
1782 evcnt_attach_dynamic(&stats->rnbc[i], 1779 evcnt_attach_dynamic(&stats->rnbc[i],
1783 EVCNT_TYPE_MISC, NULL, 1780 EVCNT_TYPE_MISC, NULL,
1784 adapter->tcs[i].evnamebuf, 1781 adapter->tcs[i].evnamebuf,
1785 "Receive No Buffers"); 1782 "Receive No Buffers");
1786 } 1783 }
1787 if (i < __arraycount(stats->pxontxc)) { 1784 if (i < __arraycount(stats->pxontxc)) {
1788 evcnt_attach_dynamic(&stats->pxontxc[i], 1785 evcnt_attach_dynamic(&stats->pxontxc[i],
1789 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1786 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1790 "pxontxc"); 1787 "pxontxc");
1791 evcnt_attach_dynamic(&stats->pxonrxc[i], 1788 evcnt_attach_dynamic(&stats->pxonrxc[i],
1792 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1789 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1793 "pxonrxc"); 1790 "pxonrxc");
1794 evcnt_attach_dynamic(&stats->pxofftxc[i], 1791 evcnt_attach_dynamic(&stats->pxofftxc[i],
1795 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1792 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1796 "pxofftxc"); 1793 "pxofftxc");
1797 evcnt_attach_dynamic(&stats->pxoffrxc[i], 1794 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1798 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1795 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1799 "pxoffrxc"); 1796 "pxoffrxc");
1800 if (hw->mac.type >= ixgbe_mac_82599EB) 1797 if (hw->mac.type >= ixgbe_mac_82599EB)
1801 evcnt_attach_dynamic(&stats->pxon2offc[i], 1798 evcnt_attach_dynamic(&stats->pxon2offc[i],
1802 EVCNT_TYPE_MISC, NULL, 1799 EVCNT_TYPE_MISC, NULL,
1803 adapter->tcs[i].evnamebuf, 1800 adapter->tcs[i].evnamebuf,
1804 "pxon2offc"); 1801 "pxon2offc");
1805 } 1802 }
1806 } 1803 }
1807 1804
1808 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 1805 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1809#ifdef LRO 1806#ifdef LRO
1810 struct lro_ctrl *lro = &rxr->lro; 1807 struct lro_ctrl *lro = &rxr->lro;
1811#endif /* LRO */ 1808#endif /* LRO */
1812 1809
1813 snprintf(adapter->queues[i].evnamebuf, 1810 snprintf(adapter->queues[i].evnamebuf,
1814 sizeof(adapter->queues[i].evnamebuf), "%s q%d", 1811 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1815 xname, i); 1812 xname, i);
1816 snprintf(adapter->queues[i].namebuf, 1813 snprintf(adapter->queues[i].namebuf,
1817 sizeof(adapter->queues[i].namebuf), "q%d", i); 1814 sizeof(adapter->queues[i].namebuf), "q%d", i);
1818 1815
1819 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 1816 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1820 aprint_error_dev(dev, "could not create sysctl root\n"); 1817 aprint_error_dev(dev, "could not create sysctl root\n");
1821 break; 1818 break;
1822 } 1819 }
1823 1820
1824 if (sysctl_createv(log, 0, &rnode, &rnode, 1821 if (sysctl_createv(log, 0, &rnode, &rnode,
1825 0, CTLTYPE_NODE, 1822 0, CTLTYPE_NODE,
1826 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 1823 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1827 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 1824 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1828 break; 1825 break;
1829 1826
1830 if (sysctl_createv(log, 0, &rnode, &cnode, 1827 if (sysctl_createv(log, 0, &rnode, &cnode,
1831 CTLFLAG_READWRITE, CTLTYPE_INT, 1828 CTLFLAG_READWRITE, CTLTYPE_INT,
1832 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 1829 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1833 ixgbe_sysctl_interrupt_rate_handler, 0, 1830 ixgbe_sysctl_interrupt_rate_handler, 0,
1834 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 1831 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1835 break; 1832 break;
1836 1833
1837 if (sysctl_createv(log, 0, &rnode, &cnode, 1834 if (sysctl_createv(log, 0, &rnode, &cnode,
1838 CTLFLAG_READONLY, CTLTYPE_INT, 1835 CTLFLAG_READONLY, CTLTYPE_INT,
1839 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 1836 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1840 ixgbe_sysctl_tdh_handler, 0, (void *)txr, 1837 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1841 0, CTL_CREATE, CTL_EOL) != 0) 1838 0, CTL_CREATE, CTL_EOL) != 0)
1842 break; 1839 break;
1843 1840
1844 if (sysctl_createv(log, 0, &rnode, &cnode, 1841 if (sysctl_createv(log, 0, &rnode, &cnode,
1845 CTLFLAG_READONLY, CTLTYPE_INT, 1842 CTLFLAG_READONLY, CTLTYPE_INT,
1846 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 1843 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1847 ixgbe_sysctl_tdt_handler, 0, (void *)txr, 1844 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1848 0, CTL_CREATE, CTL_EOL) != 0) 1845 0, CTL_CREATE, CTL_EOL) != 0)
1849 break; 1846 break;
1850 1847
1851 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, 1848 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1852 NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); 1849 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1853 evcnt_attach_dynamic(&adapter->queues[i].handleq, 1850 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1854 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1851 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1855 "Handled queue in softint"); 1852 "Handled queue in softint");
1856 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, 1853 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1857 NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); 1854 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1858 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 1855 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1859 NULL, adapter->queues[i].evnamebuf, "TSO"); 1856 NULL, adapter->queues[i].evnamebuf, "TSO");
1860 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 1857 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1861 NULL, adapter->queues[i].evnamebuf, 1858 NULL, adapter->queues[i].evnamebuf,
1862 "Queue No Descriptor Available"); 1859 "Queue No Descriptor Available");
1863 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 1860 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1864 NULL, adapter->queues[i].evnamebuf, 1861 NULL, adapter->queues[i].evnamebuf,
1865 "Queue Packets Transmitted"); 1862 "Queue Packets Transmitted");
1866#ifndef IXGBE_LEGACY_TX 1863#ifndef IXGBE_LEGACY_TX
1867 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 1864 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1868 NULL, adapter->queues[i].evnamebuf, 1865 NULL, adapter->queues[i].evnamebuf,
1869 "Packets dropped in pcq"); 1866 "Packets dropped in pcq");
1870#endif 1867#endif
1871 1868
1872 if (sysctl_createv(log, 0, &rnode, &cnode, 1869 if (sysctl_createv(log, 0, &rnode, &cnode,
1873 CTLFLAG_READONLY, 1870 CTLFLAG_READONLY,
1874 CTLTYPE_INT, 1871 CTLTYPE_INT,
1875 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"), 1872 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1876 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 1873 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1877 CTL_CREATE, CTL_EOL) != 0) 1874 CTL_CREATE, CTL_EOL) != 0)
1878 break; 1875 break;
1879 1876
1880 if (sysctl_createv(log, 0, &rnode, &cnode, 1877 if (sysctl_createv(log, 0, &rnode, &cnode,
1881 CTLFLAG_READONLY, 1878 CTLFLAG_READONLY,
1882 CTLTYPE_INT, 1879 CTLTYPE_INT,
1883 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"), 1880 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1884 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0, 1881 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1885 CTL_CREATE, CTL_EOL) != 0) 1882 CTL_CREATE, CTL_EOL) != 0)
1886 break; 1883 break;
1887 1884
1888 if (sysctl_createv(log, 0, &rnode, &cnode, 1885 if (sysctl_createv(log, 0, &rnode, &cnode,
1889 CTLFLAG_READONLY, 1886 CTLFLAG_READONLY,
1890 CTLTYPE_INT, 1887 CTLTYPE_INT,
1891 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"), 1888 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1892 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0, 1889 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1893 CTL_CREATE, CTL_EOL) != 0) 1890 CTL_CREATE, CTL_EOL) != 0)
1894 break; 1891 break;
1895 1892
1896 if (i < __arraycount(stats->qprc)) { 1893 if (i < __arraycount(stats->qprc)) {
1897 evcnt_attach_dynamic(&stats->qprc[i], 1894 evcnt_attach_dynamic(&stats->qprc[i],
1898 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1895 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1899 "qprc"); 1896 "qprc");
1900 evcnt_attach_dynamic(&stats->qptc[i], 1897 evcnt_attach_dynamic(&stats->qptc[i],
1901 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1898 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1902 "qptc"); 1899 "qptc");
1903 evcnt_attach_dynamic(&stats->qbrc[i], 1900 evcnt_attach_dynamic(&stats->qbrc[i],
1904 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1901 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1905 "qbrc"); 1902 "qbrc");
1906 evcnt_attach_dynamic(&stats->qbtc[i], 1903 evcnt_attach_dynamic(&stats->qbtc[i],
1907 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1904 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1908 "qbtc"); 1905 "qbtc");
1909 if (hw->mac.type >= ixgbe_mac_82599EB) 1906 if (hw->mac.type >= ixgbe_mac_82599EB)
1910 evcnt_attach_dynamic(&stats->qprdc[i], 1907 evcnt_attach_dynamic(&stats->qprdc[i],
1911 EVCNT_TYPE_MISC, NULL, 1908 EVCNT_TYPE_MISC, NULL,
1912 adapter->queues[i].evnamebuf, "qprdc"); 1909 adapter->queues[i].evnamebuf, "qprdc");
1913 } 1910 }
1914 1911
1915 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 1912 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1916 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received"); 1913 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1917 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 1914 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1918 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received"); 1915 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1919 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 1916 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1920 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); 1917 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1921 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC, 1918 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1922 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf"); 1919 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1923 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 1920 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1924 NULL, adapter->queues[i].evnamebuf, "Rx discarded"); 1921 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1925#ifdef LRO 1922#ifdef LRO
1926 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 1923 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1927 CTLFLAG_RD, &lro->lro_queued, 0, 1924 CTLFLAG_RD, &lro->lro_queued, 0,
1928 "LRO Queued"); 1925 "LRO Queued");
1929 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 1926 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1930 CTLFLAG_RD, &lro->lro_flushed, 0, 1927 CTLFLAG_RD, &lro->lro_flushed, 0,
1931 "LRO Flushed"); 1928 "LRO Flushed");
1932#endif /* LRO */ 1929#endif /* LRO */
1933 } 1930 }
1934 1931
1935 /* MAC stats get their own sub node */ 1932 /* MAC stats get their own sub node */
1936 1933
1937 snprintf(stats->namebuf, 1934 snprintf(stats->namebuf,
1938 sizeof(stats->namebuf), "%s MAC Statistics", xname); 1935 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1939 1936
1940 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 1937 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1941 stats->namebuf, "rx csum offload - IP"); 1938 stats->namebuf, "rx csum offload - IP");
1942 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 1939 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1943 stats->namebuf, "rx csum offload - L4"); 1940 stats->namebuf, "rx csum offload - L4");
1944 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 1941 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1945 stats->namebuf, "rx csum offload - IP bad"); 1942 stats->namebuf, "rx csum offload - IP bad");
1946 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 1943 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1947 stats->namebuf, "rx csum offload - L4 bad"); 1944 stats->namebuf, "rx csum offload - L4 bad");
1948 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL, 1945 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1949 stats->namebuf, "Interrupt conditions zero"); 1946 stats->namebuf, "Interrupt conditions zero");
1950 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL, 1947 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Legacy interrupts"); 1948 stats->namebuf, "Legacy interrupts");
1952 1949
1953 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL, 1950 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1954 stats->namebuf, "CRC Errors"); 1951 stats->namebuf, "CRC Errors");
1955 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL, 1952 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1956 stats->namebuf, "Illegal Byte Errors"); 1953 stats->namebuf, "Illegal Byte Errors");
1957 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL, 1954 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1958 stats->namebuf, "Byte Errors"); 1955 stats->namebuf, "Byte Errors");
1959 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL, 1956 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "MAC Short Packets Discarded"); 1957 stats->namebuf, "MAC Short Packets Discarded");
1961 if (hw->mac.type >= ixgbe_mac_X550) 1958 if (hw->mac.type >= ixgbe_mac_X550)
1962 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL, 1959 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1963 stats->namebuf, "Bad SFD"); 1960 stats->namebuf, "Bad SFD");
1964 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL, 1961 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1965 stats->namebuf, "Total Packets Missed"); 1962 stats->namebuf, "Total Packets Missed");
1966 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL, 1963 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1967 stats->namebuf, "MAC Local Faults"); 1964 stats->namebuf, "MAC Local Faults");
1968 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL, 1965 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1969 stats->namebuf, "MAC Remote Faults"); 1966 stats->namebuf, "MAC Remote Faults");
1970 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL, 1967 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1971 stats->namebuf, "Receive Length Errors"); 1968 stats->namebuf, "Receive Length Errors");
1972 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL, 1969 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1973 stats->namebuf, "Link XON Transmitted"); 1970 stats->namebuf, "Link XON Transmitted");
1974 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL, 1971 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1975 stats->namebuf, "Link XON Received"); 1972 stats->namebuf, "Link XON Received");
1976 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL, 1973 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1977 stats->namebuf, "Link XOFF Transmitted"); 1974 stats->namebuf, "Link XOFF Transmitted");
1978 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL, 1975 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1979 stats->namebuf, "Link XOFF Received"); 1976 stats->namebuf, "Link XOFF Received");
1980 1977
1981 /* Packet Reception Stats */ 1978 /* Packet Reception Stats */
1982 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL, 1979 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1983 stats->namebuf, "Total Octets Received"); 1980 stats->namebuf, "Total Octets Received");
1984 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL, 1981 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1985 stats->namebuf, "Good Octets Received"); 1982 stats->namebuf, "Good Octets Received");
1986 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL, 1983 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1987 stats->namebuf, "Total Packets Received"); 1984 stats->namebuf, "Total Packets Received");
1988 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL, 1985 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1989 stats->namebuf, "Good Packets Received"); 1986 stats->namebuf, "Good Packets Received");
1990 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL, 1987 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1991 stats->namebuf, "Multicast Packets Received"); 1988 stats->namebuf, "Multicast Packets Received");
1992 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL, 1989 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1993 stats->namebuf, "Broadcast Packets Received"); 1990 stats->namebuf, "Broadcast Packets Received");
1994 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL, 1991 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1995 stats->namebuf, "64 byte frames received "); 1992 stats->namebuf, "64 byte frames received ");
1996 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL, 1993 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1997 stats->namebuf, "65-127 byte frames received"); 1994 stats->namebuf, "65-127 byte frames received");
1998 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL, 1995 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1999 stats->namebuf, "128-255 byte frames received"); 1996 stats->namebuf, "128-255 byte frames received");
2000 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL, 1997 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2001 stats->namebuf, "256-511 byte frames received"); 1998 stats->namebuf, "256-511 byte frames received");
2002 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL, 1999 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2003 stats->namebuf, "512-1023 byte frames received"); 2000 stats->namebuf, "512-1023 byte frames received");
2004 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL, 2001 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2005 stats->namebuf, "1023-1522 byte frames received"); 2002 stats->namebuf, "1023-1522 byte frames received");
2006 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL, 2003 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2007 stats->namebuf, "Receive Undersized"); 2004 stats->namebuf, "Receive Undersized");
2008 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL, 2005 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2009 stats->namebuf, "Fragmented Packets Received "); 2006 stats->namebuf, "Fragmented Packets Received ");
2010 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL, 2007 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2011 stats->namebuf, "Oversized Packets Received"); 2008 stats->namebuf, "Oversized Packets Received");
2012 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL, 2009 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2013 stats->namebuf, "Received Jabber"); 2010 stats->namebuf, "Received Jabber");
2014 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL, 2011 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2015 stats->namebuf, "Management Packets Received"); 2012 stats->namebuf, "Management Packets Received");
2016 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL, 2013 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2017 stats->namebuf, "Management Packets Dropped"); 2014 stats->namebuf, "Management Packets Dropped");
2018 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL, 2015 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2019 stats->namebuf, "Checksum Errors"); 2016 stats->namebuf, "Checksum Errors");
2020 2017
2021 /* Packet Transmission Stats */ 2018 /* Packet Transmission Stats */
2022 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL, 2019 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2023 stats->namebuf, "Good Octets Transmitted"); 2020 stats->namebuf, "Good Octets Transmitted");
2024 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL, 2021 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2025 stats->namebuf, "Total Packets Transmitted"); 2022 stats->namebuf, "Total Packets Transmitted");
2026 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL, 2023 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2027 stats->namebuf, "Good Packets Transmitted"); 2024 stats->namebuf, "Good Packets Transmitted");
2028 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL, 2025 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2029 stats->namebuf, "Broadcast Packets Transmitted"); 2026 stats->namebuf, "Broadcast Packets Transmitted");
2030 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL, 2027 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2031 stats->namebuf, "Multicast Packets Transmitted"); 2028 stats->namebuf, "Multicast Packets Transmitted");
2032 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL, 2029 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2033 stats->namebuf, "Management Packets Transmitted"); 2030 stats->namebuf, "Management Packets Transmitted");
2034 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL, 2031 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2035 stats->namebuf, "64 byte frames transmitted "); 2032 stats->namebuf, "64 byte frames transmitted ");
2036 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL, 2033 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2037 stats->namebuf, "65-127 byte frames transmitted"); 2034 stats->namebuf, "65-127 byte frames transmitted");
2038 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL, 2035 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2039 stats->namebuf, "128-255 byte frames transmitted"); 2036 stats->namebuf, "128-255 byte frames transmitted");
2040 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL, 2037 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2041 stats->namebuf, "256-511 byte frames transmitted"); 2038 stats->namebuf, "256-511 byte frames transmitted");
2042 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL, 2039 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2043 stats->namebuf, "512-1023 byte frames transmitted"); 2040 stats->namebuf, "512-1023 byte frames transmitted");
2044 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL, 2041 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2045 stats->namebuf, "1024-1522 byte frames transmitted"); 2042 stats->namebuf, "1024-1522 byte frames transmitted");
2046} /* ixgbe_add_hw_stats */ 2043} /* ixgbe_add_hw_stats */
2047 2044
2048static void 2045static void
2049ixgbe_clear_evcnt(struct adapter *adapter) 2046ixgbe_clear_evcnt(struct adapter *adapter)
2050{ 2047{
2051 struct tx_ring *txr = adapter->tx_rings; 2048 struct tx_ring *txr = adapter->tx_rings;
2052 struct rx_ring *rxr = adapter->rx_rings; 2049 struct rx_ring *rxr = adapter->rx_rings;
2053 struct ixgbe_hw *hw = &adapter->hw; 2050 struct ixgbe_hw *hw = &adapter->hw;
2054 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 2051 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2055 int i; 2052 int i;
2056 2053
2057 adapter->efbig_tx_dma_setup.ev_count = 0; 2054 adapter->efbig_tx_dma_setup.ev_count = 0;
2058 adapter->mbuf_defrag_failed.ev_count = 0; 2055 adapter->mbuf_defrag_failed.ev_count = 0;
2059 adapter->efbig2_tx_dma_setup.ev_count = 0; 2056 adapter->efbig2_tx_dma_setup.ev_count = 0;
2060 adapter->einval_tx_dma_setup.ev_count = 0; 2057 adapter->einval_tx_dma_setup.ev_count = 0;
2061 adapter->other_tx_dma_setup.ev_count = 0; 2058 adapter->other_tx_dma_setup.ev_count = 0;
2062 adapter->eagain_tx_dma_setup.ev_count = 0; 2059 adapter->eagain_tx_dma_setup.ev_count = 0;
2063 adapter->enomem_tx_dma_setup.ev_count = 0; 2060 adapter->enomem_tx_dma_setup.ev_count = 0;
2064 adapter->tso_err.ev_count = 0; 2061 adapter->tso_err.ev_count = 0;
2065 adapter->watchdog_events.ev_count = 0; 2062 adapter->watchdog_events.ev_count = 0;
2066 adapter->link_irq.ev_count = 0; 2063 adapter->link_irq.ev_count = 0;
2067 adapter->link_sicount.ev_count = 0; 2064 adapter->link_sicount.ev_count = 0;
2068 adapter->mod_sicount.ev_count = 0; 2065 adapter->mod_sicount.ev_count = 0;
2069 adapter->msf_sicount.ev_count = 0; 2066 adapter->msf_sicount.ev_count = 0;
2070 adapter->phy_sicount.ev_count = 0; 2067 adapter->phy_sicount.ev_count = 0;
2071 2068
2072 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 2069 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2073 if (i < __arraycount(stats->mpc)) { 2070 if (i < __arraycount(stats->mpc)) {
2074 stats->mpc[i].ev_count = 0; 2071 stats->mpc[i].ev_count = 0;
2075 if (hw->mac.type == ixgbe_mac_82598EB) 2072 if (hw->mac.type == ixgbe_mac_82598EB)
2076 stats->rnbc[i].ev_count = 0; 2073 stats->rnbc[i].ev_count = 0;
2077 } 2074 }
2078 if (i < __arraycount(stats->pxontxc)) { 2075 if (i < __arraycount(stats->pxontxc)) {
2079 stats->pxontxc[i].ev_count = 0; 2076 stats->pxontxc[i].ev_count = 0;
2080 stats->pxonrxc[i].ev_count = 0; 2077 stats->pxonrxc[i].ev_count = 0;
2081 stats->pxofftxc[i].ev_count = 0; 2078 stats->pxofftxc[i].ev_count = 0;
2082 stats->pxoffrxc[i].ev_count = 0; 2079 stats->pxoffrxc[i].ev_count = 0;
2083 if (hw->mac.type >= ixgbe_mac_82599EB) 2080 if (hw->mac.type >= ixgbe_mac_82599EB)
2084 stats->pxon2offc[i].ev_count = 0; 2081 stats->pxon2offc[i].ev_count = 0;
2085 } 2082 }
2086 } 2083 }
2087 2084
2088 txr = adapter->tx_rings; 2085 txr = adapter->tx_rings;
2089 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2086 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2090 adapter->queues[i].irqs.ev_count = 0; 2087 adapter->queues[i].irqs.ev_count = 0;
2091 adapter->queues[i].handleq.ev_count = 0; 2088 adapter->queues[i].handleq.ev_count = 0;
2092 adapter->queues[i].req.ev_count = 0; 2089 adapter->queues[i].req.ev_count = 0;
2093 txr->no_desc_avail.ev_count = 0; 2090 txr->no_desc_avail.ev_count = 0;
2094 txr->total_packets.ev_count = 0; 2091 txr->total_packets.ev_count = 0;
2095 txr->tso_tx.ev_count = 0; 2092 txr->tso_tx.ev_count = 0;
2096#ifndef IXGBE_LEGACY_TX 2093#ifndef IXGBE_LEGACY_TX
2097 txr->pcq_drops.ev_count = 0; 2094 txr->pcq_drops.ev_count = 0;
2098#endif 2095#endif
2099 txr->q_efbig_tx_dma_setup = 0; 2096 txr->q_efbig_tx_dma_setup = 0;
2100 txr->q_mbuf_defrag_failed = 0; 2097 txr->q_mbuf_defrag_failed = 0;
2101 txr->q_efbig2_tx_dma_setup = 0; 2098 txr->q_efbig2_tx_dma_setup = 0;
2102 txr->q_einval_tx_dma_setup = 0; 2099 txr->q_einval_tx_dma_setup = 0;
2103 txr->q_other_tx_dma_setup = 0; 2100 txr->q_other_tx_dma_setup = 0;
2104 txr->q_eagain_tx_dma_setup = 0; 2101 txr->q_eagain_tx_dma_setup = 0;
2105 txr->q_enomem_tx_dma_setup = 0; 2102 txr->q_enomem_tx_dma_setup = 0;
2106 txr->q_tso_err = 0; 2103 txr->q_tso_err = 0;
2107 2104
2108 if (i < __arraycount(stats->qprc)) { 2105 if (i < __arraycount(stats->qprc)) {
2109 stats->qprc[i].ev_count = 0; 2106 stats->qprc[i].ev_count = 0;
2110 stats->qptc[i].ev_count = 0; 2107 stats->qptc[i].ev_count = 0;
2111 stats->qbrc[i].ev_count = 0; 2108 stats->qbrc[i].ev_count = 0;
2112 stats->qbtc[i].ev_count = 0; 2109 stats->qbtc[i].ev_count = 0;
2113 if (hw->mac.type >= ixgbe_mac_82599EB) 2110 if (hw->mac.type >= ixgbe_mac_82599EB)
2114 stats->qprdc[i].ev_count = 0; 2111 stats->qprdc[i].ev_count = 0;
2115 } 2112 }
2116 2113
2117 rxr->rx_packets.ev_count = 0; 2114 rxr->rx_packets.ev_count = 0;
2118 rxr->rx_bytes.ev_count = 0; 2115 rxr->rx_bytes.ev_count = 0;
2119 rxr->rx_copies.ev_count = 0; 2116 rxr->rx_copies.ev_count = 0;
2120 rxr->no_jmbuf.ev_count = 0; 2117 rxr->no_jmbuf.ev_count = 0;
2121 rxr->rx_discarded.ev_count = 0; 2118 rxr->rx_discarded.ev_count = 0;
2122 } 2119 }
2123 stats->ipcs.ev_count = 0; 2120 stats->ipcs.ev_count = 0;
2124 stats->l4cs.ev_count = 0; 2121 stats->l4cs.ev_count = 0;
2125 stats->ipcs_bad.ev_count = 0; 2122 stats->ipcs_bad.ev_count = 0;
2126 stats->l4cs_bad.ev_count = 0; 2123 stats->l4cs_bad.ev_count = 0;
2127 stats->intzero.ev_count = 0; 2124 stats->intzero.ev_count = 0;
2128 stats->legint.ev_count = 0; 2125 stats->legint.ev_count = 0;
2129 stats->crcerrs.ev_count = 0; 2126 stats->crcerrs.ev_count = 0;
2130 stats->illerrc.ev_count = 0; 2127 stats->illerrc.ev_count = 0;
2131 stats->errbc.ev_count = 0; 2128 stats->errbc.ev_count = 0;
2132 stats->mspdc.ev_count = 0; 2129 stats->mspdc.ev_count = 0;
2133 stats->mbsdc.ev_count = 0; 2130 stats->mbsdc.ev_count = 0;
2134 stats->mpctotal.ev_count = 0; 2131 stats->mpctotal.ev_count = 0;
2135 stats->mlfc.ev_count = 0; 2132 stats->mlfc.ev_count = 0;
2136 stats->mrfc.ev_count = 0; 2133 stats->mrfc.ev_count = 0;
2137 stats->rlec.ev_count = 0; 2134 stats->rlec.ev_count = 0;
2138 stats->lxontxc.ev_count = 0; 2135 stats->lxontxc.ev_count = 0;
2139 stats->lxonrxc.ev_count = 0; 2136 stats->lxonrxc.ev_count = 0;
2140 stats->lxofftxc.ev_count = 0; 2137 stats->lxofftxc.ev_count = 0;
2141 stats->lxoffrxc.ev_count = 0; 2138 stats->lxoffrxc.ev_count = 0;
2142 2139
2143 /* Packet Reception Stats */ 2140 /* Packet Reception Stats */
2144 stats->tor.ev_count = 0; 2141 stats->tor.ev_count = 0;
2145 stats->gorc.ev_count = 0; 2142 stats->gorc.ev_count = 0;
2146 stats->tpr.ev_count = 0; 2143 stats->tpr.ev_count = 0;
2147 stats->gprc.ev_count = 0; 2144 stats->gprc.ev_count = 0;
2148 stats->mprc.ev_count = 0; 2145 stats->mprc.ev_count = 0;
2149 stats->bprc.ev_count = 0; 2146 stats->bprc.ev_count = 0;
2150 stats->prc64.ev_count = 0; 2147 stats->prc64.ev_count = 0;
2151 stats->prc127.ev_count = 0; 2148 stats->prc127.ev_count = 0;
2152 stats->prc255.ev_count = 0; 2149 stats->prc255.ev_count = 0;
2153 stats->prc511.ev_count = 0; 2150 stats->prc511.ev_count = 0;
2154 stats->prc1023.ev_count = 0; 2151 stats->prc1023.ev_count = 0;
2155 stats->prc1522.ev_count = 0; 2152 stats->prc1522.ev_count = 0;
2156 stats->ruc.ev_count = 0; 2153 stats->ruc.ev_count = 0;
2157 stats->rfc.ev_count = 0; 2154 stats->rfc.ev_count = 0;
2158 stats->roc.ev_count = 0; 2155 stats->roc.ev_count = 0;
2159 stats->rjc.ev_count = 0; 2156 stats->rjc.ev_count = 0;
2160 stats->mngprc.ev_count = 0; 2157 stats->mngprc.ev_count = 0;
2161 stats->mngpdc.ev_count = 0; 2158 stats->mngpdc.ev_count = 0;
2162 stats->xec.ev_count = 0; 2159 stats->xec.ev_count = 0;
2163 2160
2164 /* Packet Transmission Stats */ 2161 /* Packet Transmission Stats */
2165 stats->gotc.ev_count = 0; 2162 stats->gotc.ev_count = 0;
2166 stats->tpt.ev_count = 0; 2163 stats->tpt.ev_count = 0;
2167 stats->gptc.ev_count = 0; 2164 stats->gptc.ev_count = 0;
2168 stats->bptc.ev_count = 0; 2165 stats->bptc.ev_count = 0;
2169 stats->mptc.ev_count = 0; 2166 stats->mptc.ev_count = 0;
2170 stats->mngptc.ev_count = 0; 2167 stats->mngptc.ev_count = 0;
2171 stats->ptc64.ev_count = 0; 2168 stats->ptc64.ev_count = 0;
2172 stats->ptc127.ev_count = 0; 2169 stats->ptc127.ev_count = 0;
2173 stats->ptc255.ev_count = 0; 2170 stats->ptc255.ev_count = 0;
2174 stats->ptc511.ev_count = 0; 2171 stats->ptc511.ev_count = 0;
2175 stats->ptc1023.ev_count = 0; 2172 stats->ptc1023.ev_count = 0;
2176 stats->ptc1522.ev_count = 0; 2173 stats->ptc1522.ev_count = 0;
2177} 2174}
2178 2175
2179/************************************************************************ 2176/************************************************************************
2180 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 2177 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2181 * 2178 *
2182 * Retrieves the TDH value from the hardware 2179 * Retrieves the TDH value from the hardware
2183 ************************************************************************/ 2180 ************************************************************************/
2184static int 2181static int
2185ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS) 2182ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2186{ 2183{
2187 struct sysctlnode node = *rnode; 2184 struct sysctlnode node = *rnode;
2188 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 2185 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2189 struct adapter *adapter; 2186 struct adapter *adapter;
2190 uint32_t val; 2187 uint32_t val;
2191 2188
2192 if (!txr) 2189 if (!txr)
2193 return (0); 2190 return (0);
2194 2191
2195 adapter = txr->adapter; 2192 adapter = txr->adapter;
2196 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2193 if (ixgbe_fw_recovery_mode_swflag(adapter))
2197 return (EPERM); 2194 return (EPERM);
2198 2195
2199 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)); 2196 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2200 node.sysctl_data = &val; 2197 node.sysctl_data = &val;
2201 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2198 return sysctl_lookup(SYSCTLFN_CALL(&node));
2202} /* ixgbe_sysctl_tdh_handler */ 2199} /* ixgbe_sysctl_tdh_handler */
2203 2200
2204/************************************************************************ 2201/************************************************************************
2205 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 2202 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2206 * 2203 *
2207 * Retrieves the TDT value from the hardware 2204 * Retrieves the TDT value from the hardware
2208 ************************************************************************/ 2205 ************************************************************************/
2209static int 2206static int
2210ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS) 2207ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2211{ 2208{
2212 struct sysctlnode node = *rnode; 2209 struct sysctlnode node = *rnode;
2213 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 2210 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2214 struct adapter *adapter; 2211 struct adapter *adapter;
2215 uint32_t val; 2212 uint32_t val;
2216 2213
2217 if (!txr) 2214 if (!txr)
2218 return (0); 2215 return (0);
2219 2216
2220 adapter = txr->adapter; 2217 adapter = txr->adapter;
2221 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2218 if (ixgbe_fw_recovery_mode_swflag(adapter))
2222 return (EPERM); 2219 return (EPERM);
2223 2220
2224 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)); 2221 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2225 node.sysctl_data = &val; 2222 node.sysctl_data = &val;
2226 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2223 return sysctl_lookup(SYSCTLFN_CALL(&node));
2227} /* ixgbe_sysctl_tdt_handler */ 2224} /* ixgbe_sysctl_tdt_handler */
2228 2225
2229/************************************************************************ 2226/************************************************************************
2230 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check 2227 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2231 * handler function 2228 * handler function
2232 * 2229 *
2233 * Retrieves the next_to_check value 2230 * Retrieves the next_to_check value
2234 ************************************************************************/ 2231 ************************************************************************/
2235static int 2232static int
2236ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS) 2233ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2237{ 2234{
2238 struct sysctlnode node = *rnode; 2235 struct sysctlnode node = *rnode;
2239 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2236 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2240 struct adapter *adapter; 2237 struct adapter *adapter;
2241 uint32_t val; 2238 uint32_t val;
2242 2239
2243 if (!rxr) 2240 if (!rxr)
2244 return (0); 2241 return (0);
2245 2242
2246 adapter = rxr->adapter; 2243 adapter = rxr->adapter;
2247 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2244 if (ixgbe_fw_recovery_mode_swflag(adapter))
2248 return (EPERM); 2245 return (EPERM);
2249 2246
2250 val = rxr->next_to_check; 2247 val = rxr->next_to_check;
2251 node.sysctl_data = &val; 2248 node.sysctl_data = &val;
2252 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2249 return sysctl_lookup(SYSCTLFN_CALL(&node));
2253} /* ixgbe_sysctl_next_to_check_handler */ 2250} /* ixgbe_sysctl_next_to_check_handler */
2254 2251
2255/************************************************************************ 2252/************************************************************************
2256 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 2253 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2257 * 2254 *
2258 * Retrieves the RDH value from the hardware 2255 * Retrieves the RDH value from the hardware
2259 ************************************************************************/ 2256 ************************************************************************/
2260static int 2257static int
2261ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS) 2258ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2262{ 2259{
2263 struct sysctlnode node = *rnode; 2260 struct sysctlnode node = *rnode;
2264 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2261 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2265 struct adapter *adapter; 2262 struct adapter *adapter;
2266 uint32_t val; 2263 uint32_t val;
2267 2264
2268 if (!rxr) 2265 if (!rxr)
2269 return (0); 2266 return (0);
2270 2267
2271 adapter = rxr->adapter; 2268 adapter = rxr->adapter;
2272 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2269 if (ixgbe_fw_recovery_mode_swflag(adapter))
2273 return (EPERM); 2270 return (EPERM);
2274 2271
2275 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me)); 2272 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2276 node.sysctl_data = &val; 2273 node.sysctl_data = &val;
2277 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2274 return sysctl_lookup(SYSCTLFN_CALL(&node));
2278} /* ixgbe_sysctl_rdh_handler */ 2275} /* ixgbe_sysctl_rdh_handler */
2279 2276
2280/************************************************************************ 2277/************************************************************************
2281 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 2278 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2282 * 2279 *
2283 * Retrieves the RDT value from the hardware 2280 * Retrieves the RDT value from the hardware
2284 ************************************************************************/ 2281 ************************************************************************/
2285static int 2282static int
2286ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS) 2283ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2287{ 2284{
2288 struct sysctlnode node = *rnode; 2285 struct sysctlnode node = *rnode;
2289 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2286 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2290 struct adapter *adapter; 2287 struct adapter *adapter;
2291 uint32_t val; 2288 uint32_t val;
2292 2289
2293 if (!rxr) 2290 if (!rxr)
2294 return (0); 2291 return (0);
2295 2292
2296 adapter = rxr->adapter; 2293 adapter = rxr->adapter;
2297 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2294 if (ixgbe_fw_recovery_mode_swflag(adapter))
2298 return (EPERM); 2295 return (EPERM);
2299 2296
2300 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me)); 2297 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2301 node.sysctl_data = &val; 2298 node.sysctl_data = &val;
2302 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2299 return sysctl_lookup(SYSCTLFN_CALL(&node));
2303} /* ixgbe_sysctl_rdt_handler */ 2300} /* ixgbe_sysctl_rdt_handler */
2304 2301
2305static int 2302static int
2306ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 2303ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2307{ 2304{
2308 struct ifnet *ifp = &ec->ec_if; 2305 struct ifnet *ifp = &ec->ec_if;
2309 struct adapter *adapter = ifp->if_softc; 2306 struct adapter *adapter = ifp->if_softc;
2310 int rv; 2307 int rv;
2311 2308
2312 if (set) 2309 if (set)
2313 rv = ixgbe_register_vlan(ifp->if_softc, ifp, vid); 2310 rv = ixgbe_register_vlan(adapter, vid);
2314 else 2311 else
2315 rv = ixgbe_unregister_vlan(ifp->if_softc, ifp, vid); 2312 rv = ixgbe_unregister_vlan(adapter, vid);
2316 2313
2317 if (rv != 0) 2314 if (rv != 0)
2318 return rv; 2315 return rv;
2319 2316
2320 /* 2317 /*
2321 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0 2318 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2322 * or 0 to 1. 2319 * or 0 to 1.
2323 */ 2320 */
2324 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0))) 2321 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2325 ixgbe_setup_vlan_hw_tagging(adapter); 2322 ixgbe_setup_vlan_hw_tagging(adapter);
2326 2323
2327 return rv; 2324 return rv;
2328} 2325}
2329 2326
2330/************************************************************************ 2327/************************************************************************
2331 * ixgbe_register_vlan 2328 * ixgbe_register_vlan
2332 * 2329 *
2333 * Run via vlan config EVENT, it enables us to use the 2330 * Run via vlan config EVENT, it enables us to use the
2334 * HW Filter table since we can get the vlan id. This 2331 * HW Filter table since we can get the vlan id. This
2335 * just creates the entry in the soft version of the 2332 * just creates the entry in the soft version of the
2336 * VFTA, init will repopulate the real table. 2333 * VFTA, init will repopulate the real table.
2337 ************************************************************************/ 2334 ************************************************************************/
2338static int 2335static int
2339ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2336ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
2340{ 2337{
2341 struct adapter *adapter = ifp->if_softc; 
2342 u16 index, bit; 2338 u16 index, bit;
2343 int error; 2339 int error;
2344 2340
2345 if (ifp->if_softc != arg) /* Not our event */ 
2346 return EINVAL; 
2347 
2348 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2341 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2349 return EINVAL; 2342 return EINVAL;
2350 2343
2351 IXGBE_CORE_LOCK(adapter); 2344 IXGBE_CORE_LOCK(adapter);
2352 index = (vtag >> 5) & 0x7F; 2345 index = (vtag >> 5) & 0x7F;
2353 bit = vtag & 0x1F; 2346 bit = vtag & 0x1F;
2354 adapter->shadow_vfta[index] |= ((u32)1 << bit); 2347 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2355 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true, 2348 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2356 true); 2349 true);
2357 IXGBE_CORE_UNLOCK(adapter); 2350 IXGBE_CORE_UNLOCK(adapter);
2358 if (error != 0) 2351 if (error != 0)
2359 error = EACCES; 2352 error = EACCES;
2360 2353
2361 return error; 2354 return error;
2362} /* ixgbe_register_vlan */ 2355} /* ixgbe_register_vlan */
2363 2356
2364/************************************************************************ 2357/************************************************************************
2365 * ixgbe_unregister_vlan 2358 * ixgbe_unregister_vlan
2366 * 2359 *
2367 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 2360 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2368 ************************************************************************/ 2361 ************************************************************************/
2369static int 2362static int
2370ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2363ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
2371{ 2364{
2372 struct adapter *adapter = ifp->if_softc; 
2373 u16 index, bit; 2365 u16 index, bit;
2374 int error; 2366 int error;
2375 2367
2376 if (ifp->if_softc != arg) 
2377 return EINVAL; 
2378 
2379 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2368 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2380 return EINVAL; 2369 return EINVAL;
2381 2370
2382 IXGBE_CORE_LOCK(adapter); 2371 IXGBE_CORE_LOCK(adapter);
2383 index = (vtag >> 5) & 0x7F; 2372 index = (vtag >> 5) & 0x7F;
2384 bit = vtag & 0x1F; 2373 bit = vtag & 0x1F;
2385 adapter->shadow_vfta[index] &= ~((u32)1 << bit); 2374 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2386 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false, 2375 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2387 true); 2376 true);
2388 IXGBE_CORE_UNLOCK(adapter); 2377 IXGBE_CORE_UNLOCK(adapter);
2389 if (error != 0) 2378 if (error != 0)
2390 error = EACCES; 2379 error = EACCES;
2391 2380
2392 return error; 2381 return error;
2393} /* ixgbe_unregister_vlan */ 2382} /* ixgbe_unregister_vlan */
2394 2383
2395static void 2384static void
2396ixgbe_setup_vlan_hw_tagging(struct adapter *adapter) 2385ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2397{ 2386{
2398 struct ethercom *ec = &adapter->osdep.ec; 2387 struct ethercom *ec = &adapter->osdep.ec;
2399 struct ixgbe_hw *hw = &adapter->hw; 2388 struct ixgbe_hw *hw = &adapter->hw;
2400 struct rx_ring *rxr; 2389 struct rx_ring *rxr;
2401 u32 ctrl; 2390 u32 ctrl;
2402 int i; 2391 int i;
2403 bool hwtagging; 2392 bool hwtagging;
2404 2393
2405 /* Enable HW tagging only if any vlan is attached */ 2394 /* Enable HW tagging only if any vlan is attached */
2406 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) 2395 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2407 && VLAN_ATTACHED(ec); 2396 && VLAN_ATTACHED(ec);
2408 2397
2409 /* Setup the queues for vlans */ 2398 /* Setup the queues for vlans */
2410 for (i = 0; i < adapter->num_queues; i++) { 2399 for (i = 0; i < adapter->num_queues; i++) {
2411 rxr = &adapter->rx_rings[i]; 2400 rxr = &adapter->rx_rings[i];
2412 /* 2401 /*
2413 * On 82599 and later, the VLAN enable is per/queue in RXDCTL. 2402 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2414 */ 2403 */
2415 if (hw->mac.type != ixgbe_mac_82598EB) { 2404 if (hw->mac.type != ixgbe_mac_82598EB) {
2416 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2405 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2417 if (hwtagging) 2406 if (hwtagging)
2418 ctrl |= IXGBE_RXDCTL_VME; 2407 ctrl |= IXGBE_RXDCTL_VME;
2419 else 2408 else
2420 ctrl &= ~IXGBE_RXDCTL_VME; 2409 ctrl &= ~IXGBE_RXDCTL_VME;
2421 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 2410 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2422 } 2411 }
2423 rxr->vtag_strip = hwtagging ? TRUE : FALSE; 2412 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2424 } 2413 }
2425 2414
2426 /* VLAN hw tagging for 82598 */ 2415 /* VLAN hw tagging for 82598 */
2427 if (hw->mac.type == ixgbe_mac_82598EB) { 2416 if (hw->mac.type == ixgbe_mac_82598EB) {
2428 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2417 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2429 if (hwtagging) 2418 if (hwtagging)
2430 ctrl |= IXGBE_VLNCTRL_VME; 2419 ctrl |= IXGBE_VLNCTRL_VME;
2431 else 2420 else
2432 ctrl &= ~IXGBE_VLNCTRL_VME; 2421 ctrl &= ~IXGBE_VLNCTRL_VME;
2433 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2422 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2434 } 2423 }
2435} /* ixgbe_setup_vlan_hw_tagging */ 2424} /* ixgbe_setup_vlan_hw_tagging */
2436 2425
2437static void 2426static void
2438ixgbe_setup_vlan_hw_support(struct adapter *adapter) 2427ixgbe_setup_vlan_hw_support(struct adapter *adapter)
2439{ 2428{
2440 struct ethercom *ec = &adapter->osdep.ec; 2429 struct ethercom *ec = &adapter->osdep.ec;
2441 struct ixgbe_hw *hw = &adapter->hw; 2430 struct ixgbe_hw *hw = &adapter->hw;
2442 int i; 2431 int i;
2443 u32 ctrl; 2432 u32 ctrl;
2444 struct vlanid_list *vlanidp; 2433 struct vlanid_list *vlanidp;
2445 2434
2446 /* 2435 /*
2447 * This function is called from both if_init and ifflags_cb() 2436 * This function is called from both if_init and ifflags_cb()
2448 * on NetBSD. 2437 * on NetBSD.
2449 */ 2438 */
2450 2439
2451 /* 2440 /*
2452 * Part 1: 2441 * Part 1:
2453 * Setup VLAN HW tagging 2442 * Setup VLAN HW tagging
2454 */ 2443 */
2455 ixgbe_setup_vlan_hw_tagging(adapter); 2444 ixgbe_setup_vlan_hw_tagging(adapter);
2456 2445
2457 /* 2446 /*
2458 * Part 2: 2447 * Part 2:
2459 * Setup VLAN HW filter 2448 * Setup VLAN HW filter
2460 */ 2449 */
2461 /* Cleanup shadow_vfta */ 2450 /* Cleanup shadow_vfta */
2462 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 2451 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2463 adapter->shadow_vfta[i] = 0; 2452 adapter->shadow_vfta[i] = 0;
2464 /* Generate shadow_vfta from ec_vids */ 2453 /* Generate shadow_vfta from ec_vids */
2465 ETHER_LOCK(ec); 2454 ETHER_LOCK(ec);
2466 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 2455 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2467 uint32_t idx; 2456 uint32_t idx;
2468 2457
2469 idx = vlanidp->vid / 32; 2458 idx = vlanidp->vid / 32;
2470 KASSERT(idx < IXGBE_VFTA_SIZE); 2459 KASSERT(idx < IXGBE_VFTA_SIZE);
2471 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); 2460 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2472 } 2461 }
2473 ETHER_UNLOCK(ec); 2462 ETHER_UNLOCK(ec);
2474 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 2463 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2475 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]); 2464 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2476 2465
2477 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2466 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2478 /* Enable the Filter Table if enabled */ 2467 /* Enable the Filter Table if enabled */
2479 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) 2468 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2480 ctrl |= IXGBE_VLNCTRL_VFE; 2469 ctrl |= IXGBE_VLNCTRL_VFE;
2481 else 2470 else
2482 ctrl &= ~IXGBE_VLNCTRL_VFE; 2471 ctrl &= ~IXGBE_VLNCTRL_VFE;
2483 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2472 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2484} /* ixgbe_setup_vlan_hw_support */ 2473} /* ixgbe_setup_vlan_hw_support */
2485 2474
2486/************************************************************************ 2475/************************************************************************
2487 * ixgbe_get_slot_info 2476 * ixgbe_get_slot_info
2488 * 2477 *
2489 * Get the width and transaction speed of 2478 * Get the width and transaction speed of
2490 * the slot this adapter is plugged into. 2479 * the slot this adapter is plugged into.
2491 ************************************************************************/ 2480 ************************************************************************/
2492static void 2481static void
2493ixgbe_get_slot_info(struct adapter *adapter) 2482ixgbe_get_slot_info(struct adapter *adapter)
2494{ 2483{
2495 device_t dev = adapter->dev; 2484 device_t dev = adapter->dev;
2496 struct ixgbe_hw *hw = &adapter->hw; 2485 struct ixgbe_hw *hw = &adapter->hw;
2497 u32 offset; 2486 u32 offset;
2498 u16 link; 2487 u16 link;
2499 int bus_info_valid = TRUE; 2488 int bus_info_valid = TRUE;
2500 2489
2501 /* Some devices are behind an internal bridge */ 2490 /* Some devices are behind an internal bridge */
2502 switch (hw->device_id) { 2491 switch (hw->device_id) {
2503 case IXGBE_DEV_ID_82599_SFP_SF_QP: 2492 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2504 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 2493 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2505 goto get_parent_info; 2494 goto get_parent_info;
2506 default: 2495 default:
2507 break; 2496 break;
2508 } 2497 }
2509 2498
2510 ixgbe_get_bus_info(hw); 2499 ixgbe_get_bus_info(hw);
2511 2500
2512 /* 2501 /*
2513 * Some devices don't use PCI-E, but there is no need 2502 * Some devices don't use PCI-E, but there is no need
2514 * to display "Unknown" for bus speed and width. 2503 * to display "Unknown" for bus speed and width.
2515 */ 2504 */
2516 switch (hw->mac.type) { 2505 switch (hw->mac.type) {
2517 case ixgbe_mac_X550EM_x: 2506 case ixgbe_mac_X550EM_x:
2518 case ixgbe_mac_X550EM_a: 2507 case ixgbe_mac_X550EM_a:
2519 return; 2508 return;
2520 default: 2509 default:
2521 goto display; 2510 goto display;
2522 } 2511 }
2523 2512
2524get_parent_info: 2513get_parent_info:
2525 /* 2514 /*
2526 * For the Quad port adapter we need to parse back 2515 * For the Quad port adapter we need to parse back
2527 * up the PCI tree to find the speed of the expansion 2516 * up the PCI tree to find the speed of the expansion
2528 * slot into which this adapter is plugged. A bit more work. 2517 * slot into which this adapter is plugged. A bit more work.
2529 */ 2518 */
2530 dev = device_parent(device_parent(dev)); 2519 dev = device_parent(device_parent(dev));
2531#if 0 2520#if 0
2532#ifdef IXGBE_DEBUG 2521#ifdef IXGBE_DEBUG
2533 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2522 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2534 pci_get_slot(dev), pci_get_function(dev)); 2523 pci_get_slot(dev), pci_get_function(dev));
2535#endif 2524#endif
2536 dev = device_parent(device_parent(dev)); 2525 dev = device_parent(device_parent(dev));
2537#ifdef IXGBE_DEBUG 2526#ifdef IXGBE_DEBUG
2538 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2527 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2539 pci_get_slot(dev), pci_get_function(dev)); 2528 pci_get_slot(dev), pci_get_function(dev));
2540#endif 2529#endif
2541#endif 2530#endif
2542 /* Now get the PCI Express Capabilities offset */ 2531 /* Now get the PCI Express Capabilities offset */
2543 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag, 2532 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2544 PCI_CAP_PCIEXPRESS, &offset, NULL)) { 2533 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2545 /* 2534 /*
2546 * Hmm...can't get PCI-Express capabilities. 2535 * Hmm...can't get PCI-Express capabilities.
2547 * Falling back to default method. 2536 * Falling back to default method.
2548 */ 2537 */
2549 bus_info_valid = FALSE; 2538 bus_info_valid = FALSE;
2550 ixgbe_get_bus_info(hw); 2539 ixgbe_get_bus_info(hw);
2551 goto display; 2540 goto display;
2552 } 2541 }
2553 /* ...and read the Link Status Register */ 2542 /* ...and read the Link Status Register */
2554 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag, 2543 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2555 offset + PCIE_LCSR) >> 16; 2544 offset + PCIE_LCSR) >> 16;
2556 ixgbe_set_pci_config_data_generic(hw, link); 2545 ixgbe_set_pci_config_data_generic(hw, link);
2557 2546
2558display: 2547display:
2559 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n", 2548 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2560 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2549 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2561 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2550 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2562 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2551 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2563 "Unknown"), 2552 "Unknown"),
2564 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" : 2553 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2565 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" : 2554 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2566 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" : 2555 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2567 "Unknown")); 2556 "Unknown"));
2568 2557
2569 if (bus_info_valid) { 2558 if (bus_info_valid) {
2570 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2559 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2571 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2560 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2572 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2561 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2573 device_printf(dev, "PCI-Express bandwidth available" 2562 device_printf(dev, "PCI-Express bandwidth available"
2574 " for this card\n is not sufficient for" 2563 " for this card\n is not sufficient for"
2575 " optimal performance.\n"); 2564 " optimal performance.\n");
2576 device_printf(dev, "For optimal performance a x8 " 2565 device_printf(dev, "For optimal performance a x8 "
2577 "PCIE, or x4 PCIE Gen2 slot is required.\n"); 2566 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2578 } 2567 }
2579 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2568 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2580 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2569 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2581 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2570 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2582 device_printf(dev, "PCI-Express bandwidth available" 2571 device_printf(dev, "PCI-Express bandwidth available"
2583 " for this card\n is not sufficient for" 2572 " for this card\n is not sufficient for"
2584 " optimal performance.\n"); 2573 " optimal performance.\n");
2585 device_printf(dev, "For optimal performance a x8 " 2574 device_printf(dev, "For optimal performance a x8 "
2586 "PCIE Gen3 slot is required.\n"); 2575 "PCIE Gen3 slot is required.\n");
2587 } 2576 }
2588 } else 2577 } else
2589 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); 2578 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2590 2579
2591 return; 2580 return;
2592} /* ixgbe_get_slot_info */ 2581} /* ixgbe_get_slot_info */
2593 2582
2594/************************************************************************ 2583/************************************************************************
2595 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets 2584 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2596 ************************************************************************/ 2585 ************************************************************************/
2597static inline void 2586static inline void
2598ixgbe_enable_queue(struct adapter *adapter, u32 vector) 2587ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2599{ 2588{
2600 struct ixgbe_hw *hw = &adapter->hw; 2589 struct ixgbe_hw *hw = &adapter->hw;
2601 struct ix_queue *que = &adapter->queues[vector]; 2590 struct ix_queue *que = &adapter->queues[vector];
2602 u64 queue = 1ULL << vector; 2591 u64 queue = 1ULL << vector;
2603 u32 mask; 2592 u32 mask;
2604 2593
2605 mutex_enter(&que->dc_mtx); 2594 mutex_enter(&que->dc_mtx);
2606 if (que->disabled_count > 0 && --que->disabled_count > 0) 2595 if (que->disabled_count > 0 && --que->disabled_count > 0)
2607 goto out; 2596 goto out;
2608 2597
2609 if (hw->mac.type == ixgbe_mac_82598EB) { 2598 if (hw->mac.type == ixgbe_mac_82598EB) {
2610 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 2599 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2611 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 2600 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2612 } else { 2601 } else {
2613 mask = (queue & 0xFFFFFFFF); 2602 mask = (queue & 0xFFFFFFFF);
2614 if (mask) 2603 if (mask)
2615 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 2604 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2616 mask = (queue >> 32); 2605 mask = (queue >> 32);
2617 if (mask) 2606 if (mask)
2618 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 2607 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2619 } 2608 }
2620out: 2609out:
2621 mutex_exit(&que->dc_mtx); 2610 mutex_exit(&que->dc_mtx);
2622} /* ixgbe_enable_queue */ 2611} /* ixgbe_enable_queue */
2623 2612
2624/************************************************************************ 2613/************************************************************************
2625 * ixgbe_disable_queue_internal 2614 * ixgbe_disable_queue_internal
2626 ************************************************************************/ 2615 ************************************************************************/
2627static inline void 2616static inline void
2628ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok) 2617ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2629{ 2618{
2630 struct ixgbe_hw *hw = &adapter->hw; 2619 struct ixgbe_hw *hw = &adapter->hw;
2631 struct ix_queue *que = &adapter->queues[vector]; 2620 struct ix_queue *que = &adapter->queues[vector];
2632 u64 queue = 1ULL << vector; 2621 u64 queue = 1ULL << vector;
2633 u32 mask; 2622 u32 mask;
2634 2623
2635 mutex_enter(&que->dc_mtx); 2624 mutex_enter(&que->dc_mtx);
2636 2625
2637 if (que->disabled_count > 0) { 2626 if (que->disabled_count > 0) {
2638 if (nestok) 2627 if (nestok)
2639 que->disabled_count++; 2628 que->disabled_count++;
2640 goto out; 2629 goto out;
2641 } 2630 }
2642 que->disabled_count++; 2631 que->disabled_count++;
2643 2632
2644 if (hw->mac.type == ixgbe_mac_82598EB) { 2633 if (hw->mac.type == ixgbe_mac_82598EB) {
2645 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 2634 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2646 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 2635 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2647 } else { 2636 } else {
2648 mask = (queue & 0xFFFFFFFF); 2637 mask = (queue & 0xFFFFFFFF);
2649 if (mask) 2638 if (mask)
2650 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 2639 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2651 mask = (queue >> 32); 2640 mask = (queue >> 32);
2652 if (mask) 2641 if (mask)
2653 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 2642 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2654 } 2643 }
2655out: 2644out:
2656 mutex_exit(&que->dc_mtx); 2645 mutex_exit(&que->dc_mtx);
2657} /* ixgbe_disable_queue_internal */ 2646} /* ixgbe_disable_queue_internal */
2658 2647
2659/************************************************************************ 2648/************************************************************************
2660 * ixgbe_disable_queue 2649 * ixgbe_disable_queue
2661 ************************************************************************/ 2650 ************************************************************************/
2662static inline void 2651static inline void
2663ixgbe_disable_queue(struct adapter *adapter, u32 vector) 2652ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2664{ 2653{
2665 2654
2666 ixgbe_disable_queue_internal(adapter, vector, true); 2655 ixgbe_disable_queue_internal(adapter, vector, true);
2667} /* ixgbe_disable_queue */ 2656} /* ixgbe_disable_queue */
2668 2657
2669/************************************************************************ 2658/************************************************************************
2670 * ixgbe_sched_handle_que - schedule deferred packet processing 2659 * ixgbe_sched_handle_que - schedule deferred packet processing
2671 ************************************************************************/ 2660 ************************************************************************/
2672static inline void 2661static inline void
2673ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que) 2662ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2674{ 2663{
2675 2664
2676 if (que->txrx_use_workqueue) { 2665 if (que->txrx_use_workqueue) {
2677 /* 2666 /*
2678 * adapter->que_wq is bound to each CPU instead of 2667 * adapter->que_wq is bound to each CPU instead of
2679 * each NIC queue to reduce workqueue kthread. As we 2668 * each NIC queue to reduce workqueue kthread. As we
2680 * should consider about interrupt affinity in this 2669 * should consider about interrupt affinity in this
2681 * function, the workqueue kthread must be WQ_PERCPU. 2670 * function, the workqueue kthread must be WQ_PERCPU.
2682 * If create WQ_PERCPU workqueue kthread for each NIC 2671 * If create WQ_PERCPU workqueue kthread for each NIC
2683 * queue, that number of created workqueue kthread is 2672 * queue, that number of created workqueue kthread is
2684 * (number of used NIC queue) * (number of CPUs) = 2673 * (number of used NIC queue) * (number of CPUs) =
2685 * (number of CPUs) ^ 2 most often. 2674 * (number of CPUs) ^ 2 most often.
2686 * 2675 *
2687 * The same NIC queue's interrupts are avoided by 2676 * The same NIC queue's interrupts are avoided by
2688 * masking the queue's interrupt. And different 2677 * masking the queue's interrupt. And different
2689 * NIC queue's interrupts use different struct work 2678 * NIC queue's interrupts use different struct work
2690 * (que->wq_cookie). So, "enqueued flag" to avoid 2679 * (que->wq_cookie). So, "enqueued flag" to avoid
2691 * twice workqueue_enqueue() is not required . 2680 * twice workqueue_enqueue() is not required .
2692 */ 2681 */
2693 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu()); 2682 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2694 } else { 2683 } else {
2695 softint_schedule(que->que_si); 2684 softint_schedule(que->que_si);
2696 } 2685 }
2697} 2686}
2698 2687
2699/************************************************************************ 2688/************************************************************************
2700 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2689 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2701 ************************************************************************/ 2690 ************************************************************************/
2702static int 2691static int
2703ixgbe_msix_que(void *arg) 2692ixgbe_msix_que(void *arg)
2704{ 2693{
2705 struct ix_queue *que = arg; 2694 struct ix_queue *que = arg;
2706 struct adapter *adapter = que->adapter; 2695 struct adapter *adapter = que->adapter;
2707 struct ifnet *ifp = adapter->ifp; 2696 struct ifnet *ifp = adapter->ifp;
2708 struct tx_ring *txr = que->txr; 2697 struct tx_ring *txr = que->txr;
2709 struct rx_ring *rxr = que->rxr; 2698 struct rx_ring *rxr = que->rxr;
2710 bool more; 2699 bool more;
2711 u32 newitr = 0; 2700 u32 newitr = 0;
2712 2701
2713 /* Protect against spurious interrupts */ 2702 /* Protect against spurious interrupts */
2714 if ((ifp->if_flags & IFF_RUNNING) == 0) 2703 if ((ifp->if_flags & IFF_RUNNING) == 0)
2715 return 0; 2704 return 0;
2716 2705
2717 ixgbe_disable_queue(adapter, que->msix); 2706 ixgbe_disable_queue(adapter, que->msix);
2718 ++que->irqs.ev_count; 2707 ++que->irqs.ev_count;
2719 2708
2720 /* 2709 /*
2721 * Don't change "que->txrx_use_workqueue" from this point to avoid 2710 * Don't change "que->txrx_use_workqueue" from this point to avoid
2722 * flip-flopping softint/workqueue mode in one deferred processing. 2711 * flip-flopping softint/workqueue mode in one deferred processing.
2723 */ 2712 */
2724 que->txrx_use_workqueue = adapter->txrx_use_workqueue; 2713 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2725 2714
2726#ifdef __NetBSD__ 2715#ifdef __NetBSD__
2727 /* Don't run ixgbe_rxeof in interrupt context */ 2716 /* Don't run ixgbe_rxeof in interrupt context */
2728 more = true; 2717 more = true;
2729#else 2718#else
2730 more = ixgbe_rxeof(que); 2719 more = ixgbe_rxeof(que);
2731#endif 2720#endif
2732 2721
2733 IXGBE_TX_LOCK(txr); 2722 IXGBE_TX_LOCK(txr);
2734 ixgbe_txeof(txr); 2723 ixgbe_txeof(txr);
2735 IXGBE_TX_UNLOCK(txr); 2724 IXGBE_TX_UNLOCK(txr);
2736 2725
2737 /* Do AIM now? */ 2726 /* Do AIM now? */
2738 2727
2739 if (adapter->enable_aim == false) 2728 if (adapter->enable_aim == false)
2740 goto no_calc; 2729 goto no_calc;
2741 /* 2730 /*
2742 * Do Adaptive Interrupt Moderation: 2731 * Do Adaptive Interrupt Moderation:
2743 * - Write out last calculated setting 2732 * - Write out last calculated setting
2744 * - Calculate based on average size over 2733 * - Calculate based on average size over
2745 * the last interval. 2734 * the last interval.
2746 */ 2735 */
2747 if (que->eitr_setting) 2736 if (que->eitr_setting)
2748 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting); 2737 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2749 2738
2750 que->eitr_setting = 0; 2739 que->eitr_setting = 0;
2751 2740
2752 /* Idle, do nothing */ 2741 /* Idle, do nothing */
2753 if ((txr->bytes == 0) && (rxr->bytes == 0)) 2742 if ((txr->bytes == 0) && (rxr->bytes == 0))
2754 goto no_calc; 2743 goto no_calc;
2755 2744
2756 if ((txr->bytes) && (txr->packets)) 2745 if ((txr->bytes) && (txr->packets))
2757 newitr = txr->bytes/txr->packets; 2746 newitr = txr->bytes/txr->packets;
2758 if ((rxr->bytes) && (rxr->packets)) 2747 if ((rxr->bytes) && (rxr->packets))
2759 newitr = uimax(newitr, (rxr->bytes / rxr->packets)); 2748 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2760 newitr += 24; /* account for hardware frame, crc */ 2749 newitr += 24; /* account for hardware frame, crc */
2761 2750
2762 /* set an upper boundary */ 2751 /* set an upper boundary */
2763 newitr = uimin(newitr, 3000); 2752 newitr = uimin(newitr, 3000);
2764 2753
2765 /* Be nice to the mid range */ 2754 /* Be nice to the mid range */
2766 if ((newitr > 300) && (newitr < 1200)) 2755 if ((newitr > 300) && (newitr < 1200))
2767 newitr = (newitr / 3); 2756 newitr = (newitr / 3);
2768 else 2757 else
2769 newitr = (newitr / 2); 2758 newitr = (newitr / 2);
2770 2759
2771 /* 2760 /*
2772 * When RSC is used, ITR interval must be larger than RSC_DELAY. 2761 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2773 * Currently, we use 2us for RSC_DELAY. The minimum value is always 2762 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2774 * greater than 2us on 100M (and 10M?(not documented)), but it's not 2763 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2775 * on 1G and higher. 2764 * on 1G and higher.
2776 */ 2765 */
2777 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 2766 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2778 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 2767 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2779 if (newitr < IXGBE_MIN_RSC_EITR_10G1G) 2768 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2780 newitr = IXGBE_MIN_RSC_EITR_10G1G; 2769 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2781 } 2770 }
2782 2771
2783 /* save for next interrupt */ 2772 /* save for next interrupt */
2784 que->eitr_setting = newitr; 2773 que->eitr_setting = newitr;
2785 2774
2786 /* Reset state */ 2775 /* Reset state */
2787 txr->bytes = 0; 2776 txr->bytes = 0;
2788 txr->packets = 0; 2777 txr->packets = 0;
2789 rxr->bytes = 0; 2778 rxr->bytes = 0;
2790 rxr->packets = 0; 2779 rxr->packets = 0;
2791 2780
2792no_calc: 2781no_calc:
2793 if (more) 2782 if (more)
2794 ixgbe_sched_handle_que(adapter, que); 2783 ixgbe_sched_handle_que(adapter, que);
2795 else 2784 else
2796 ixgbe_enable_queue(adapter, que->msix); 2785 ixgbe_enable_queue(adapter, que->msix);
2797 2786
2798 return 1; 2787 return 1;
2799} /* ixgbe_msix_que */ 2788} /* ixgbe_msix_que */
2800 2789
2801/************************************************************************ 2790/************************************************************************
2802 * ixgbe_media_status - Media Ioctl callback 2791 * ixgbe_media_status - Media Ioctl callback
2803 * 2792 *
2804 * Called whenever the user queries the status of 2793 * Called whenever the user queries the status of
2805 * the interface using ifconfig. 2794 * the interface using ifconfig.
2806 ************************************************************************/ 2795 ************************************************************************/
2807static void 2796static void
2808ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2797ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2809{ 2798{
2810 struct adapter *adapter = ifp->if_softc; 2799 struct adapter *adapter = ifp->if_softc;
2811 struct ixgbe_hw *hw = &adapter->hw; 2800 struct ixgbe_hw *hw = &adapter->hw;
2812 int layer; 2801 int layer;
2813 2802
2814 INIT_DEBUGOUT("ixgbe_media_status: begin"); 2803 INIT_DEBUGOUT("ixgbe_media_status: begin");
2815 IXGBE_CORE_LOCK(adapter); 2804 IXGBE_CORE_LOCK(adapter);
2816 ixgbe_update_link_status(adapter); 2805 ixgbe_update_link_status(adapter);
2817 2806
2818 ifmr->ifm_status = IFM_AVALID; 2807 ifmr->ifm_status = IFM_AVALID;
2819 ifmr->ifm_active = IFM_ETHER; 2808 ifmr->ifm_active = IFM_ETHER;
2820 2809
2821 if (adapter->link_active != LINK_STATE_UP) { 2810 if (adapter->link_active != LINK_STATE_UP) {
2822 ifmr->ifm_active |= IFM_NONE; 2811 ifmr->ifm_active |= IFM_NONE;
2823 IXGBE_CORE_UNLOCK(adapter); 2812 IXGBE_CORE_UNLOCK(adapter);
2824 return; 2813 return;
2825 } 2814 }
2826 2815
2827 ifmr->ifm_status |= IFM_ACTIVE; 2816 ifmr->ifm_status |= IFM_ACTIVE;
2828 layer = adapter->phy_layer; 2817 layer = adapter->phy_layer;
2829 2818
2830 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || 2819 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
2831 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T || 2820 layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
2832 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T || 2821 layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
2833 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || 2822 layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
2834 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || 2823 layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
2835 layer & IXGBE_PHYSICAL_LAYER_10BASE_T) 2824 layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
2836 switch (adapter->link_speed) { 2825 switch (adapter->link_speed) {
2837 case IXGBE_LINK_SPEED_10GB_FULL: 2826 case IXGBE_LINK_SPEED_10GB_FULL:
2838 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 2827 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2839 break; 2828 break;
2840 case IXGBE_LINK_SPEED_5GB_FULL: 2829 case IXGBE_LINK_SPEED_5GB_FULL:
2841 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 2830 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
2842 break; 2831 break;
2843 case IXGBE_LINK_SPEED_2_5GB_FULL: 2832 case IXGBE_LINK_SPEED_2_5GB_FULL:
2844 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 2833 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
2845 break; 2834 break;
2846 case IXGBE_LINK_SPEED_1GB_FULL: 2835 case IXGBE_LINK_SPEED_1GB_FULL:
2847 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 2836 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
2848 break; 2837 break;
2849 case IXGBE_LINK_SPEED_100_FULL: 2838 case IXGBE_LINK_SPEED_100_FULL:
2850 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 2839 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
2851 break; 2840 break;
2852 case IXGBE_LINK_SPEED_10_FULL: 2841 case IXGBE_LINK_SPEED_10_FULL:
2853 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 2842 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
2854 break; 2843 break;
2855 } 2844 }
2856 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 2845 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
2857 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) 2846 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
2858 switch (adapter->link_speed) { 2847 switch (adapter->link_speed) {
2859 case IXGBE_LINK_SPEED_10GB_FULL: 2848 case IXGBE_LINK_SPEED_10GB_FULL:
2860 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; 2849 ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
2861 break; 2850 break;
2862 } 2851 }
2863 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) 2852 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
2864 switch (adapter->link_speed) { 2853 switch (adapter->link_speed) {
2865 case IXGBE_LINK_SPEED_10GB_FULL: 2854 case IXGBE_LINK_SPEED_10GB_FULL:
2866 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; 2855 ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
2867 break; 2856 break;
2868 case IXGBE_LINK_SPEED_1GB_FULL: 2857 case IXGBE_LINK_SPEED_1GB_FULL:
2869 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2858 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2870 break; 2859 break;
2871 } 2860 }
2872 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) 2861 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
2873 switch (adapter->link_speed) { 2862 switch (adapter->link_speed) {
2874 case IXGBE_LINK_SPEED_10GB_FULL: 2863 case IXGBE_LINK_SPEED_10GB_FULL:
2875 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; 2864 ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
2876 break; 2865 break;
2877 case IXGBE_LINK_SPEED_1GB_FULL: 2866 case IXGBE_LINK_SPEED_1GB_FULL:
2878 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; 2867 ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
2879 break; 2868 break;
2880 } 2869 }
2881 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || 2870 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
2882 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) 2871 layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
2883 switch (adapter->link_speed) { 2872 switch (adapter->link_speed) {
2884 case IXGBE_LINK_SPEED_10GB_FULL: 2873 case IXGBE_LINK_SPEED_10GB_FULL:
2885 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; 2874 ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
2886 break; 2875 break;
2887 case IXGBE_LINK_SPEED_1GB_FULL: 2876 case IXGBE_LINK_SPEED_1GB_FULL:
2888 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 2877 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
2889 break; 2878 break;
2890 } 2879 }
2891 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) 2880 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
2892 switch (adapter->link_speed) { 2881 switch (adapter->link_speed) {
2893 case IXGBE_LINK_SPEED_10GB_FULL: 2882 case IXGBE_LINK_SPEED_10GB_FULL:
2894 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; 2883 ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
2895 break; 2884 break;
2896 } 2885 }
2897 /* 2886 /*
2898 * XXX: These need to use the proper media types once 2887 * XXX: These need to use the proper media types once
2899 * they're added. 2888 * they're added.
2900 */ 2889 */
2901 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) 2890 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
2902 switch (adapter->link_speed) { 2891 switch (adapter->link_speed) {
2903 case IXGBE_LINK_SPEED_10GB_FULL: 2892 case IXGBE_LINK_SPEED_10GB_FULL:
2904 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; 2893 ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
2905 break; 2894 break;
2906 case IXGBE_LINK_SPEED_2_5GB_FULL: 2895 case IXGBE_LINK_SPEED_2_5GB_FULL:
2907 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2896 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2908 break; 2897 break;
2909 case IXGBE_LINK_SPEED_1GB_FULL: 2898 case IXGBE_LINK_SPEED_1GB_FULL:
2910 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2899 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2911 break; 2900 break;
2912 } 2901 }
2913 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || 2902 else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
2914 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || 2903 layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
2915 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) 2904 layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
2916 switch (adapter->link_speed) { 2905 switch (adapter->link_speed) {
2917 case IXGBE_LINK_SPEED_10GB_FULL: 2906 case IXGBE_LINK_SPEED_10GB_FULL:
2918 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; 2907 ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
2919 break; 2908 break;
2920 case IXGBE_LINK_SPEED_2_5GB_FULL: 2909 case IXGBE_LINK_SPEED_2_5GB_FULL:
2921 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; 2910 ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
2922 break; 2911 break;
2923 case IXGBE_LINK_SPEED_1GB_FULL: 2912 case IXGBE_LINK_SPEED_1GB_FULL:
2924 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; 2913 ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
2925 break; 2914 break;
2926 } 2915 }
2927 2916
2928 /* If nothing is recognized... */ 2917 /* If nothing is recognized... */
2929#if 0 2918#if 0
2930 if (IFM_SUBTYPE(ifmr->ifm_active) == 0) 2919 if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
2931 ifmr->ifm_active |= IFM_UNKNOWN; 2920 ifmr->ifm_active |= IFM_UNKNOWN;
2932#endif 2921#endif
2933 2922
2934 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); 2923 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
2935 2924
2936 /* Display current flow control setting used on link */ 2925 /* Display current flow control setting used on link */
2937 if (hw->fc.current_mode == ixgbe_fc_rx_pause || 2926 if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
2938 hw->fc.current_mode == ixgbe_fc_full) 2927 hw->fc.current_mode == ixgbe_fc_full)
2939 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2928 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2940 if (hw->fc.current_mode == ixgbe_fc_tx_pause || 2929 if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
2941 hw->fc.current_mode == ixgbe_fc_full) 2930 hw->fc.current_mode == ixgbe_fc_full)
2942 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2931 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2943 2932
2944 IXGBE_CORE_UNLOCK(adapter); 2933 IXGBE_CORE_UNLOCK(adapter);
2945 2934
2946 return; 2935 return;
2947} /* ixgbe_media_status */ 2936} /* ixgbe_media_status */
2948 2937
2949/************************************************************************ 2938/************************************************************************
2950 * ixgbe_media_change - Media Ioctl callback 2939 * ixgbe_media_change - Media Ioctl callback
2951 * 2940 *
2952 * Called when the user changes speed/duplex using 2941 * Called when the user changes speed/duplex using
2953 * media/mediopt option with ifconfig. 2942 * media/mediopt option with ifconfig.
2954 ************************************************************************/ 2943 ************************************************************************/
2955static int 2944static int
2956ixgbe_media_change(struct ifnet *ifp) 2945ixgbe_media_change(struct ifnet *ifp)
2957{ 2946{
2958 struct adapter *adapter = ifp->if_softc; 2947 struct adapter *adapter = ifp->if_softc;
2959 struct ifmedia *ifm = &adapter->media; 2948 struct ifmedia *ifm = &adapter->media;
2960 struct ixgbe_hw *hw = &adapter->hw; 2949 struct ixgbe_hw *hw = &adapter->hw;
2961 ixgbe_link_speed speed = 0; 2950 ixgbe_link_speed speed = 0;
2962 ixgbe_link_speed link_caps = 0; 2951 ixgbe_link_speed link_caps = 0;
2963 bool negotiate = false; 2952 bool negotiate = false;
2964 s32 err = IXGBE_NOT_IMPLEMENTED; 2953 s32 err = IXGBE_NOT_IMPLEMENTED;
2965 2954
2966 INIT_DEBUGOUT("ixgbe_media_change: begin"); 2955 INIT_DEBUGOUT("ixgbe_media_change: begin");
2967 2956
2968 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2957 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2969 return (EINVAL); 2958 return (EINVAL);
2970 2959
2971 if (hw->phy.media_type == ixgbe_media_type_backplane) 2960 if (hw->phy.media_type == ixgbe_media_type_backplane)
2972 return (EPERM); 2961 return (EPERM);
2973 2962
2974 IXGBE_CORE_LOCK(adapter); 2963 IXGBE_CORE_LOCK(adapter);
2975 /* 2964 /*
2976 * We don't actually need to check against the supported 2965 * We don't actually need to check against the supported
2977 * media types of the adapter; ifmedia will take care of 2966 * media types of the adapter; ifmedia will take care of
2978 * that for us. 2967 * that for us.
2979 */ 2968 */
2980 switch (IFM_SUBTYPE(ifm->ifm_media)) { 2969 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2981 case IFM_AUTO: 2970 case IFM_AUTO:
2982 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 2971 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
2983 &negotiate); 2972 &negotiate);
2984 if (err != IXGBE_SUCCESS) { 2973 if (err != IXGBE_SUCCESS) {
2985 device_printf(adapter->dev, "Unable to determine " 2974 device_printf(adapter->dev, "Unable to determine "
2986 "supported advertise speeds\n"); 2975 "supported advertise speeds\n");
2987 IXGBE_CORE_UNLOCK(adapter); 2976 IXGBE_CORE_UNLOCK(adapter);
2988 return (ENODEV); 2977 return (ENODEV);
2989 } 2978 }
2990 speed |= link_caps; 2979 speed |= link_caps;
2991 break; 2980 break;
2992 case IFM_10G_T: 2981 case IFM_10G_T:
2993 case IFM_10G_LRM: 2982 case IFM_10G_LRM:
2994 case IFM_10G_LR: 2983 case IFM_10G_LR:
2995 case IFM_10G_TWINAX: 2984 case IFM_10G_TWINAX:
2996 case IFM_10G_SR: 2985 case IFM_10G_SR:
2997 case IFM_10G_CX4: 2986 case IFM_10G_CX4:
2998 case IFM_10G_KR: 2987 case IFM_10G_KR:
2999 case IFM_10G_KX4: 2988 case IFM_10G_KX4:
3000 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2989 speed |= IXGBE_LINK_SPEED_10GB_FULL;
3001 break; 2990 break;
3002 case IFM_5000_T: 2991 case IFM_5000_T:
3003 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2992 speed |= IXGBE_LINK_SPEED_5GB_FULL;
3004 break; 2993 break;
3005 case IFM_2500_T: 2994 case IFM_2500_T:
3006 case IFM_2500_KX: 2995 case IFM_2500_KX:
3007 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2996 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
3008 break; 2997 break;
3009 case IFM_1000_T: 2998 case IFM_1000_T:
3010 case IFM_1000_LX: 2999 case IFM_1000_LX:
3011 case IFM_1000_SX: 3000 case IFM_1000_SX:
3012 case IFM_1000_KX: 3001 case IFM_1000_KX:
3013 speed |= IXGBE_LINK_SPEED_1GB_FULL; 3002 speed |= IXGBE_LINK_SPEED_1GB_FULL;
3014 break; 3003 break;
3015 case IFM_100_TX: 3004 case IFM_100_TX:
3016 speed |= IXGBE_LINK_SPEED_100_FULL; 3005 speed |= IXGBE_LINK_SPEED_100_FULL;
3017 break; 3006 break;
3018 case IFM_10_T: 3007 case IFM_10_T:
3019 speed |= IXGBE_LINK_SPEED_10_FULL; 3008 speed |= IXGBE_LINK_SPEED_10_FULL;
3020 break; 3009 break;
3021 case IFM_NONE: 3010 case IFM_NONE:
3022 break; 3011 break;
3023 default: 3012 default:
3024 goto invalid; 3013 goto invalid;
3025 } 3014 }
3026 3015
3027 hw->mac.autotry_restart = TRUE; 3016 hw->mac.autotry_restart = TRUE;
3028 hw->mac.ops.setup_link(hw, speed, TRUE); 3017 hw->mac.ops.setup_link(hw, speed, TRUE);
3029 adapter->advertise = 0; 3018 adapter->advertise = 0;
3030 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) { 3019 if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
3031 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0) 3020 if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
3032 adapter->advertise |= 1 << 2; 3021 adapter->advertise |= 1 << 2;
3033 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0) 3022 if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
3034 adapter->advertise |= 1 << 1; 3023 adapter->advertise |= 1 << 1;
3035 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0) 3024 if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
3036 adapter->advertise |= 1 << 0; 3025 adapter->advertise |= 1 << 0;
3037 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0) 3026 if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
3038 adapter->advertise |= 1 << 3; 3027 adapter->advertise |= 1 << 3;
3039 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0) 3028 if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
3040 adapter->advertise |= 1 << 4; 3029 adapter->advertise |= 1 << 4;
3041 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0) 3030 if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
3042 adapter->advertise |= 1 << 5; 3031 adapter->advertise |= 1 << 5;
3043 } 3032 }
3044 3033
3045 IXGBE_CORE_UNLOCK(adapter); 3034 IXGBE_CORE_UNLOCK(adapter);
3046 return (0); 3035 return (0);
3047 3036
3048invalid: 3037invalid:
3049 device_printf(adapter->dev, "Invalid media type!\n"); 3038 device_printf(adapter->dev, "Invalid media type!\n");
3050 IXGBE_CORE_UNLOCK(adapter); 3039 IXGBE_CORE_UNLOCK(adapter);
3051 3040
3052 return (EINVAL); 3041 return (EINVAL);
3053} /* ixgbe_media_change */ 3042} /* ixgbe_media_change */
3054 3043
3055/************************************************************************ 3044/************************************************************************
3056 * ixgbe_set_promisc 3045 * ixgbe_set_promisc
3057 ************************************************************************/ 3046 ************************************************************************/
3058static void 3047static void
3059ixgbe_set_promisc(struct adapter *adapter) 3048ixgbe_set_promisc(struct adapter *adapter)
3060{ 3049{
3061 struct ifnet *ifp = adapter->ifp; 3050 struct ifnet *ifp = adapter->ifp;
3062 int mcnt = 0; 3051 int mcnt = 0;
3063 u32 rctl; 3052 u32 rctl;
3064 struct ether_multi *enm; 3053 struct ether_multi *enm;
3065 struct ether_multistep step; 3054 struct ether_multistep step;
3066 struct ethercom *ec = &adapter->osdep.ec; 3055 struct ethercom *ec = &adapter->osdep.ec;
3067 3056
3068 KASSERT(mutex_owned(&adapter->core_mtx)); 3057 KASSERT(mutex_owned(&adapter->core_mtx));
3069 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3058 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
3070 rctl &= (~IXGBE_FCTRL_UPE); 3059 rctl &= (~IXGBE_FCTRL_UPE);
3071 ETHER_LOCK(ec); 3060 ETHER_LOCK(ec);
3072 if (ec->ec_flags & ETHER_F_ALLMULTI) 3061 if (ec->ec_flags & ETHER_F_ALLMULTI)
3073 mcnt = MAX_NUM_MULTICAST_ADDRESSES; 3062 mcnt = MAX_NUM_MULTICAST_ADDRESSES;
3074 else { 3063 else {
3075 ETHER_FIRST_MULTI(step, ec, enm); 3064 ETHER_FIRST_MULTI(step, ec, enm);
3076 while (enm != NULL) { 3065 while (enm != NULL) {
3077 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 3066 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
3078 break; 3067 break;
3079 mcnt++; 3068 mcnt++;
3080 ETHER_NEXT_MULTI(step, enm); 3069 ETHER_NEXT_MULTI(step, enm);
3081 } 3070 }
3082 } 3071 }
3083 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) 3072 if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
3084 rctl &= (~IXGBE_FCTRL_MPE); 3073 rctl &= (~IXGBE_FCTRL_MPE);
3085 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 3074 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
3086 3075
3087 if (ifp->if_flags & IFF_PROMISC) { 3076 if (ifp->if_flags & IFF_PROMISC) {
3088 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3077 rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3089 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 3078 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
3090 } else if (ec->ec_flags & ETHER_F_ALLMULTI) { 3079 } else if (ec->ec_flags & ETHER_F_ALLMULTI) {
3091 rctl |= IXGBE_FCTRL_MPE; 3080 rctl |= IXGBE_FCTRL_MPE;
3092 rctl &= ~IXGBE_FCTRL_UPE; 3081 rctl &= ~IXGBE_FCTRL_UPE;
3093 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); 3082 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
3094 } 3083 }
3095 ETHER_UNLOCK(ec); 3084 ETHER_UNLOCK(ec);
3096} /* ixgbe_set_promisc */ 3085} /* ixgbe_set_promisc */
3097 3086
3098/************************************************************************ 3087/************************************************************************
3099 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 3088 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3100 ************************************************************************/ 3089 ************************************************************************/
3101static int 3090static int
3102ixgbe_msix_link(void *arg) 3091ixgbe_msix_link(void *arg)
3103{ 3092{
3104 struct adapter *adapter = arg; 3093 struct adapter *adapter = arg;
3105 struct ixgbe_hw *hw = &adapter->hw; 3094 struct ixgbe_hw *hw = &adapter->hw;
3106 u32 eicr, eicr_mask; 3095 u32 eicr, eicr_mask;
3107 s32 retval; 3096 s32 retval;
3108 3097
3109 ++adapter->link_irq.ev_count; 3098 ++adapter->link_irq.ev_count;
3110 3099
3111 /* Pause other interrupts */ 3100 /* Pause other interrupts */
3112 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 3101 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3113 3102
3114 /* First get the cause */ 3103 /* First get the cause */
3115 /* 3104 /*
3116 * The specifications of 82598, 82599, X540 and X550 say EICS register 3105 * The specifications of 82598, 82599, X540 and X550 say EICS register
3117 * is write only. However, Linux says it is a workaround for silicon 3106 * is write only. However, Linux says it is a workaround for silicon
3118 * errata to read EICS instead of EICR to get interrupt cause. It seems 3107 * errata to read EICS instead of EICR to get interrupt cause. It seems
3119 * there is a problem about read clear mechanism for EICR register. 3108 * there is a problem about read clear mechanism for EICR register.
3120 */ 3109 */
3121 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 3110 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3122 /* Be sure the queue bits are not cleared */ 3111 /* Be sure the queue bits are not cleared */
3123 eicr &= ~IXGBE_EICR_RTX_QUEUE; 3112 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3124 /* Clear interrupt with write */ 3113 /* Clear interrupt with write */
3125 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3114 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3126 3115
 3116 if (ixgbe_is_sfp(hw)) {
 3117 /* Pluggable optics-related interrupt */
 3118 if (hw->mac.type >= ixgbe_mac_X540)
 3119 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
 3120 else
 3121 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
 3122
 3123 /*
 3124 * An interrupt might not arrive when a module is inserted.
 3125 * When an link status change interrupt occurred and the driver
 3126 * still regard SFP as unplugged, issue the module softint
 3127 * and then issue LSC interrupt.
 3128 */
 3129 if ((eicr & eicr_mask)
 3130 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
 3131 && (eicr & IXGBE_EICR_LSC))) {
 3132 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
 3133 softint_schedule(adapter->mod_si);
 3134 }
 3135
 3136 if ((hw->mac.type == ixgbe_mac_82599EB) &&
 3137 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
 3138 IXGBE_WRITE_REG(hw, IXGBE_EICR,
 3139 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
 3140 softint_schedule(adapter->msf_si);
 3141 }
 3142 }
 3143
3127 /* Link status change */ 3144 /* Link status change */
3128 if (eicr & IXGBE_EICR_LSC) { 3145 if (eicr & IXGBE_EICR_LSC) {
3129 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 3146 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3130 softint_schedule(adapter->link_si); 3147 softint_schedule(adapter->link_si);
3131 } 3148 }
3132 3149
3133 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 3150 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3134 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && 3151 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3135 (eicr & IXGBE_EICR_FLOW_DIR)) { 3152 (eicr & IXGBE_EICR_FLOW_DIR)) {
3136 /* This is probably overkill :) */ 3153 /* This is probably overkill :) */
3137 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) 3154 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3138 return 1; 3155 return 1;
3139 /* Disable the interrupt */ 3156 /* Disable the interrupt */
3140 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); 3157 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3141 softint_schedule(adapter->fdir_si); 3158 softint_schedule(adapter->fdir_si);
3142 } 3159 }
3143 3160
3144 if (eicr & IXGBE_EICR_ECC) { 3161 if (eicr & IXGBE_EICR_ECC) {
3145 device_printf(adapter->dev, 3162 device_printf(adapter->dev,
3146 "CRITICAL: ECC ERROR!! Please Reboot!!\n"); 3163 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3147 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 3164 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3148 } 3165 }
3149 3166
3150 /* Check for over temp condition */ 3167 /* Check for over temp condition */
3151 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 3168 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3152 switch (adapter->hw.mac.type) { 3169 switch (adapter->hw.mac.type) {
3153 case ixgbe_mac_X550EM_a: 3170 case ixgbe_mac_X550EM_a:
3154 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 3171 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3155 break; 3172 break;
3156 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 3173 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3157 IXGBE_EICR_GPI_SDP0_X550EM_a); 3174 IXGBE_EICR_GPI_SDP0_X550EM_a);
3158 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3175 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3159 IXGBE_EICR_GPI_SDP0_X550EM_a); 3176 IXGBE_EICR_GPI_SDP0_X550EM_a);
3160 retval = hw->phy.ops.check_overtemp(hw); 3177 retval = hw->phy.ops.check_overtemp(hw);
3161 if (retval != IXGBE_ERR_OVERTEMP) 3178 if (retval != IXGBE_ERR_OVERTEMP)
3162 break; 3179 break;
3163 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 3180 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3164 device_printf(adapter->dev, "System shutdown required!\n"); 3181 device_printf(adapter->dev, "System shutdown required!\n");
3165 break; 3182 break;
3166 default: 3183 default:
3167 if (!(eicr & IXGBE_EICR_TS)) 3184 if (!(eicr & IXGBE_EICR_TS))
3168 break; 3185 break;
3169 retval = hw->phy.ops.check_overtemp(hw); 3186 retval = hw->phy.ops.check_overtemp(hw);
3170 if (retval != IXGBE_ERR_OVERTEMP) 3187 if (retval != IXGBE_ERR_OVERTEMP)
3171 break; 3188 break;
3172 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 3189 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3173 device_printf(adapter->dev, "System shutdown required!\n"); 3190 device_printf(adapter->dev, "System shutdown required!\n");
3174 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 3191 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3175 break; 3192 break;
3176 } 3193 }
3177 } 3194 }
3178 3195
3179 /* Check for VF message */ 3196 /* Check for VF message */
3180 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && 3197 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3181 (eicr & IXGBE_EICR_MAILBOX)) 3198 (eicr & IXGBE_EICR_MAILBOX))
3182 softint_schedule(adapter->mbx_si); 3199 softint_schedule(adapter->mbx_si);
3183 } 3200 }
3184 3201
3185 if (ixgbe_is_sfp(hw)) { 
3186 /* Pluggable optics-related interrupt */ 
3187 if (hw->mac.type >= ixgbe_mac_X540) 
3188 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 
3189 else 
3190 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 
3191 
3192 if (eicr & eicr_mask) { 
3193 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 
3194 softint_schedule(adapter->mod_si); 
3195 } 
3196 
3197 if ((hw->mac.type == ixgbe_mac_82599EB) && 
3198 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 
3199 IXGBE_WRITE_REG(hw, IXGBE_EICR, 
3200 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 
3201 softint_schedule(adapter->msf_si); 
3202 } 
3203 } 
3204 
3205 /* Check for fan failure */ 3202 /* Check for fan failure */
3206 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 3203 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3207 ixgbe_check_fan_failure(adapter, eicr, TRUE); 3204 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3208 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3205 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3209 } 3206 }
3210 3207
3211 /* External PHY interrupt */ 3208 /* External PHY interrupt */
3212 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3209 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3213 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 3210 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3214 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 3211 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3215 softint_schedule(adapter->phy_si); 3212 softint_schedule(adapter->phy_si);
3216 } 3213 }
3217 3214
3218 /* Re-enable other interrupts */ 3215 /* Re-enable other interrupts */
3219 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 3216 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3220 return 1; 3217 return 1;
3221} /* ixgbe_msix_link */ 3218} /* ixgbe_msix_link */
3222 3219
3223static void 3220static void
3224ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) 3221ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3225{ 3222{
3226 3223
3227 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 3224 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3228 itr |= itr << 16; 3225 itr |= itr << 16;
3229 else 3226 else
3230 itr |= IXGBE_EITR_CNT_WDIS; 3227 itr |= IXGBE_EITR_CNT_WDIS;
3231 3228
3232 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr); 3229 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3233} 3230}
3234 3231
3235 3232
3236/************************************************************************ 3233/************************************************************************
3237 * ixgbe_sysctl_interrupt_rate_handler 3234 * ixgbe_sysctl_interrupt_rate_handler
3238 ************************************************************************/ 3235 ************************************************************************/
3239static int 3236static int
3240ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) 3237ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3241{ 3238{
3242 struct sysctlnode node = *rnode; 3239 struct sysctlnode node = *rnode;
3243 struct ix_queue *que = (struct ix_queue *)node.sysctl_data; 3240 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3244 struct adapter *adapter; 3241 struct adapter *adapter;
3245 uint32_t reg, usec, rate; 3242 uint32_t reg, usec, rate;
3246 int error; 3243 int error;
3247 3244
3248 if (que == NULL) 3245 if (que == NULL)
3249 return 0; 3246 return 0;
3250 3247
3251 adapter = que->adapter; 3248 adapter = que->adapter;
3252 if (ixgbe_fw_recovery_mode_swflag(adapter)) 3249 if (ixgbe_fw_recovery_mode_swflag(adapter))
3253 return (EPERM); 3250 return (EPERM);
3254 3251
3255 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix)); 3252 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3256 usec = ((reg & 0x0FF8) >> 3); 3253 usec = ((reg & 0x0FF8) >> 3);
3257 if (usec > 0) 3254 if (usec > 0)
3258 rate = 500000 / usec; 3255 rate = 500000 / usec;
3259 else 3256 else
3260 rate = 0; 3257 rate = 0;
3261 node.sysctl_data = &rate; 3258 node.sysctl_data = &rate;
3262 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 3259 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3263 if (error || newp == NULL) 3260 if (error || newp == NULL)
3264 return error; 3261 return error;
3265 reg &= ~0xfff; /* default, no limitation */ 3262 reg &= ~0xfff; /* default, no limitation */
3266 if (rate > 0 && rate < 500000) { 3263 if (rate > 0 && rate < 500000) {
3267 if (rate < 1000) 3264 if (rate < 1000)
3268 rate = 1000; 3265 rate = 1000;
3269 reg |= ((4000000/rate) & 0xff8); 3266 reg |= ((4000000/rate) & 0xff8);
3270 /* 3267 /*
3271 * When RSC is used, ITR interval must be larger than 3268 * When RSC is used, ITR interval must be larger than
3272 * RSC_DELAY. Currently, we use 2us for RSC_DELAY. 3269 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
3273 * The minimum value is always greater than 2us on 100M 3270 * The minimum value is always greater than 2us on 100M
3274 * (and 10M?(not documented)), but it's not on 1G and higher. 3271 * (and 10M?(not documented)), but it's not on 1G and higher.
3275 */ 3272 */
3276 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 3273 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3277 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 3274 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3278 if ((adapter->num_queues > 1) 3275 if ((adapter->num_queues > 1)
3279 && (reg < IXGBE_MIN_RSC_EITR_10G1G)) 3276 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3280 return EINVAL; 3277 return EINVAL;
3281 } 3278 }
3282 ixgbe_max_interrupt_rate = rate; 3279 ixgbe_max_interrupt_rate = rate;
3283 } else 3280 } else
3284 ixgbe_max_interrupt_rate = 0; 3281 ixgbe_max_interrupt_rate = 0;
3285 ixgbe_eitr_write(adapter, que->msix, reg); 3282 ixgbe_eitr_write(adapter, que->msix, reg);
3286 3283
3287 return (0); 3284 return (0);
3288} /* ixgbe_sysctl_interrupt_rate_handler */ 3285} /* ixgbe_sysctl_interrupt_rate_handler */
3289 3286
3290const struct sysctlnode * 3287const struct sysctlnode *
3291ixgbe_sysctl_instance(struct adapter *adapter) 3288ixgbe_sysctl_instance(struct adapter *adapter)
3292{ 3289{
3293 const char *dvname; 3290 const char *dvname;
3294 struct sysctllog **log; 3291 struct sysctllog **log;
3295 int rc; 3292 int rc;
3296 const struct sysctlnode *rnode; 3293 const struct sysctlnode *rnode;
3297 3294
3298 if (adapter->sysctltop != NULL) 3295 if (adapter->sysctltop != NULL)
3299 return adapter->sysctltop; 3296 return adapter->sysctltop;
3300 3297
3301 log = &adapter->sysctllog; 3298 log = &adapter->sysctllog;
3302 dvname = device_xname(adapter->dev); 3299 dvname = device_xname(adapter->dev);
3303 3300
3304 if ((rc = sysctl_createv(log, 0, NULL, &rnode, 3301 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3305 0, CTLTYPE_NODE, dvname, 3302 0, CTLTYPE_NODE, dvname,
3306 SYSCTL_DESCR("ixgbe information and settings"), 3303 SYSCTL_DESCR("ixgbe information and settings"),
3307 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 3304 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3308 goto err; 3305 goto err;
3309 3306
3310 return rnode; 3307 return rnode;
3311err: 3308err:
3312 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc); 3309 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3313 return NULL; 3310 return NULL;
3314} 3311}
3315 3312
3316/************************************************************************ 3313/************************************************************************
3317 * ixgbe_add_device_sysctls 3314 * ixgbe_add_device_sysctls
3318 ************************************************************************/ 3315 ************************************************************************/
3319static void 3316static void
3320ixgbe_add_device_sysctls(struct adapter *adapter) 3317ixgbe_add_device_sysctls(struct adapter *adapter)
3321{ 3318{
3322 device_t dev = adapter->dev; 3319 device_t dev = adapter->dev;
3323 struct ixgbe_hw *hw = &adapter->hw; 3320 struct ixgbe_hw *hw = &adapter->hw;
3324 struct sysctllog **log; 3321 struct sysctllog **log;
3325 const struct sysctlnode *rnode, *cnode; 3322 const struct sysctlnode *rnode, *cnode;
3326 3323
3327 log = &adapter->sysctllog; 3324 log = &adapter->sysctllog;
3328 3325
3329 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 3326 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3330 aprint_error_dev(dev, "could not create sysctl root\n"); 3327 aprint_error_dev(dev, "could not create sysctl root\n");
3331 return; 3328 return;
3332 } 3329 }
3333 3330
3334 if (sysctl_createv(log, 0, &rnode, &cnode, 3331 if (sysctl_createv(log, 0, &rnode, &cnode,
3335 CTLFLAG_READWRITE, CTLTYPE_INT, 3332 CTLFLAG_READWRITE, CTLTYPE_INT,
3336 "debug", SYSCTL_DESCR("Debug Info"), 3333 "debug", SYSCTL_DESCR("Debug Info"),
3337 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) 3334 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3338 aprint_error_dev(dev, "could not create sysctl\n"); 3335 aprint_error_dev(dev, "could not create sysctl\n");
3339 3336
3340 if (sysctl_createv(log, 0, &rnode, &cnode, 3337 if (sysctl_createv(log, 0, &rnode, &cnode,
3341 CTLFLAG_READONLY, CTLTYPE_INT, 3338 CTLFLAG_READONLY, CTLTYPE_INT,
3342 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"), 3339 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3343 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0) 3340 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3344 aprint_error_dev(dev, "could not create sysctl\n"); 3341 aprint_error_dev(dev, "could not create sysctl\n");
3345 3342
3346 if (sysctl_createv(log, 0, &rnode, &cnode, 3343 if (sysctl_createv(log, 0, &rnode, &cnode,
3347 CTLFLAG_READONLY, CTLTYPE_INT, 3344 CTLFLAG_READONLY, CTLTYPE_INT,
3348 "num_queues", SYSCTL_DESCR("Number of queues"), 3345 "num_queues", SYSCTL_DESCR("Number of queues"),
3349 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0) 3346 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3350 aprint_error_dev(dev, "could not create sysctl\n"); 3347 aprint_error_dev(dev, "could not create sysctl\n");
3351 3348
3352 /* Sysctls for all devices */ 3349 /* Sysctls for all devices */
3353 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3350 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3354 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC), 3351 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3355 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, 3352 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3356 CTL_EOL) != 0) 3353 CTL_EOL) != 0)
3357 aprint_error_dev(dev, "could not create sysctl\n"); 3354 aprint_error_dev(dev, "could not create sysctl\n");
3358 3355
3359 adapter->enable_aim = ixgbe_enable_aim; 3356 adapter->enable_aim = ixgbe_enable_aim;
3360 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3357 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3361 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), 3358 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3362 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) 3359 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3363 aprint_error_dev(dev, "could not create sysctl\n"); 3360 aprint_error_dev(dev, "could not create sysctl\n");
3364 3361
3365 if (sysctl_createv(log, 0, &rnode, &cnode, 3362 if (sysctl_createv(log, 0, &rnode, &cnode,
3366 CTLFLAG_READWRITE, CTLTYPE_INT, 3363 CTLFLAG_READWRITE, CTLTYPE_INT,
3367 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED), 3364 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3368 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE, 3365 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3369 CTL_EOL) != 0) 3366 CTL_EOL) != 0)
3370 aprint_error_dev(dev, "could not create sysctl\n"); 3367 aprint_error_dev(dev, "could not create sysctl\n");
3371 3368
3372 /* 3369 /*
3373 * If each "que->txrx_use_workqueue" is changed in sysctl handler, 3370 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
3374 * it causesflip-flopping softint/workqueue mode in one deferred 3371 * it causesflip-flopping softint/workqueue mode in one deferred
3375 * processing. Therefore, preempt_disable()/preempt_enable() are 3372 * processing. Therefore, preempt_disable()/preempt_enable() are
3376 * required in ixgbe_sched_handle_que() to avoid 3373 * required in ixgbe_sched_handle_que() to avoid
3377 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule(). 3374 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
3378 * I think changing "que->txrx_use_workqueue" in interrupt handler 3375 * I think changing "que->txrx_use_workqueue" in interrupt handler
3379 * is lighter than doing preempt_disable()/preempt_enable() in every 3376 * is lighter than doing preempt_disable()/preempt_enable() in every
3380 * ixgbe_sched_handle_que(). 3377 * ixgbe_sched_handle_que().
3381 */ 3378 */
3382 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue; 3379 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3383 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3380 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3384 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), 3381 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3385 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0) 3382 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3386 aprint_error_dev(dev, "could not create sysctl\n"); 3383 aprint_error_dev(dev, "could not create sysctl\n");
3387 3384
3388#ifdef IXGBE_DEBUG 3385#ifdef IXGBE_DEBUG
3389 /* testing sysctls (for all devices) */ 3386 /* testing sysctls (for all devices) */
3390 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3387 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3391 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"), 3388 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3392 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE, 3389 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3393 CTL_EOL) != 0) 3390 CTL_EOL) != 0)
3394 aprint_error_dev(dev, "could not create sysctl\n"); 3391 aprint_error_dev(dev, "could not create sysctl\n");
3395 3392
3396 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY, 3393 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3397 CTLTYPE_STRING, "print_rss_config", 3394 CTLTYPE_STRING, "print_rss_config",
3398 SYSCTL_DESCR("Prints RSS Configuration"), 3395 SYSCTL_DESCR("Prints RSS Configuration"),
3399 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE, 3396 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3400 CTL_EOL) != 0) 3397 CTL_EOL) != 0)
3401 aprint_error_dev(dev, "could not create sysctl\n"); 3398 aprint_error_dev(dev, "could not create sysctl\n");
3402#endif 3399#endif
3403 /* for X550 series devices */ 3400 /* for X550 series devices */
3404 if (hw->mac.type >= ixgbe_mac_X550) 3401 if (hw->mac.type >= ixgbe_mac_X550)
3405 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3402 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3406 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"), 3403 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3407 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE, 3404 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3408 CTL_EOL) != 0) 3405 CTL_EOL) != 0)
3409 aprint_error_dev(dev, "could not create sysctl\n"); 3406 aprint_error_dev(dev, "could not create sysctl\n");
3410 3407
3411 /* for WoL-capable devices */ 3408 /* for WoL-capable devices */
3412 if (adapter->wol_support) { 3409 if (adapter->wol_support) {
3413 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3410 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3414 CTLTYPE_BOOL, "wol_enable", 3411 CTLTYPE_BOOL, "wol_enable",
3415 SYSCTL_DESCR("Enable/Disable Wake on LAN"), 3412 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3416 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE, 3413 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3417 CTL_EOL) != 0) 3414 CTL_EOL) != 0)
3418 aprint_error_dev(dev, "could not create sysctl\n"); 3415 aprint_error_dev(dev, "could not create sysctl\n");
3419 3416
3420 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3417 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3421 CTLTYPE_INT, "wufc", 3418 CTLTYPE_INT, "wufc",
3422 SYSCTL_DESCR("Enable/Disable Wake Up Filters"), 3419 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3423 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE, 3420 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3424 CTL_EOL) != 0) 3421 CTL_EOL) != 0)
3425 aprint_error_dev(dev, "could not create sysctl\n"); 3422 aprint_error_dev(dev, "could not create sysctl\n");
3426 } 3423 }
3427 3424
3428 /* for X552/X557-AT devices */ 3425 /* for X552/X557-AT devices */
3429 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 3426 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3430 const struct sysctlnode *phy_node; 3427 const struct sysctlnode *phy_node;
3431 3428
3432 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE, 3429 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3433 "phy", SYSCTL_DESCR("External PHY sysctls"), 3430 "phy", SYSCTL_DESCR("External PHY sysctls"),
3434 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) { 3431 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3435 aprint_error_dev(dev, "could not create sysctl\n"); 3432 aprint_error_dev(dev, "could not create sysctl\n");
3436 return; 3433 return;
3437 } 3434 }
3438 3435
3439 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY, 3436 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3440 CTLTYPE_INT, "temp", 3437 CTLTYPE_INT, "temp",
3441 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"), 3438 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3442 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE, 3439 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3443 CTL_EOL) != 0) 3440 CTL_EOL) != 0)
3444 aprint_error_dev(dev, "could not create sysctl\n"); 3441 aprint_error_dev(dev, "could not create sysctl\n");
3445 3442
3446 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY, 3443 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3447 CTLTYPE_INT, "overtemp_occurred", 3444 CTLTYPE_INT, "overtemp_occurred",
3448 SYSCTL_DESCR("External PHY High Temperature Event Occurred"), 3445 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3449 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0, 3446 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3450 CTL_CREATE, CTL_EOL) != 0) 3447 CTL_CREATE, CTL_EOL) != 0)
3451 aprint_error_dev(dev, "could not create sysctl\n"); 3448 aprint_error_dev(dev, "could not create sysctl\n");
3452 } 3449 }
3453 3450
3454 if ((hw->mac.type == ixgbe_mac_X550EM_a) 3451 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3455 && (hw->phy.type == ixgbe_phy_fw)) 3452 && (hw->phy.type == ixgbe_phy_fw))
3456 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3453 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3457 CTLTYPE_BOOL, "force_10_100_autonego", 3454 CTLTYPE_BOOL, "force_10_100_autonego",
3458 SYSCTL_DESCR("Force autonego on 10M and 100M"), 3455 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3459 NULL, 0, &hw->phy.force_10_100_autonego, 0, 3456 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3460 CTL_CREATE, CTL_EOL) != 0) 3457 CTL_CREATE, CTL_EOL) != 0)
3461 aprint_error_dev(dev, "could not create sysctl\n"); 3458 aprint_error_dev(dev, "could not create sysctl\n");
3462 3459
3463 if (adapter->feat_cap & IXGBE_FEATURE_EEE) { 3460 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3464 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3461 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3465 CTLTYPE_INT, "eee_state", 3462 CTLTYPE_INT, "eee_state",
3466 SYSCTL_DESCR("EEE Power Save State"), 3463 SYSCTL_DESCR("EEE Power Save State"),
3467 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE, 3464 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3468 CTL_EOL) != 0) 3465 CTL_EOL) != 0)
3469 aprint_error_dev(dev, "could not create sysctl\n"); 3466 aprint_error_dev(dev, "could not create sysctl\n");
3470 } 3467 }
3471} /* ixgbe_add_device_sysctls */ 3468} /* ixgbe_add_device_sysctls */
3472 3469
3473/************************************************************************ 3470/************************************************************************
3474 * ixgbe_allocate_pci_resources 3471 * ixgbe_allocate_pci_resources
3475 ************************************************************************/ 3472 ************************************************************************/
3476static int 3473static int
3477ixgbe_allocate_pci_resources(struct adapter *adapter, 3474ixgbe_allocate_pci_resources(struct adapter *adapter,
3478 const struct pci_attach_args *pa) 3475 const struct pci_attach_args *pa)
3479{ 3476{
3480 pcireg_t memtype, csr; 3477 pcireg_t memtype, csr;
3481 device_t dev = adapter->dev; 3478 device_t dev = adapter->dev;
3482 bus_addr_t addr; 3479 bus_addr_t addr;
3483 int flags; 3480 int flags;
3484 3481
3485 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); 3482 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3486 switch (memtype) { 3483 switch (memtype) {
3487 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3484 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3488 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3485 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3489 adapter->osdep.mem_bus_space_tag = pa->pa_memt; 3486 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3490 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0), 3487 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3491 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0) 3488 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3492 goto map_err; 3489 goto map_err;
3493 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) { 3490 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3494 aprint_normal_dev(dev, "clearing prefetchable bit\n"); 3491 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3495 flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3492 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3496 } 3493 }
3497 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr, 3494 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3498 adapter->osdep.mem_size, flags, 3495 adapter->osdep.mem_size, flags,
3499 &adapter->osdep.mem_bus_space_handle) != 0) { 3496 &adapter->osdep.mem_bus_space_handle) != 0) {
3500map_err: 3497map_err:
3501 adapter->osdep.mem_size = 0; 3498 adapter->osdep.mem_size = 0;
3502 aprint_error_dev(dev, "unable to map BAR0\n"); 3499 aprint_error_dev(dev, "unable to map BAR0\n");
3503 return ENXIO; 3500 return ENXIO;
3504 } 3501 }
3505 /* 3502 /*
3506 * Enable address decoding for memory range in case BIOS or 3503 * Enable address decoding for memory range in case BIOS or
3507 * UEFI don't set it. 3504 * UEFI don't set it.
3508 */ 3505 */
3509 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, 3506 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3510 PCI_COMMAND_STATUS_REG); 3507 PCI_COMMAND_STATUS_REG);
3511 csr |= PCI_COMMAND_MEM_ENABLE; 3508 csr |= PCI_COMMAND_MEM_ENABLE;
3512 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 3509 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3513 csr); 3510 csr);
3514 break; 3511 break;
3515 default: 3512 default:
3516 aprint_error_dev(dev, "unexpected type on BAR0\n"); 3513 aprint_error_dev(dev, "unexpected type on BAR0\n");
3517 return ENXIO; 3514 return ENXIO;
3518 } 3515 }
3519 3516
3520 return (0); 3517 return (0);
3521} /* ixgbe_allocate_pci_resources */ 3518} /* ixgbe_allocate_pci_resources */
3522 3519
3523static void 3520static void
3524ixgbe_free_softint(struct adapter *adapter) 3521ixgbe_free_softint(struct adapter *adapter)
3525{ 3522{
3526 struct ix_queue *que = adapter->queues; 3523 struct ix_queue *que = adapter->queues;
3527 struct tx_ring *txr = adapter->tx_rings; 3524 struct tx_ring *txr = adapter->tx_rings;
3528 int i; 3525 int i;
3529 3526
3530 for (i = 0; i < adapter->num_queues; i++, que++, txr++) { 3527 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3531 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 3528 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3532 if (txr->txr_si != NULL) 3529 if (txr->txr_si != NULL)
3533 softint_disestablish(txr->txr_si); 3530 softint_disestablish(txr->txr_si);
3534 } 3531 }
3535 if (que->que_si != NULL) 3532 if (que->que_si != NULL)
3536 softint_disestablish(que->que_si); 3533 softint_disestablish(que->que_si);
3537 } 3534 }
3538 if (adapter->txr_wq != NULL) 3535 if (adapter->txr_wq != NULL)
3539 workqueue_destroy(adapter->txr_wq); 3536 workqueue_destroy(adapter->txr_wq);
3540 if (adapter->txr_wq_enqueued != NULL) 3537 if (adapter->txr_wq_enqueued != NULL)
3541 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); 3538 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3542 if (adapter->que_wq != NULL) 3539 if (adapter->que_wq != NULL)
3543 workqueue_destroy(adapter->que_wq); 3540 workqueue_destroy(adapter->que_wq);
3544 3541
3545 /* Drain the Link queue */ 3542 /* Drain the Link queue */
3546 if (adapter->link_si != NULL) { 3543 if (adapter->link_si != NULL) {
3547 softint_disestablish(adapter->link_si); 3544 softint_disestablish(adapter->link_si);
3548 adapter->link_si = NULL; 3545 adapter->link_si = NULL;
3549 } 3546 }
3550 if (adapter->mod_si != NULL) { 3547 if (adapter->mod_si != NULL) {
3551 softint_disestablish(adapter->mod_si); 3548 softint_disestablish(adapter->mod_si);
3552 adapter->mod_si = NULL; 3549 adapter->mod_si = NULL;
3553 } 3550 }
3554 if (adapter->msf_si != NULL) { 3551 if (adapter->msf_si != NULL) {
3555 softint_disestablish(adapter->msf_si); 3552 softint_disestablish(adapter->msf_si);
3556 adapter->msf_si = NULL; 3553 adapter->msf_si = NULL;
3557 } 3554 }
3558 if (adapter->phy_si != NULL) { 3555 if (adapter->phy_si != NULL) {
3559 softint_disestablish(adapter->phy_si); 3556 softint_disestablish(adapter->phy_si);
3560 adapter->phy_si = NULL; 3557 adapter->phy_si = NULL;
3561 } 3558 }
3562 if (adapter->feat_en & IXGBE_FEATURE_FDIR) { 3559 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3563 if (adapter->fdir_si != NULL) { 3560 if (adapter->fdir_si != NULL) {
3564 softint_disestablish(adapter->fdir_si); 3561 softint_disestablish(adapter->fdir_si);
3565 adapter->fdir_si = NULL; 3562 adapter->fdir_si = NULL;
3566 } 3563 }
3567 } 3564 }
3568 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) { 3565 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3569 if (adapter->mbx_si != NULL) { 3566 if (adapter->mbx_si != NULL) {
3570 softint_disestablish(adapter->mbx_si); 3567 softint_disestablish(adapter->mbx_si);
3571 adapter->mbx_si = NULL; 3568 adapter->mbx_si = NULL;
3572 } 3569 }
3573 } 3570 }
3574} /* ixgbe_free_softint */ 3571} /* ixgbe_free_softint */
3575 3572
3576/************************************************************************ 3573/************************************************************************
3577 * ixgbe_detach - Device removal routine 3574 * ixgbe_detach - Device removal routine
3578 * 3575 *
3579 * Called when the driver is being removed. 3576 * Called when the driver is being removed.
3580 * Stops the adapter and deallocates all the resources 3577 * Stops the adapter and deallocates all the resources
3581 * that were allocated for driver operation. 3578 * that were allocated for driver operation.
3582 * 3579 *
3583 * return 0 on success, positive on failure 3580 * return 0 on success, positive on failure
3584 ************************************************************************/ 3581 ************************************************************************/
3585static int 3582static int
3586ixgbe_detach(device_t dev, int flags) 3583ixgbe_detach(device_t dev, int flags)
3587{ 3584{
3588 struct adapter *adapter = device_private(dev); 3585 struct adapter *adapter = device_private(dev);
3589 struct rx_ring *rxr = adapter->rx_rings; 3586 struct rx_ring *rxr = adapter->rx_rings;
3590 struct tx_ring *txr = adapter->tx_rings; 3587 struct tx_ring *txr = adapter->tx_rings;
3591 struct ixgbe_hw *hw = &adapter->hw; 3588 struct ixgbe_hw *hw = &adapter->hw;
3592 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 3589 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3593 u32 ctrl_ext; 3590 u32 ctrl_ext;
3594 int i; 3591 int i;
3595 3592
3596 INIT_DEBUGOUT("ixgbe_detach: begin"); 3593 INIT_DEBUGOUT("ixgbe_detach: begin");
3597 if (adapter->osdep.attached == false) 3594 if (adapter->osdep.attached == false)
3598 return 0; 3595 return 0;
3599 3596
3600 if (ixgbe_pci_iov_detach(dev) != 0) { 3597 if (ixgbe_pci_iov_detach(dev) != 0) {
3601 device_printf(dev, "SR-IOV in use; detach first.\n"); 3598 device_printf(dev, "SR-IOV in use; detach first.\n");
3602 return (EBUSY); 3599 return (EBUSY);
3603 } 3600 }
3604 3601
3605 /* 3602 /*
3606 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(), 3603 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3607 * so it's not required to call ixgbe_stop() directly. 3604 * so it's not required to call ixgbe_stop() directly.
3608 */ 3605 */
3609 IXGBE_CORE_LOCK(adapter); 3606 IXGBE_CORE_LOCK(adapter);
3610 ixgbe_setup_low_power_mode(adapter); 3607 ixgbe_setup_low_power_mode(adapter);
3611 IXGBE_CORE_UNLOCK(adapter); 3608 IXGBE_CORE_UNLOCK(adapter);
3612#if NVLAN > 0 3609#if NVLAN > 0
3613 /* Make sure VLANs are not using driver */ 3610 /* Make sure VLANs are not using driver */
3614 if (!VLAN_ATTACHED(&adapter->osdep.ec)) 3611 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3615 ; /* nothing to do: no VLANs */ 3612 ; /* nothing to do: no VLANs */
3616 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) 3613 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3617 vlan_ifdetach(adapter->ifp); 3614 vlan_ifdetach(adapter->ifp);
3618 else { 3615 else {
3619 aprint_error_dev(dev, "VLANs in use, detach first\n"); 3616 aprint_error_dev(dev, "VLANs in use, detach first\n");
3620 return (EBUSY); 3617 return (EBUSY);
3621 } 3618 }
3622#endif 3619#endif
3623 3620
3624 pmf_device_deregister(dev); 3621 pmf_device_deregister(dev);
3625 3622
3626 ether_ifdetach(adapter->ifp); 3623 ether_ifdetach(adapter->ifp);
3627 3624
3628 ixgbe_free_softint(adapter); 3625 ixgbe_free_softint(adapter);
3629 3626
3630 /* let hardware know driver is unloading */ 3627 /* let hardware know driver is unloading */
3631 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 3628 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3632 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3629 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3633 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 3630 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3634 3631
3635 callout_halt(&adapter->timer, NULL); 3632 callout_halt(&adapter->timer, NULL);
3636 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 3633 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3637 callout_halt(&adapter->recovery_mode_timer, NULL); 3634 callout_halt(&adapter->recovery_mode_timer, NULL);
3638 3635
3639 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 3636 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3640 netmap_detach(adapter->ifp); 3637 netmap_detach(adapter->ifp);
3641 3638
3642 ixgbe_free_pci_resources(adapter); 3639 ixgbe_free_pci_resources(adapter);
3643#if 0 /* XXX the NetBSD port is probably missing something here */ 3640#if 0 /* XXX the NetBSD port is probably missing something here */
3644 bus_generic_detach(dev); 3641 bus_generic_detach(dev);
3645#endif 3642#endif
3646 if_detach(adapter->ifp); 3643 if_detach(adapter->ifp);
3647 if_percpuq_destroy(adapter->ipq); 3644 if_percpuq_destroy(adapter->ipq);
3648 3645
3649 sysctl_teardown(&adapter->sysctllog); 3646 sysctl_teardown(&adapter->sysctllog);
3650 evcnt_detach(&adapter->efbig_tx_dma_setup); 3647 evcnt_detach(&adapter->efbig_tx_dma_setup);
3651 evcnt_detach(&adapter->mbuf_defrag_failed); 3648 evcnt_detach(&adapter->mbuf_defrag_failed);
3652 evcnt_detach(&adapter->efbig2_tx_dma_setup); 3649 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3653 evcnt_detach(&adapter->einval_tx_dma_setup); 3650 evcnt_detach(&adapter->einval_tx_dma_setup);
3654 evcnt_detach(&adapter->other_tx_dma_setup); 3651 evcnt_detach(&adapter->other_tx_dma_setup);
3655 evcnt_detach(&adapter->eagain_tx_dma_setup); 3652 evcnt_detach(&adapter->eagain_tx_dma_setup);
3656 evcnt_detach(&adapter->enomem_tx_dma_setup); 3653 evcnt_detach(&adapter->enomem_tx_dma_setup);
3657 evcnt_detach(&adapter->watchdog_events); 3654 evcnt_detach(&adapter->watchdog_events);
3658 evcnt_detach(&adapter->tso_err); 3655 evcnt_detach(&adapter->tso_err);
3659 evcnt_detach(&adapter->link_irq); 3656 evcnt_detach(&adapter->link_irq);
3660 evcnt_detach(&adapter->link_sicount); 3657 evcnt_detach(&adapter->link_sicount);
3661 evcnt_detach(&adapter->mod_sicount); 3658 evcnt_detach(&adapter->mod_sicount);
3662 evcnt_detach(&adapter->msf_sicount); 3659 evcnt_detach(&adapter->msf_sicount);
3663 evcnt_detach(&adapter->phy_sicount); 3660 evcnt_detach(&adapter->phy_sicount);
3664 3661
3665 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 3662 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3666 if (i < __arraycount(stats->mpc)) { 3663 if (i < __arraycount(stats->mpc)) {
3667 evcnt_detach(&stats->mpc[i]); 3664 evcnt_detach(&stats->mpc[i]);
3668 if (hw->mac.type == ixgbe_mac_82598EB) 3665 if (hw->mac.type == ixgbe_mac_82598EB)
3669 evcnt_detach(&stats->rnbc[i]); 3666 evcnt_detach(&stats->rnbc[i]);
3670 } 3667 }
3671 if (i < __arraycount(stats->pxontxc)) { 3668 if (i < __arraycount(stats->pxontxc)) {
3672 evcnt_detach(&stats->pxontxc[i]); 3669 evcnt_detach(&stats->pxontxc[i]);
3673 evcnt_detach(&stats->pxonrxc[i]); 3670 evcnt_detach(&stats->pxonrxc[i]);
3674 evcnt_detach(&stats->pxofftxc[i]); 3671 evcnt_detach(&stats->pxofftxc[i]);
3675 evcnt_detach(&stats->pxoffrxc[i]); 3672 evcnt_detach(&stats->pxoffrxc[i]);
3676 if (hw->mac.type >= ixgbe_mac_82599EB) 3673 if (hw->mac.type >= ixgbe_mac_82599EB)
3677 evcnt_detach(&stats->pxon2offc[i]); 3674 evcnt_detach(&stats->pxon2offc[i]);
3678 } 3675 }
3679 } 3676 }
3680 3677
3681 txr = adapter->tx_rings; 3678 txr = adapter->tx_rings;
3682 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 3679 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3683 evcnt_detach(&adapter->queues[i].irqs); 3680 evcnt_detach(&adapter->queues[i].irqs);
3684 evcnt_detach(&adapter->queues[i].handleq); 3681 evcnt_detach(&adapter->queues[i].handleq);
3685 evcnt_detach(&adapter->queues[i].req); 3682 evcnt_detach(&adapter->queues[i].req);
3686 evcnt_detach(&txr->no_desc_avail); 3683 evcnt_detach(&txr->no_desc_avail);
3687 evcnt_detach(&txr->total_packets); 3684 evcnt_detach(&txr->total_packets);
3688 evcnt_detach(&txr->tso_tx); 3685 evcnt_detach(&txr->tso_tx);
3689#ifndef IXGBE_LEGACY_TX 3686#ifndef IXGBE_LEGACY_TX
3690 evcnt_detach(&txr->pcq_drops); 3687 evcnt_detach(&txr->pcq_drops);
3691#endif 3688#endif
3692 3689
3693 if (i < __arraycount(stats->qprc)) { 3690 if (i < __arraycount(stats->qprc)) {
3694 evcnt_detach(&stats->qprc[i]); 3691 evcnt_detach(&stats->qprc[i]);
3695 evcnt_detach(&stats->qptc[i]); 3692 evcnt_detach(&stats->qptc[i]);
3696 evcnt_detach(&stats->qbrc[i]); 3693 evcnt_detach(&stats->qbrc[i]);
3697 evcnt_detach(&stats->qbtc[i]); 3694 evcnt_detach(&stats->qbtc[i]);
3698 if (hw->mac.type >= ixgbe_mac_82599EB) 3695 if (hw->mac.type >= ixgbe_mac_82599EB)
3699 evcnt_detach(&stats->qprdc[i]); 3696 evcnt_detach(&stats->qprdc[i]);
3700 } 3697 }
3701 3698
3702 evcnt_detach(&rxr->rx_packets); 3699 evcnt_detach(&rxr->rx_packets);
3703 evcnt_detach(&rxr->rx_bytes); 3700 evcnt_detach(&rxr->rx_bytes);
3704 evcnt_detach(&rxr->rx_copies); 3701 evcnt_detach(&rxr->rx_copies);
3705 evcnt_detach(&rxr->no_jmbuf); 3702 evcnt_detach(&rxr->no_jmbuf);
3706 evcnt_detach(&rxr->rx_discarded); 3703 evcnt_detach(&rxr->rx_discarded);
3707 } 3704 }
3708 evcnt_detach(&stats->ipcs); 3705 evcnt_detach(&stats->ipcs);
3709 evcnt_detach(&stats->l4cs); 3706 evcnt_detach(&stats->l4cs);
3710 evcnt_detach(&stats->ipcs_bad); 3707 evcnt_detach(&stats->ipcs_bad);
3711 evcnt_detach(&stats->l4cs_bad); 3708 evcnt_detach(&stats->l4cs_bad);
3712 evcnt_detach(&stats->intzero); 3709 evcnt_detach(&stats->intzero);
3713 evcnt_detach(&stats->legint); 3710 evcnt_detach(&stats->legint);
3714 evcnt_detach(&stats->crcerrs); 3711 evcnt_detach(&stats->crcerrs);
3715 evcnt_detach(&stats->illerrc); 3712 evcnt_detach(&stats->illerrc);
3716 evcnt_detach(&stats->errbc); 3713 evcnt_detach(&stats->errbc);
3717 evcnt_detach(&stats->mspdc); 3714 evcnt_detach(&stats->mspdc);
3718 if (hw->mac.type >= ixgbe_mac_X550) 3715 if (hw->mac.type >= ixgbe_mac_X550)
3719 evcnt_detach(&stats->mbsdc); 3716 evcnt_detach(&stats->mbsdc);
3720 evcnt_detach(&stats->mpctotal); 3717 evcnt_detach(&stats->mpctotal);
3721 evcnt_detach(&stats->mlfc); 3718 evcnt_detach(&stats->mlfc);
3722 evcnt_detach(&stats->mrfc); 3719 evcnt_detach(&stats->mrfc);
3723 evcnt_detach(&stats->rlec); 3720 evcnt_detach(&stats->rlec);
3724 evcnt_detach(&stats->lxontxc); 3721 evcnt_detach(&stats->lxontxc);
3725 evcnt_detach(&stats->lxonrxc); 3722 evcnt_detach(&stats->lxonrxc);
3726 evcnt_detach(&stats->lxofftxc); 3723 evcnt_detach(&stats->lxofftxc);
3727 evcnt_detach(&stats->lxoffrxc); 3724 evcnt_detach(&stats->lxoffrxc);
3728 3725
3729 /* Packet Reception Stats */ 3726 /* Packet Reception Stats */
3730 evcnt_detach(&stats->tor); 3727 evcnt_detach(&stats->tor);
3731 evcnt_detach(&stats->gorc); 3728 evcnt_detach(&stats->gorc);
3732 evcnt_detach(&stats->tpr); 3729 evcnt_detach(&stats->tpr);
3733 evcnt_detach(&stats->gprc); 3730 evcnt_detach(&stats->gprc);
3734 evcnt_detach(&stats->mprc); 3731 evcnt_detach(&stats->mprc);
3735 evcnt_detach(&stats->bprc); 3732 evcnt_detach(&stats->bprc);
3736 evcnt_detach(&stats->prc64); 3733 evcnt_detach(&stats->prc64);
3737 evcnt_detach(&stats->prc127); 3734 evcnt_detach(&stats->prc127);
3738 evcnt_detach(&stats->prc255); 3735 evcnt_detach(&stats->prc255);
3739 evcnt_detach(&stats->prc511); 3736 evcnt_detach(&stats->prc511);
3740 evcnt_detach(&stats->prc1023); 3737 evcnt_detach(&stats->prc1023);
3741 evcnt_detach(&stats->prc1522); 3738 evcnt_detach(&stats->prc1522);
3742 evcnt_detach(&stats->ruc); 3739 evcnt_detach(&stats->ruc);
3743 evcnt_detach(&stats->rfc); 3740 evcnt_detach(&stats->rfc);
3744 evcnt_detach(&stats->roc); 3741 evcnt_detach(&stats->roc);
3745 evcnt_detach(&stats->rjc); 3742 evcnt_detach(&stats->rjc);
3746 evcnt_detach(&stats->mngprc); 3743 evcnt_detach(&stats->mngprc);
3747 evcnt_detach(&stats->mngpdc); 3744 evcnt_detach(&stats->mngpdc);
3748 evcnt_detach(&stats->xec); 3745 evcnt_detach(&stats->xec);
3749 3746
3750 /* Packet Transmission Stats */ 3747 /* Packet Transmission Stats */
3751 evcnt_detach(&stats->gotc); 3748 evcnt_detach(&stats->gotc);
3752 evcnt_detach(&stats->tpt); 3749 evcnt_detach(&stats->tpt);
3753 evcnt_detach(&stats->gptc); 3750 evcnt_detach(&stats->gptc);
3754 evcnt_detach(&stats->bptc); 3751 evcnt_detach(&stats->bptc);
3755 evcnt_detach(&stats->mptc); 3752 evcnt_detach(&stats->mptc);
3756 evcnt_detach(&stats->mngptc); 3753 evcnt_detach(&stats->mngptc);
3757 evcnt_detach(&stats->ptc64); 3754 evcnt_detach(&stats->ptc64);
3758 evcnt_detach(&stats->ptc127); 3755 evcnt_detach(&stats->ptc127);
3759 evcnt_detach(&stats->ptc255); 3756 evcnt_detach(&stats->ptc255);
3760 evcnt_detach(&stats->ptc511); 3757 evcnt_detach(&stats->ptc511);
3761 evcnt_detach(&stats->ptc1023); 3758 evcnt_detach(&stats->ptc1023);
3762 evcnt_detach(&stats->ptc1522); 3759 evcnt_detach(&stats->ptc1522);
3763 3760
3764 ixgbe_free_transmit_structures(adapter); 3761 ixgbe_free_transmit_structures(adapter);
3765 ixgbe_free_receive_structures(adapter); 3762 ixgbe_free_receive_structures(adapter);
3766 for (i = 0; i < adapter->num_queues; i++) { 3763 for (i = 0; i < adapter->num_queues; i++) {
3767 struct ix_queue * que = &adapter->queues[i]; 3764 struct ix_queue * que = &adapter->queues[i];
3768 mutex_destroy(&que->dc_mtx); 3765 mutex_destroy(&que->dc_mtx);
3769 } 3766 }
3770 free(adapter->queues, M_DEVBUF); 3767 free(adapter->queues, M_DEVBUF);
3771 free(adapter->mta, M_DEVBUF); 3768 free(adapter->mta, M_DEVBUF);
3772 3769
3773 IXGBE_CORE_LOCK_DESTROY(adapter); 3770 IXGBE_CORE_LOCK_DESTROY(adapter);
3774 3771
3775 return (0); 3772 return (0);
3776} /* ixgbe_detach */ 3773} /* ixgbe_detach */
3777 3774
3778/************************************************************************ 3775/************************************************************************
3779 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 3776 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3780 * 3777 *
3781 * Prepare the adapter/port for LPLU and/or WoL 3778 * Prepare the adapter/port for LPLU and/or WoL
3782 ************************************************************************/ 3779 ************************************************************************/
3783static int 3780static int
3784ixgbe_setup_low_power_mode(struct adapter *adapter) 3781ixgbe_setup_low_power_mode(struct adapter *adapter)
3785{ 3782{
3786 struct ixgbe_hw *hw = &adapter->hw; 3783 struct ixgbe_hw *hw = &adapter->hw;
3787 device_t dev = adapter->dev; 3784 device_t dev = adapter->dev;
3788 s32 error = 0; 3785 s32 error = 0;
3789 3786
3790 KASSERT(mutex_owned(&adapter->core_mtx)); 3787 KASSERT(mutex_owned(&adapter->core_mtx));
3791 3788
3792 /* Limit power management flow to X550EM baseT */ 3789 /* Limit power management flow to X550EM baseT */
3793 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 3790 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3794 hw->phy.ops.enter_lplu) { 3791 hw->phy.ops.enter_lplu) {
3795 /* X550EM baseT adapters need a special LPLU flow */ 3792 /* X550EM baseT adapters need a special LPLU flow */
3796 hw->phy.reset_disable = true; 3793 hw->phy.reset_disable = true;
3797 ixgbe_stop(adapter); 3794 ixgbe_stop(adapter);
3798 error = hw->phy.ops.enter_lplu(hw); 3795 error = hw->phy.ops.enter_lplu(hw);
3799 if (error) 3796 if (error)
3800 device_printf(dev, 3797 device_printf(dev,
3801 "Error entering LPLU: %d\n", error); 3798 "Error entering LPLU: %d\n", error);
3802 hw->phy.reset_disable = false; 3799 hw->phy.reset_disable = false;
3803 } else { 3800 } else {
3804 /* Just stop for other adapters */ 3801 /* Just stop for other adapters */
3805 ixgbe_stop(adapter); 3802 ixgbe_stop(adapter);
3806 } 3803 }
3807 3804
3808 if (!hw->wol_enabled) { 3805 if (!hw->wol_enabled) {
3809 ixgbe_set_phy_power(hw, FALSE); 3806 ixgbe_set_phy_power(hw, FALSE);
3810 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3807 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3811 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 3808 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3812 } else { 3809 } else {
3813 /* Turn off support for APM wakeup. (Using ACPI instead) */ 3810 /* Turn off support for APM wakeup. (Using ACPI instead) */
3814 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw), 3811 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3815 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2); 3812 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3816 3813
3817 /* 3814 /*
3818 * Clear Wake Up Status register to prevent any previous wakeup 3815 * Clear Wake Up Status register to prevent any previous wakeup
3819 * events from waking us up immediately after we suspend. 3816 * events from waking us up immediately after we suspend.
3820 */ 3817 */
3821 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3818 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3822 3819
3823 /* 3820 /*
3824 * Program the Wakeup Filter Control register with user filter 3821 * Program the Wakeup Filter Control register with user filter
3825 * settings 3822 * settings
3826 */ 3823 */
3827 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 3824 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3828 3825
3829 /* Enable wakeups and power management in Wakeup Control */ 3826 /* Enable wakeups and power management in Wakeup Control */
3830 IXGBE_WRITE_REG(hw, IXGBE_WUC, 3827 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3831 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 3828 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3832 3829
3833 } 3830 }
3834 3831
3835 return error; 3832 return error;
3836} /* ixgbe_setup_low_power_mode */ 3833} /* ixgbe_setup_low_power_mode */
3837 3834
/************************************************************************
 * ixgbe_shutdown - Shutdown entry point
 *
 *   Compiled out: NetBSD handles suspend/shutdown through pmf(9)
 *   (see ixgbe_suspend/ixgbe_resume) rather than a bus shutdown method.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Enter low-power mode under the core lock. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (error);
} /* ixgbe_shutdown */
#endif
3857 3854
3858/************************************************************************ 3855/************************************************************************
3859 * ixgbe_suspend 3856 * ixgbe_suspend
3860 * 3857 *
3861 * From D0 to D3 3858 * From D0 to D3
3862 ************************************************************************/ 3859 ************************************************************************/
3863static bool 3860static bool
3864ixgbe_suspend(device_t dev, const pmf_qual_t *qual) 3861ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3865{ 3862{
3866 struct adapter *adapter = device_private(dev); 3863 struct adapter *adapter = device_private(dev);
3867 int error = 0; 3864 int error = 0;
3868 3865
3869 INIT_DEBUGOUT("ixgbe_suspend: begin"); 3866 INIT_DEBUGOUT("ixgbe_suspend: begin");
3870 3867
3871 IXGBE_CORE_LOCK(adapter); 3868 IXGBE_CORE_LOCK(adapter);
3872 3869
3873 error = ixgbe_setup_low_power_mode(adapter); 3870 error = ixgbe_setup_low_power_mode(adapter);
3874 3871
3875 IXGBE_CORE_UNLOCK(adapter); 3872 IXGBE_CORE_UNLOCK(adapter);
3876 3873
3877 return (error); 3874 return (error);
3878} /* ixgbe_suspend */ 3875} /* ixgbe_suspend */
3879 3876
3880/************************************************************************ 3877/************************************************************************
3881 * ixgbe_resume 3878 * ixgbe_resume
3882 * 3879 *
3883 * From D3 to D0 3880 * From D3 to D0
3884 ************************************************************************/ 3881 ************************************************************************/
3885static bool 3882static bool
3886ixgbe_resume(device_t dev, const pmf_qual_t *qual) 3883ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3887{ 3884{
3888 struct adapter *adapter = device_private(dev); 3885 struct adapter *adapter = device_private(dev);
3889 struct ifnet *ifp = adapter->ifp; 3886 struct ifnet *ifp = adapter->ifp;
3890 struct ixgbe_hw *hw = &adapter->hw; 3887 struct ixgbe_hw *hw = &adapter->hw;
3891 u32 wus; 3888 u32 wus;
3892 3889
3893 INIT_DEBUGOUT("ixgbe_resume: begin"); 3890 INIT_DEBUGOUT("ixgbe_resume: begin");
3894 3891
3895 IXGBE_CORE_LOCK(adapter); 3892 IXGBE_CORE_LOCK(adapter);
3896 3893
3897 /* Read & clear WUS register */ 3894 /* Read & clear WUS register */
3898 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 3895 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3899 if (wus) 3896 if (wus)
3900 device_printf(dev, "Woken up by (WUS): %#010x\n", 3897 device_printf(dev, "Woken up by (WUS): %#010x\n",
3901 IXGBE_READ_REG(hw, IXGBE_WUS)); 3898 IXGBE_READ_REG(hw, IXGBE_WUS));
3902 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3899 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3903 /* And clear WUFC until next low-power transition */ 3900 /* And clear WUFC until next low-power transition */
3904 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3901 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3905 3902
3906 /* 3903 /*
3907 * Required after D3->D0 transition; 3904 * Required after D3->D0 transition;
3908 * will re-advertise all previous advertised speeds 3905 * will re-advertise all previous advertised speeds
3909 */ 3906 */
3910 if (ifp->if_flags & IFF_UP) 3907 if (ifp->if_flags & IFF_UP)
3911 ixgbe_init_locked(adapter); 3908 ixgbe_init_locked(adapter);
3912 3909
3913 IXGBE_CORE_UNLOCK(adapter); 3910 IXGBE_CORE_UNLOCK(adapter);
3914 3911
3915 return true; 3912 return true;
3916} /* ixgbe_resume */ 3913} /* ixgbe_resume */
3917 3914
/*
 * Set the various hardware offload abilities.
 *
 * This takes the ifnet's if_capenable flags (e.g. set by the user using
 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
 * mbuf offload flags the driver will understand.
 *
 * Currently a stub on NetBSD: offload capabilities are negotiated
 * elsewhere, so this intentionally does nothing.
 */
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/* XXX */
}
3930 3927
3931/************************************************************************ 3928/************************************************************************
3932 * ixgbe_init_locked - Init entry point 3929 * ixgbe_init_locked - Init entry point
3933 * 3930 *
3934 * Used in two ways: It is used by the stack as an init 3931 * Used in two ways: It is used by the stack as an init
3935 * entry point in network interface structure. It is also 3932 * entry point in network interface structure. It is also
3936 * used by the driver as a hw/sw initialization routine to 3933 * used by the driver as a hw/sw initialization routine to
3937 * get to a consistent state. 3934 * get to a consistent state.
3938 * 3935 *
3939 * return 0 on success, positive on failure 3936 * return 0 on success, positive on failure
3940 ************************************************************************/ 3937 ************************************************************************/
3941static void 3938static void
3942ixgbe_init_locked(struct adapter *adapter) 3939ixgbe_init_locked(struct adapter *adapter)
3943{ 3940{
3944 struct ifnet *ifp = adapter->ifp; 3941 struct ifnet *ifp = adapter->ifp;
3945 device_t dev = adapter->dev; 3942 device_t dev = adapter->dev;
3946 struct ixgbe_hw *hw = &adapter->hw; 3943 struct ixgbe_hw *hw = &adapter->hw;
3947 struct ix_queue *que; 3944 struct ix_queue *que;
3948 struct tx_ring *txr; 3945 struct tx_ring *txr;
3949 struct rx_ring *rxr; 3946 struct rx_ring *rxr;
3950 u32 txdctl, mhadd; 3947 u32 txdctl, mhadd;
3951 u32 rxdctl, rxctrl; 3948 u32 rxdctl, rxctrl;
3952 u32 ctrl_ext; 3949 u32 ctrl_ext;
3953 int i, j, err; 3950 int i, j, err;
3954 3951
3955 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */ 3952 /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
3956 3953
3957 KASSERT(mutex_owned(&adapter->core_mtx)); 3954 KASSERT(mutex_owned(&adapter->core_mtx));
3958 INIT_DEBUGOUT("ixgbe_init_locked: begin"); 3955 INIT_DEBUGOUT("ixgbe_init_locked: begin");
3959 3956
3960 hw->adapter_stopped = FALSE; 3957 hw->adapter_stopped = FALSE;
3961 ixgbe_stop_adapter(hw); 3958 ixgbe_stop_adapter(hw);
3962 callout_stop(&adapter->timer); 3959 callout_stop(&adapter->timer);
3963 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 3960 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
3964 que->disabled_count = 0; 3961 que->disabled_count = 0;
3965 3962
3966 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */ 3963 /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
3967 adapter->max_frame_size = 3964 adapter->max_frame_size =
3968 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 3965 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
3969 3966
3970 /* Queue indices may change with IOV mode */ 3967 /* Queue indices may change with IOV mode */
3971 ixgbe_align_all_queue_indices(adapter); 3968 ixgbe_align_all_queue_indices(adapter);
3972 3969
3973 /* reprogram the RAR[0] in case user changed it. */ 3970 /* reprogram the RAR[0] in case user changed it. */
3974 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); 3971 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
3975 3972
3976 /* Get the latest mac address, User can use a LAA */ 3973 /* Get the latest mac address, User can use a LAA */
3977 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), 3974 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
3978 IXGBE_ETH_LENGTH_OF_ADDRESS); 3975 IXGBE_ETH_LENGTH_OF_ADDRESS);
3979 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); 3976 ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
3980 hw->addr_ctrl.rar_used_count = 1; 3977 hw->addr_ctrl.rar_used_count = 1;
3981 3978
3982 /* Set hardware offload abilities from ifnet flags */ 3979 /* Set hardware offload abilities from ifnet flags */
3983 ixgbe_set_if_hwassist(adapter); 3980 ixgbe_set_if_hwassist(adapter);
3984 3981
3985 /* Prepare transmit descriptors and buffers */ 3982 /* Prepare transmit descriptors and buffers */
3986 if (ixgbe_setup_transmit_structures(adapter)) { 3983 if (ixgbe_setup_transmit_structures(adapter)) {
3987 device_printf(dev, "Could not setup transmit structures\n"); 3984 device_printf(dev, "Could not setup transmit structures\n");
3988 ixgbe_stop(adapter); 3985 ixgbe_stop(adapter);
3989 return; 3986 return;
3990 } 3987 }
3991 3988
3992 ixgbe_init_hw(hw); 3989 ixgbe_init_hw(hw);
3993 3990
3994 ixgbe_initialize_iov(adapter); 3991 ixgbe_initialize_iov(adapter);
3995 3992
3996 ixgbe_initialize_transmit_units(adapter); 3993 ixgbe_initialize_transmit_units(adapter);
3997 3994
3998 /* Setup Multicast table */ 3995 /* Setup Multicast table */
3999 ixgbe_set_multi(adapter); 3996 ixgbe_set_multi(adapter);
4000 3997
4001 /* Determine the correct mbuf pool, based on frame size */ 3998 /* Determine the correct mbuf pool, based on frame size */
4002 if (adapter->max_frame_size <= MCLBYTES) 3999 if (adapter->max_frame_size <= MCLBYTES)
4003 adapter->rx_mbuf_sz = MCLBYTES; 4000 adapter->rx_mbuf_sz = MCLBYTES;
4004 else 4001 else
4005 adapter->rx_mbuf_sz = MJUMPAGESIZE; 4002 adapter->rx_mbuf_sz = MJUMPAGESIZE;
4006 4003
4007 /* Prepare receive descriptors and buffers */ 4004 /* Prepare receive descriptors and buffers */
4008 if (ixgbe_setup_receive_structures(adapter)) { 4005 if (ixgbe_setup_receive_structures(adapter)) {
4009 device_printf(dev, "Could not setup receive structures\n"); 4006 device_printf(dev, "Could not setup receive structures\n");
4010 ixgbe_stop(adapter); 4007 ixgbe_stop(adapter);
4011 return; 4008 return;
4012 } 4009 }
4013 4010
4014 /* Configure RX settings */ 4011 /* Configure RX settings */
4015 ixgbe_initialize_receive_units(adapter); 4012 ixgbe_initialize_receive_units(adapter);
4016 4013
4017 /* Enable SDP & MSI-X interrupts based on adapter */ 4014 /* Enable SDP & MSI-X interrupts based on adapter */
4018 ixgbe_config_gpie(adapter); 4015 ixgbe_config_gpie(adapter);
4019 4016
4020 /* Set MTU size */ 4017 /* Set MTU size */
4021 if (ifp->if_mtu > ETHERMTU) { 4018 if (ifp->if_mtu > ETHERMTU) {
4022 /* aka IXGBE_MAXFRS on 82599 and newer */ 4019 /* aka IXGBE_MAXFRS on 82599 and newer */
4023 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 4020 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
4024 mhadd &= ~IXGBE_MHADD_MFS_MASK; 4021 mhadd &= ~IXGBE_MHADD_MFS_MASK;
4025 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; 4022 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
4026 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 4023 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
4027 } 4024 }
4028 4025
4029 /* Now enable all the queues */ 4026 /* Now enable all the queues */
4030 for (i = 0; i < adapter->num_queues; i++) { 4027 for (i = 0; i < adapter->num_queues; i++) {
4031 txr = &adapter->tx_rings[i]; 4028 txr = &adapter->tx_rings[i];
4032 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); 4029 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
4033 txdctl |= IXGBE_TXDCTL_ENABLE; 4030 txdctl |= IXGBE_TXDCTL_ENABLE;
4034 /* Set WTHRESH to 8, burst writeback */ 4031 /* Set WTHRESH to 8, burst writeback */
4035 txdctl |= (8 << 16); 4032 txdctl |= (8 << 16);
4036 /* 4033 /*
4037 * When the internal queue falls below PTHRESH (32), 4034 * When the internal queue falls below PTHRESH (32),
4038 * start prefetching as long as there are at least 4035 * start prefetching as long as there are at least
4039 * HTHRESH (1) buffers ready. The values are taken 4036 * HTHRESH (1) buffers ready. The values are taken
4040 * from the Intel linux driver 3.8.21. 4037 * from the Intel linux driver 3.8.21.
4041 * Prefetching enables tx line rate even with 1 queue. 4038 * Prefetching enables tx line rate even with 1 queue.
4042 */ 4039 */
4043 txdctl |= (32 << 0) | (1 << 8); 4040 txdctl |= (32 << 0) | (1 << 8);
4044 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); 4041 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
4045 } 4042 }
4046 4043
4047 for (i = 0; i < adapter->num_queues; i++) { 4044 for (i = 0; i < adapter->num_queues; i++) {
4048 rxr = &adapter->rx_rings[i]; 4045 rxr = &adapter->rx_rings[i];
4049 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 4046 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
4050 if (hw->mac.type == ixgbe_mac_82598EB) { 4047 if (hw->mac.type == ixgbe_mac_82598EB) {
4051 /* 4048 /*
4052 * PTHRESH = 21 4049 * PTHRESH = 21
4053 * HTHRESH = 4 4050 * HTHRESH = 4
4054 * WTHRESH = 8 4051 * WTHRESH = 8
4055 */ 4052 */
4056 rxdctl &= ~0x3FFFFF; 4053 rxdctl &= ~0x3FFFFF;
4057 rxdctl |= 0x080420; 4054 rxdctl |= 0x080420;
4058 } 4055 }
4059 rxdctl |= IXGBE_RXDCTL_ENABLE; 4056 rxdctl |= IXGBE_RXDCTL_ENABLE;
4060 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); 4057 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
4061 for (j = 0; j < 10; j++) { 4058 for (j = 0; j < 10; j++) {
4062 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & 4059 if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
4063 IXGBE_RXDCTL_ENABLE) 4060 IXGBE_RXDCTL_ENABLE)
4064 break; 4061 break;
4065 else 4062 else
4066 msec_delay(1); 4063 msec_delay(1);
4067 } 4064 }
4068 wmb(); 4065 wmb();
4069 4066
4070 /* 4067 /*
4071 * In netmap mode, we must preserve the buffers made 4068 * In netmap mode, we must preserve the buffers made
4072 * available to userspace before the if_init() 4069 * available to userspace before the if_init()
4073 * (this is true by default on the TX side, because 4070 * (this is true by default on the TX side, because
4074 * init makes all buffers available to userspace). 4071 * init makes all buffers available to userspace).
4075 * 4072 *
4076 * netmap_reset() and the device specific routines 4073 * netmap_reset() and the device specific routines
4077 * (e.g. ixgbe_setup_receive_rings()) map these 4074 * (e.g. ixgbe_setup_receive_rings()) map these
4078 * buffers at the end of the NIC ring, so here we 4075 * buffers at the end of the NIC ring, so here we
4079 * must set the RDT (tail) register to make sure 4076 * must set the RDT (tail) register to make sure
4080 * they are not overwritten. 4077 * they are not overwritten.
4081 * 4078 *
4082 * In this driver the NIC ring starts at RDH = 0, 4079 * In this driver the NIC ring starts at RDH = 0,
4083 * RDT points to the last slot available for reception (?), 4080 * RDT points to the last slot available for reception (?),
4084 * so RDT = num_rx_desc - 1 means the whole ring is available. 4081 * so RDT = num_rx_desc - 1 means the whole ring is available.
4085 */ 4082 */
4086#ifdef DEV_NETMAP 4083#ifdef DEV_NETMAP
4087 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 4084 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
4088 (ifp->if_capenable & IFCAP_NETMAP)) { 4085 (ifp->if_capenable & IFCAP_NETMAP)) {
4089 struct netmap_adapter *na = NA(adapter->ifp); 4086 struct netmap_adapter *na = NA(adapter->ifp);
4090 struct netmap_kring *kring = na->rx_rings[i]; 4087 struct netmap_kring *kring = na->rx_rings[i];
4091 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 4088 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
4092 4089
4093 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t); 4090 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
4094 } else 4091 } else
4095#endif /* DEV_NETMAP */ 4092#endif /* DEV_NETMAP */
4096 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), 4093 IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
4097 adapter->num_rx_desc - 1); 4094 adapter->num_rx_desc - 1);
4098 } 4095 }
4099 4096
4100 /* Enable Receive engine */ 4097 /* Enable Receive engine */
4101 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4098 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4102 if (hw->mac.type == ixgbe_mac_82598EB) 4099 if (hw->mac.type == ixgbe_mac_82598EB)
4103 rxctrl |= IXGBE_RXCTRL_DMBYPS; 4100 rxctrl |= IXGBE_RXCTRL_DMBYPS;
4104 rxctrl |= IXGBE_RXCTRL_RXEN; 4101 rxctrl |= IXGBE_RXCTRL_RXEN;
4105 ixgbe_enable_rx_dma(hw, rxctrl); 4102 ixgbe_enable_rx_dma(hw, rxctrl);
4106 4103
4107 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); 4104 callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
4108 4105
4109 /* Set up MSI/MSI-X routing */ 4106 /* Set up MSI/MSI-X routing */
4110 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 4107 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
4111 ixgbe_configure_ivars(adapter); 4108 ixgbe_configure_ivars(adapter);
4112 /* Set up auto-mask */ 4109 /* Set up auto-mask */
4113 if (hw->mac.type == ixgbe_mac_82598EB) 4110 if (hw->mac.type == ixgbe_mac_82598EB)
4114 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4111 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4115 else { 4112 else {
4116 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 4113 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
4117 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 4114 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
4118 } 4115 }
4119 } else { /* Simple settings for Legacy/MSI */ 4116 } else { /* Simple settings for Legacy/MSI */
4120 ixgbe_set_ivar(adapter, 0, 0, 0); 4117 ixgbe_set_ivar(adapter, 0, 0, 0);
4121 ixgbe_set_ivar(adapter, 0, 0, 1); 4118 ixgbe_set_ivar(adapter, 0, 0, 1);
4122 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 4119 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
4123 } 4120 }
4124 4121
4125 ixgbe_init_fdir(adapter); 4122 ixgbe_init_fdir(adapter);
4126 4123
4127 /* 4124 /*
4128 * Check on any SFP devices that 4125 * Check on any SFP devices that
4129 * need to be kick-started 4126 * need to be kick-started
4130 */ 4127 */
4131 if (hw->phy.type == ixgbe_phy_none) { 4128 if (hw->phy.type == ixgbe_phy_none) {
4132 err = hw->phy.ops.identify(hw); 4129 err = hw->phy.ops.identify(hw);
4133 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { 4130 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4134 device_printf(dev, 4131 device_printf(dev,
4135 "Unsupported SFP+ module type was detected.\n"); 4132 "Unsupported SFP+ module type was detected.\n");
4136 return; 4133 return;
4137 } 4134 }
4138 } 4135 }
4139 4136
4140 /* Set moderation on the Link interrupt */ 4137 /* Set moderation on the Link interrupt */
4141 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); 4138 ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
4142 4139
4143 /* Enable EEE power saving */ 4140 /* Enable EEE power saving */
4144 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 4141 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
4145 hw->mac.ops.setup_eee(hw, 4142 hw->mac.ops.setup_eee(hw,
4146 adapter->feat_en & IXGBE_FEATURE_EEE); 4143 adapter->feat_en & IXGBE_FEATURE_EEE);
4147 4144
4148 /* Enable power to the phy. */ 4145 /* Enable power to the phy. */
4149 ixgbe_set_phy_power(hw, TRUE); 4146 ixgbe_set_phy_power(hw, TRUE);
4150 4147
4151 /* Config/Enable Link */ 4148 /* Config/Enable Link */
4152 ixgbe_config_link(adapter); 4149 ixgbe_config_link(adapter);
4153 4150
4154 /* Hardware Packet Buffer & Flow Control setup */ 4151 /* Hardware Packet Buffer & Flow Control setup */
4155 ixgbe_config_delay_values(adapter); 4152 ixgbe_config_delay_values(adapter);
4156 4153
4157 /* Initialize the FC settings */ 4154 /* Initialize the FC settings */
4158 ixgbe_start_hw(hw); 4155 ixgbe_start_hw(hw);
4159 4156
4160 /* Set up VLAN support and filter */ 4157 /* Set up VLAN support and filter */
4161 ixgbe_setup_vlan_hw_support(adapter); 4158 ixgbe_setup_vlan_hw_support(adapter);
4162 4159
4163 /* Setup DMA Coalescing */ 4160 /* Setup DMA Coalescing */
4164 ixgbe_config_dmac(adapter); 4161 ixgbe_config_dmac(adapter);
4165 4162
4166 /* And now turn on interrupts */ 4163 /* And now turn on interrupts */
4167 ixgbe_enable_intr(adapter); 4164 ixgbe_enable_intr(adapter);
4168 4165
4169 /* Enable the use of the MBX by the VF's */ 4166 /* Enable the use of the MBX by the VF's */
4170 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { 4167 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
4171 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 4168 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4172 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 4169 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
4173 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 4170 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4174 } 4171 }
4175 4172
4176 /* Update saved flags. See ixgbe_ifflags_cb() */ 4173 /* Update saved flags. See ixgbe_ifflags_cb() */
4177 adapter->if_flags = ifp->if_flags; 4174 adapter->if_flags = ifp->if_flags;
4178 adapter->ec_capenable = adapter->osdep.ec.ec_capenable; 4175 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
4179 4176
4180 /* Now inform the stack we're ready */ 4177 /* Now inform the stack we're ready */
4181 ifp->if_flags |= IFF_RUNNING; 4178 ifp->if_flags |= IFF_RUNNING;
4182 4179
4183 return; 4180 return;
4184} /* ixgbe_init_locked */ 4181} /* ixgbe_init_locked */
4185 4182
4186/************************************************************************ 4183/************************************************************************
4187 * ixgbe_init 4184 * ixgbe_init
4188 ************************************************************************/ 4185 ************************************************************************/
4189static int 4186static int
4190ixgbe_init(struct ifnet *ifp) 4187ixgbe_init(struct ifnet *ifp)
4191{ 4188{
4192 struct adapter *adapter = ifp->if_softc; 4189 struct adapter *adapter = ifp->if_softc;
4193 4190
4194 IXGBE_CORE_LOCK(adapter); 4191 IXGBE_CORE_LOCK(adapter);
4195 ixgbe_init_locked(adapter); 4192 ixgbe_init_locked(adapter);
4196 IXGBE_CORE_UNLOCK(adapter); 4193 IXGBE_CORE_UNLOCK(adapter);
4197 4194
4198 return 0; /* XXX ixgbe_init_locked cannot fail? really? */ 4195 return 0; /* XXX ixgbe_init_locked cannot fail? really? */
4199} /* ixgbe_init */ 4196} /* ixgbe_init */
4200 4197
4201/************************************************************************ 4198/************************************************************************
4202 * ixgbe_set_ivar 4199 * ixgbe_set_ivar
4203 * 4200 *

cvs diff -r1.125.2.1 -r1.125.2.2 src/sys/dev/pci/ixgbe/ixv.c (switch to unified diff)

--- src/sys/dev/pci/ixgbe/ixv.c 2019/09/01 11:07:05 1.125.2.1
+++ src/sys/dev/pci/ixgbe/ixv.c 2019/09/01 11:12:45 1.125.2.2
@@ -1,3129 +1,3121 @@ @@ -1,3129 +1,3121 @@
1/*$NetBSD: ixv.c,v 1.125.2.1 2019/09/01 11:07:05 martin Exp $*/ 1/*$NetBSD: ixv.c,v 1.125.2.2 2019/09/01 11:12:45 martin Exp $*/
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37#ifdef _KERNEL_OPT 37#ifdef _KERNEL_OPT
38#include "opt_inet.h" 38#include "opt_inet.h"
39#include "opt_inet6.h" 39#include "opt_inet6.h"
40#include "opt_net_mpsafe.h" 40#include "opt_net_mpsafe.h"
41#endif 41#endif
42 42
43#include "ixgbe.h" 43#include "ixgbe.h"
44#include "vlan.h" 44#include "vlan.h"
45 45
46/************************************************************************ 46/************************************************************************
47 * Driver version 47 * Driver version
48 ************************************************************************/ 48 ************************************************************************/
49static const char ixv_driver_version[] = "2.0.1-k"; 49static const char ixv_driver_version[] = "2.0.1-k";
50/* XXX NetBSD: + 1.5.17 */ 50/* XXX NetBSD: + 1.5.17 */
51 51
52/************************************************************************ 52/************************************************************************
53 * PCI Device ID Table 53 * PCI Device ID Table
54 * 54 *
55 * Used by probe to select devices to load on 55 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings 56 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s 57 * Last entry must be all 0s
58 * 58 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/ 60 ************************************************************************/
61static const ixgbe_vendor_info_t ixv_vendor_info_array[] = 61static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
62{ 62{
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, 63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, 64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, 65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, 66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0}, 67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
68 /* required last entry */ 68 /* required last entry */
69 {0, 0, 0, 0, 0} 69 {0, 0, 0, 0, 0}
70}; 70};
71 71
72/************************************************************************ 72/************************************************************************
73 * Table of branding strings 73 * Table of branding strings
74 ************************************************************************/ 74 ************************************************************************/
75static const char *ixv_strings[] = { 75static const char *ixv_strings[] = {
76 "Intel(R) PRO/10GbE Virtual Function Network Driver" 76 "Intel(R) PRO/10GbE Virtual Function Network Driver"
77}; 77};
78 78
79/********************************************************************* 79/*********************************************************************
80 * Function prototypes 80 * Function prototypes
81 *********************************************************************/ 81 *********************************************************************/
82static int ixv_probe(device_t, cfdata_t, void *); 82static int ixv_probe(device_t, cfdata_t, void *);
83static void ixv_attach(device_t, device_t, void *); 83static void ixv_attach(device_t, device_t, void *);
84static int ixv_detach(device_t, int); 84static int ixv_detach(device_t, int);
85#if 0 85#if 0
86static int ixv_shutdown(device_t); 86static int ixv_shutdown(device_t);
87#endif 87#endif
88static int ixv_ifflags_cb(struct ethercom *); 88static int ixv_ifflags_cb(struct ethercom *);
89static int ixv_ioctl(struct ifnet *, u_long, void *); 89static int ixv_ioctl(struct ifnet *, u_long, void *);
90static int ixv_init(struct ifnet *); 90static int ixv_init(struct ifnet *);
91static void ixv_init_locked(struct adapter *); 91static void ixv_init_locked(struct adapter *);
92static void ixv_ifstop(struct ifnet *, int); 92static void ixv_ifstop(struct ifnet *, int);
93static void ixv_stop(void *); 93static void ixv_stop(void *);
94static void ixv_init_device_features(struct adapter *); 94static void ixv_init_device_features(struct adapter *);
95static void ixv_media_status(struct ifnet *, struct ifmediareq *); 95static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96static int ixv_media_change(struct ifnet *); 96static int ixv_media_change(struct ifnet *);
97static int ixv_allocate_pci_resources(struct adapter *, 97static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *); 98 const struct pci_attach_args *);
99static int ixv_allocate_msix(struct adapter *, 99static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *); 100 const struct pci_attach_args *);
101static int ixv_configure_interrupts(struct adapter *); 101static int ixv_configure_interrupts(struct adapter *);
102static void ixv_free_pci_resources(struct adapter *); 102static void ixv_free_pci_resources(struct adapter *);
103static void ixv_local_timer(void *); 103static void ixv_local_timer(void *);
104static void ixv_local_timer_locked(void *); 104static void ixv_local_timer_locked(void *);
105static int ixv_setup_interface(device_t, struct adapter *); 105static int ixv_setup_interface(device_t, struct adapter *);
106static int ixv_negotiate_api(struct adapter *); 106static int ixv_negotiate_api(struct adapter *);
107 107
108static void ixv_initialize_transmit_units(struct adapter *); 108static void ixv_initialize_transmit_units(struct adapter *);
109static void ixv_initialize_receive_units(struct adapter *); 109static void ixv_initialize_receive_units(struct adapter *);
110static void ixv_initialize_rss_mapping(struct adapter *); 110static void ixv_initialize_rss_mapping(struct adapter *);
111static s32 ixv_check_link(struct adapter *); 111static s32 ixv_check_link(struct adapter *);
112 112
113static void ixv_enable_intr(struct adapter *); 113static void ixv_enable_intr(struct adapter *);
114static void ixv_disable_intr(struct adapter *); 114static void ixv_disable_intr(struct adapter *);
115static void ixv_set_multi(struct adapter *); 115static void ixv_set_multi(struct adapter *);
116static void ixv_update_link_status(struct adapter *); 116static void ixv_update_link_status(struct adapter *);
117static int ixv_sysctl_debug(SYSCTLFN_PROTO); 117static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118static void ixv_set_ivar(struct adapter *, u8, u8, s8); 118static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119static void ixv_configure_ivars(struct adapter *); 119static void ixv_configure_ivars(struct adapter *);
120static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 120static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t); 121static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
122 122
123static void ixv_setup_vlan_tagging(struct adapter *); 123static void ixv_setup_vlan_tagging(struct adapter *);
124static int ixv_setup_vlan_support(struct adapter *); 124static int ixv_setup_vlan_support(struct adapter *);
125static int ixv_vlan_cb(struct ethercom *, uint16_t, bool); 125static int ixv_vlan_cb(struct ethercom *, uint16_t, bool);
126static int ixv_register_vlan(void *, struct ifnet *, u16); 126static int ixv_register_vlan(struct adapter *, u16);
127static int ixv_unregister_vlan(void *, struct ifnet *, u16); 127static int ixv_unregister_vlan(struct adapter *, u16);
128 128
129static void ixv_add_device_sysctls(struct adapter *); 129static void ixv_add_device_sysctls(struct adapter *);
130static void ixv_save_stats(struct adapter *); 130static void ixv_save_stats(struct adapter *);
131static void ixv_init_stats(struct adapter *); 131static void ixv_init_stats(struct adapter *);
132static void ixv_update_stats(struct adapter *); 132static void ixv_update_stats(struct adapter *);
133static void ixv_add_stats_sysctls(struct adapter *); 133static void ixv_add_stats_sysctls(struct adapter *);
134 134
135/* Sysctl handlers */ 135/* Sysctl handlers */
136static void ixv_set_sysctl_value(struct adapter *, const char *, 136static void ixv_set_sysctl_value(struct adapter *, const char *,
137 const char *, int *, int); 137 const char *, int *, int);
138static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); 138static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
139static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO); 139static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
140static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO); 140static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
141static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO); 141static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
142static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO); 142static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
143static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO); 143static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
144 144
145/* The MSI-X Interrupt handlers */ 145/* The MSI-X Interrupt handlers */
146static int ixv_msix_que(void *); 146static int ixv_msix_que(void *);
147static int ixv_msix_mbx(void *); 147static int ixv_msix_mbx(void *);
148 148
149/* Deferred interrupt tasklets */ 149/* Deferred interrupt tasklets */
150static void ixv_handle_que(void *); 150static void ixv_handle_que(void *);
151static void ixv_handle_link(void *); 151static void ixv_handle_link(void *);
152 152
153/* Workqueue handler for deferred work */ 153/* Workqueue handler for deferred work */
154static void ixv_handle_que_work(struct work *, void *); 154static void ixv_handle_que_work(struct work *, void *);
155 155
156const struct sysctlnode *ixv_sysctl_instance(struct adapter *); 156const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
157static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *); 157static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
158 158
159/************************************************************************ 159/************************************************************************
160 * FreeBSD Device Interface Entry Points 160 * FreeBSD Device Interface Entry Points
161 ************************************************************************/ 161 ************************************************************************/
162CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter), 162CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
163 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL, 163 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
164 DVF_DETACH_SHUTDOWN); 164 DVF_DETACH_SHUTDOWN);
165 165
166#if 0 166#if 0
167static driver_t ixv_driver = { 167static driver_t ixv_driver = {
168 "ixv", ixv_methods, sizeof(struct adapter), 168 "ixv", ixv_methods, sizeof(struct adapter),
169}; 169};
170 170
171devclass_t ixv_devclass; 171devclass_t ixv_devclass;
172DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); 172DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
173MODULE_DEPEND(ixv, pci, 1, 1, 1); 173MODULE_DEPEND(ixv, pci, 1, 1, 1);
174MODULE_DEPEND(ixv, ether, 1, 1, 1); 174MODULE_DEPEND(ixv, ether, 1, 1, 1);
175#endif 175#endif
176 176
177/* 177/*
178 * TUNEABLE PARAMETERS: 178 * TUNEABLE PARAMETERS:
179 */ 179 */
180 180
181/* Number of Queues - do not exceed MSI-X vectors - 1 */ 181/* Number of Queues - do not exceed MSI-X vectors - 1 */
182static int ixv_num_queues = 0; 182static int ixv_num_queues = 0;
183#define TUNABLE_INT(__x, __y) 183#define TUNABLE_INT(__x, __y)
184TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues); 184TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
185 185
186/* 186/*
187 * AIM: Adaptive Interrupt Moderation 187 * AIM: Adaptive Interrupt Moderation
188 * which means that the interrupt rate 188 * which means that the interrupt rate
189 * is varied over time based on the 189 * is varied over time based on the
190 * traffic for that interrupt vector 190 * traffic for that interrupt vector
191 */ 191 */
192static bool ixv_enable_aim = false; 192static bool ixv_enable_aim = false;
193TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); 193TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
194 194
195static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 195static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
196TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate); 196TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
197 197
198/* How many packets rxeof tries to clean at a time */ 198/* How many packets rxeof tries to clean at a time */
199static int ixv_rx_process_limit = 256; 199static int ixv_rx_process_limit = 256;
200TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); 200TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
201 201
202/* How many packets txeof tries to clean at a time */ 202/* How many packets txeof tries to clean at a time */
203static int ixv_tx_process_limit = 256; 203static int ixv_tx_process_limit = 256;
204TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); 204TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
205 205
206/* Which packet processing uses workqueue or softint */ 206/* Which packet processing uses workqueue or softint */
207static bool ixv_txrx_workqueue = false; 207static bool ixv_txrx_workqueue = false;
208 208
209/* 209/*
210 * Number of TX descriptors per ring, 210 * Number of TX descriptors per ring,
211 * setting higher than RX as this seems 211 * setting higher than RX as this seems
212 * the better performing choice. 212 * the better performing choice.
213 */ 213 */
214static int ixv_txd = PERFORM_TXD; 214static int ixv_txd = PERFORM_TXD;
215TUNABLE_INT("hw.ixv.txd", &ixv_txd); 215TUNABLE_INT("hw.ixv.txd", &ixv_txd);
216 216
217/* Number of RX descriptors per ring */ 217/* Number of RX descriptors per ring */
218static int ixv_rxd = PERFORM_RXD; 218static int ixv_rxd = PERFORM_RXD;
219TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); 219TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
220 220
221/* Legacy Transmit (single queue) */ 221/* Legacy Transmit (single queue) */
222static int ixv_enable_legacy_tx = 0; 222static int ixv_enable_legacy_tx = 0;
223TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx); 223TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
224 224
225#ifdef NET_MPSAFE 225#ifdef NET_MPSAFE
226#define IXGBE_MPSAFE 1 226#define IXGBE_MPSAFE 1
227#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 227#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
228#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE 228#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
229#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 229#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
230#else 230#else
231#define IXGBE_CALLOUT_FLAGS 0 231#define IXGBE_CALLOUT_FLAGS 0
232#define IXGBE_SOFTINFT_FLAGS 0 232#define IXGBE_SOFTINFT_FLAGS 0
233#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 233#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
234#endif 234#endif
235#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 235#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
236 236
237#if 0 237#if 0
238static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *); 238static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
239static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *); 239static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
240#endif 240#endif
241 241
242/************************************************************************ 242/************************************************************************
243 * ixv_probe - Device identification routine 243 * ixv_probe - Device identification routine
244 * 244 *
245 * Determines if the driver should be loaded on 245 * Determines if the driver should be loaded on
246 * adapter based on its PCI vendor/device ID. 246 * adapter based on its PCI vendor/device ID.
247 * 247 *
248 * return BUS_PROBE_DEFAULT on success, positive on failure 248 * return BUS_PROBE_DEFAULT on success, positive on failure
249 ************************************************************************/ 249 ************************************************************************/
250static int 250static int
251ixv_probe(device_t dev, cfdata_t cf, void *aux) 251ixv_probe(device_t dev, cfdata_t cf, void *aux)
252{ 252{
253#ifdef __HAVE_PCI_MSI_MSIX 253#ifdef __HAVE_PCI_MSI_MSIX
254 const struct pci_attach_args *pa = aux; 254 const struct pci_attach_args *pa = aux;
255 255
256 return (ixv_lookup(pa) != NULL) ? 1 : 0; 256 return (ixv_lookup(pa) != NULL) ? 1 : 0;
257#else 257#else
258 return 0; 258 return 0;
259#endif 259#endif
260} /* ixv_probe */ 260} /* ixv_probe */
261 261
262static const ixgbe_vendor_info_t * 262static const ixgbe_vendor_info_t *
263ixv_lookup(const struct pci_attach_args *pa) 263ixv_lookup(const struct pci_attach_args *pa)
264{ 264{
265 const ixgbe_vendor_info_t *ent; 265 const ixgbe_vendor_info_t *ent;
266 pcireg_t subid; 266 pcireg_t subid;
267 267
268 INIT_DEBUGOUT("ixv_lookup: begin"); 268 INIT_DEBUGOUT("ixv_lookup: begin");
269 269
270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) 270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
271 return NULL; 271 return NULL;
272 272
273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
274 274
275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { 275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && 276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) && 277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || 278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
279 (ent->subvendor_id == 0)) && 279 (ent->subvendor_id == 0)) &&
280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || 280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
281 (ent->subdevice_id == 0))) { 281 (ent->subdevice_id == 0))) {
282 return ent; 282 return ent;
283 } 283 }
284 } 284 }
285 285
286 return NULL; 286 return NULL;
287} 287}
288 288
289/************************************************************************ 289/************************************************************************
290 * ixv_attach - Device initialization routine 290 * ixv_attach - Device initialization routine
291 * 291 *
292 * Called when the driver is being loaded. 292 * Called when the driver is being loaded.
293 * Identifies the type of hardware, allocates all resources 293 * Identifies the type of hardware, allocates all resources
294 * and initializes the hardware. 294 * and initializes the hardware.
295 * 295 *
296 * return 0 on success, positive on failure 296 * return 0 on success, positive on failure
297 ************************************************************************/ 297 ************************************************************************/
298static void 298static void
299ixv_attach(device_t parent, device_t dev, void *aux) 299ixv_attach(device_t parent, device_t dev, void *aux)
300{ 300{
301 struct adapter *adapter; 301 struct adapter *adapter;
302 struct ixgbe_hw *hw; 302 struct ixgbe_hw *hw;
303 int error = 0; 303 int error = 0;
304 pcireg_t id, subid; 304 pcireg_t id, subid;
305 const ixgbe_vendor_info_t *ent; 305 const ixgbe_vendor_info_t *ent;
306 const struct pci_attach_args *pa = aux; 306 const struct pci_attach_args *pa = aux;
307 const char *apivstr; 307 const char *apivstr;
308 const char *str; 308 const char *str;
309 char buf[256]; 309 char buf[256];
310 310
311 INIT_DEBUGOUT("ixv_attach: begin"); 311 INIT_DEBUGOUT("ixv_attach: begin");
312 312
313 /* 313 /*
314 * Make sure BUSMASTER is set, on a VM under 314 * Make sure BUSMASTER is set, on a VM under
315 * KVM it may not be and will break things. 315 * KVM it may not be and will break things.
316 */ 316 */
317 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 317 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
318 318
319 /* Allocate, clear, and link in our adapter structure */ 319 /* Allocate, clear, and link in our adapter structure */
320 adapter = device_private(dev); 320 adapter = device_private(dev);
321 adapter->dev = dev; 321 adapter->dev = dev;
322 adapter->hw.back = adapter; 322 adapter->hw.back = adapter;
323 hw = &adapter->hw; 323 hw = &adapter->hw;
324 324
325 adapter->init_locked = ixv_init_locked; 325 adapter->init_locked = ixv_init_locked;
326 adapter->stop_locked = ixv_stop; 326 adapter->stop_locked = ixv_stop;
327 327
328 adapter->osdep.pc = pa->pa_pc; 328 adapter->osdep.pc = pa->pa_pc;
329 adapter->osdep.tag = pa->pa_tag; 329 adapter->osdep.tag = pa->pa_tag;
330 if (pci_dma64_available(pa)) 330 if (pci_dma64_available(pa))
331 adapter->osdep.dmat = pa->pa_dmat64; 331 adapter->osdep.dmat = pa->pa_dmat64;
332 else 332 else
333 adapter->osdep.dmat = pa->pa_dmat; 333 adapter->osdep.dmat = pa->pa_dmat;
334 adapter->osdep.attached = false; 334 adapter->osdep.attached = false;
335 335
336 ent = ixv_lookup(pa); 336 ent = ixv_lookup(pa);
337 337
338 KASSERT(ent != NULL); 338 KASSERT(ent != NULL);
339 339
340 aprint_normal(": %s, Version - %s\n", 340 aprint_normal(": %s, Version - %s\n",
341 ixv_strings[ent->index], ixv_driver_version); 341 ixv_strings[ent->index], ixv_driver_version);
342 342
343 /* Core Lock Init*/ 343 /* Core Lock Init*/
344 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 344 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
345 345
346 /* Do base PCI setup - map BAR0 */ 346 /* Do base PCI setup - map BAR0 */
347 if (ixv_allocate_pci_resources(adapter, pa)) { 347 if (ixv_allocate_pci_resources(adapter, pa)) {
348 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n"); 348 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
349 error = ENXIO; 349 error = ENXIO;
350 goto err_out; 350 goto err_out;
351 } 351 }
352 352
353 /* SYSCTL APIs */ 353 /* SYSCTL APIs */
354 ixv_add_device_sysctls(adapter); 354 ixv_add_device_sysctls(adapter);
355 355
356 /* Set up the timer callout */ 356 /* Set up the timer callout */
357 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 357 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
358 358
359 /* Save off the information about this board */ 359 /* Save off the information about this board */
360 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 360 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
361 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 361 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
362 hw->vendor_id = PCI_VENDOR(id); 362 hw->vendor_id = PCI_VENDOR(id);
363 hw->device_id = PCI_PRODUCT(id); 363 hw->device_id = PCI_PRODUCT(id);
364 hw->revision_id = 364 hw->revision_id =
365 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 365 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
366 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 366 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
367 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 367 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
368 368
369 /* A subset of set_mac_type */ 369 /* A subset of set_mac_type */
370 switch (hw->device_id) { 370 switch (hw->device_id) {
371 case IXGBE_DEV_ID_82599_VF: 371 case IXGBE_DEV_ID_82599_VF:
372 hw->mac.type = ixgbe_mac_82599_vf; 372 hw->mac.type = ixgbe_mac_82599_vf;
373 str = "82599 VF"; 373 str = "82599 VF";
374 break; 374 break;
375 case IXGBE_DEV_ID_X540_VF: 375 case IXGBE_DEV_ID_X540_VF:
376 hw->mac.type = ixgbe_mac_X540_vf; 376 hw->mac.type = ixgbe_mac_X540_vf;
377 str = "X540 VF"; 377 str = "X540 VF";
378 break; 378 break;
379 case IXGBE_DEV_ID_X550_VF: 379 case IXGBE_DEV_ID_X550_VF:
380 hw->mac.type = ixgbe_mac_X550_vf; 380 hw->mac.type = ixgbe_mac_X550_vf;
381 str = "X550 VF"; 381 str = "X550 VF";
382 break; 382 break;
383 case IXGBE_DEV_ID_X550EM_X_VF: 383 case IXGBE_DEV_ID_X550EM_X_VF:
384 hw->mac.type = ixgbe_mac_X550EM_x_vf; 384 hw->mac.type = ixgbe_mac_X550EM_x_vf;
385 str = "X550EM X VF"; 385 str = "X550EM X VF";
386 break; 386 break;
387 case IXGBE_DEV_ID_X550EM_A_VF: 387 case IXGBE_DEV_ID_X550EM_A_VF:
388 hw->mac.type = ixgbe_mac_X550EM_a_vf; 388 hw->mac.type = ixgbe_mac_X550EM_a_vf;
389 str = "X550EM A VF"; 389 str = "X550EM A VF";
390 break; 390 break;
391 default: 391 default:
392 /* Shouldn't get here since probe succeeded */ 392 /* Shouldn't get here since probe succeeded */
393 aprint_error_dev(dev, "Unknown device ID!\n"); 393 aprint_error_dev(dev, "Unknown device ID!\n");
394 error = ENXIO; 394 error = ENXIO;
395 goto err_out; 395 goto err_out;
396 break; 396 break;
397 } 397 }
398 aprint_normal_dev(dev, "device %s\n", str); 398 aprint_normal_dev(dev, "device %s\n", str);
399 399
400 ixv_init_device_features(adapter); 400 ixv_init_device_features(adapter);
401 401
402 /* Initialize the shared code */ 402 /* Initialize the shared code */
403 error = ixgbe_init_ops_vf(hw); 403 error = ixgbe_init_ops_vf(hw);
404 if (error) { 404 if (error) {
405 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n"); 405 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
406 error = EIO; 406 error = EIO;
407 goto err_out; 407 goto err_out;
408 } 408 }
409 409
410 /* Setup the mailbox */ 410 /* Setup the mailbox */
411 ixgbe_init_mbx_params_vf(hw); 411 ixgbe_init_mbx_params_vf(hw);
412 412
413 /* Set the right number of segments */ 413 /* Set the right number of segments */
414 adapter->num_segs = IXGBE_82599_SCATTER; 414 adapter->num_segs = IXGBE_82599_SCATTER;
415 415
416 /* Reset mbox api to 1.0 */ 416 /* Reset mbox api to 1.0 */
417 error = hw->mac.ops.reset_hw(hw); 417 error = hw->mac.ops.reset_hw(hw);
418 if (error == IXGBE_ERR_RESET_FAILED) 418 if (error == IXGBE_ERR_RESET_FAILED)
419 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n"); 419 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
420 else if (error) 420 else if (error)
421 aprint_error_dev(dev, "...reset_hw() failed with error %d\n", 421 aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
422 error); 422 error);
423 if (error) { 423 if (error) {
424 error = EIO; 424 error = EIO;
425 goto err_out; 425 goto err_out;
426 } 426 }
427 427
428 error = hw->mac.ops.init_hw(hw); 428 error = hw->mac.ops.init_hw(hw);
429 if (error) { 429 if (error) {
430 aprint_error_dev(dev, "...init_hw() failed!\n"); 430 aprint_error_dev(dev, "...init_hw() failed!\n");
431 error = EIO; 431 error = EIO;
432 goto err_out; 432 goto err_out;
433 } 433 }
434 434
435 /* Negotiate mailbox API version */ 435 /* Negotiate mailbox API version */
436 error = ixv_negotiate_api(adapter); 436 error = ixv_negotiate_api(adapter);
437 if (error) 437 if (error)
438 aprint_normal_dev(dev, 438 aprint_normal_dev(dev,
439 "MBX API negotiation failed during attach!\n"); 439 "MBX API negotiation failed during attach!\n");
440 switch (hw->api_version) { 440 switch (hw->api_version) {
441 case ixgbe_mbox_api_10: 441 case ixgbe_mbox_api_10:
442 apivstr = "1.0"; 442 apivstr = "1.0";
443 break; 443 break;
444 case ixgbe_mbox_api_20: 444 case ixgbe_mbox_api_20:
445 apivstr = "2.0"; 445 apivstr = "2.0";
446 break; 446 break;
447 case ixgbe_mbox_api_11: 447 case ixgbe_mbox_api_11:
448 apivstr = "1.1"; 448 apivstr = "1.1";
449 break; 449 break;
450 case ixgbe_mbox_api_12: 450 case ixgbe_mbox_api_12:
451 apivstr = "1.2"; 451 apivstr = "1.2";
452 break; 452 break;
453 case ixgbe_mbox_api_13: 453 case ixgbe_mbox_api_13:
454 apivstr = "1.3"; 454 apivstr = "1.3";
455 break; 455 break;
456 default: 456 default:
457 apivstr = "unknown"; 457 apivstr = "unknown";
458 break; 458 break;
459 } 459 }
460 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr); 460 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
461 461
462 /* If no mac address was assigned, make a random one */ 462 /* If no mac address was assigned, make a random one */
463 if (!ixv_check_ether_addr(hw->mac.addr)) { 463 if (!ixv_check_ether_addr(hw->mac.addr)) {
464 u8 addr[ETHER_ADDR_LEN]; 464 u8 addr[ETHER_ADDR_LEN];
465 uint64_t rndval = cprng_strong64(); 465 uint64_t rndval = cprng_strong64();
466 466
467 memcpy(addr, &rndval, sizeof(addr)); 467 memcpy(addr, &rndval, sizeof(addr));
468 addr[0] &= 0xFE; 468 addr[0] &= 0xFE;
469 addr[0] |= 0x02; 469 addr[0] |= 0x02;
470 bcopy(addr, hw->mac.addr, sizeof(addr)); 470 bcopy(addr, hw->mac.addr, sizeof(addr));
471 } 471 }
472 472
473 /* Register for VLAN events */ 473 /* Register for VLAN events */
474 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb); 474 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);
475 475
476 /* Sysctls for limiting the amount of work done in the taskqueues */ 476 /* Sysctls for limiting the amount of work done in the taskqueues */
477 ixv_set_sysctl_value(adapter, "rx_processing_limit", 477 ixv_set_sysctl_value(adapter, "rx_processing_limit",
478 "max number of rx packets to process", 478 "max number of rx packets to process",
479 &adapter->rx_process_limit, ixv_rx_process_limit); 479 &adapter->rx_process_limit, ixv_rx_process_limit);
480 480
481 ixv_set_sysctl_value(adapter, "tx_processing_limit", 481 ixv_set_sysctl_value(adapter, "tx_processing_limit",
482 "max number of tx packets to process", 482 "max number of tx packets to process",
483 &adapter->tx_process_limit, ixv_tx_process_limit); 483 &adapter->tx_process_limit, ixv_tx_process_limit);
484 484
485 /* Do descriptor calc and sanity checks */ 485 /* Do descriptor calc and sanity checks */
486 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 486 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
487 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { 487 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
488 aprint_error_dev(dev, "TXD config issue, using default!\n"); 488 aprint_error_dev(dev, "TXD config issue, using default!\n");
489 adapter->num_tx_desc = DEFAULT_TXD; 489 adapter->num_tx_desc = DEFAULT_TXD;
490 } else 490 } else
491 adapter->num_tx_desc = ixv_txd; 491 adapter->num_tx_desc = ixv_txd;
492 492
493 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 493 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
494 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { 494 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
495 aprint_error_dev(dev, "RXD config issue, using default!\n"); 495 aprint_error_dev(dev, "RXD config issue, using default!\n");
496 adapter->num_rx_desc = DEFAULT_RXD; 496 adapter->num_rx_desc = DEFAULT_RXD;
497 } else 497 } else
498 adapter->num_rx_desc = ixv_rxd; 498 adapter->num_rx_desc = ixv_rxd;
499 499
500 /* Setup MSI-X */ 500 /* Setup MSI-X */
501 error = ixv_configure_interrupts(adapter); 501 error = ixv_configure_interrupts(adapter);
502 if (error) 502 if (error)
503 goto err_out; 503 goto err_out;
504 504
505 /* Allocate our TX/RX Queues */ 505 /* Allocate our TX/RX Queues */
506 if (ixgbe_allocate_queues(adapter)) { 506 if (ixgbe_allocate_queues(adapter)) {
507 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n"); 507 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
508 error = ENOMEM; 508 error = ENOMEM;
509 goto err_out; 509 goto err_out;
510 } 510 }
511 511
512 /* hw.ix defaults init */ 512 /* hw.ix defaults init */
513 adapter->enable_aim = ixv_enable_aim; 513 adapter->enable_aim = ixv_enable_aim;
514 514
515 adapter->txrx_use_workqueue = ixv_txrx_workqueue; 515 adapter->txrx_use_workqueue = ixv_txrx_workqueue;
516 516
517 error = ixv_allocate_msix(adapter, pa); 517 error = ixv_allocate_msix(adapter, pa);
518 if (error) { 518 if (error) {
519 device_printf(dev, "ixv_allocate_msix() failed!\n"); 519 device_printf(dev, "ixv_allocate_msix() failed!\n");
520 goto err_late; 520 goto err_late;
521 } 521 }
522 522
523 /* Setup OS specific network interface */ 523 /* Setup OS specific network interface */
524 error = ixv_setup_interface(dev, adapter); 524 error = ixv_setup_interface(dev, adapter);
525 if (error != 0) { 525 if (error != 0) {
526 aprint_error_dev(dev, "ixv_setup_interface() failed!\n"); 526 aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
527 goto err_late; 527 goto err_late;
528 } 528 }
529 529
530 /* Do the stats setup */ 530 /* Do the stats setup */
531 ixv_save_stats(adapter); 531 ixv_save_stats(adapter);
532 ixv_init_stats(adapter); 532 ixv_init_stats(adapter);
533 ixv_add_stats_sysctls(adapter); 533 ixv_add_stats_sysctls(adapter);
534 534
535 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 535 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
536 ixgbe_netmap_attach(adapter); 536 ixgbe_netmap_attach(adapter);
537 537
538 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); 538 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
539 aprint_verbose_dev(dev, "feature cap %s\n", buf); 539 aprint_verbose_dev(dev, "feature cap %s\n", buf);
540 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); 540 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
541 aprint_verbose_dev(dev, "feature ena %s\n", buf); 541 aprint_verbose_dev(dev, "feature ena %s\n", buf);
542 542
543 INIT_DEBUGOUT("ixv_attach: end"); 543 INIT_DEBUGOUT("ixv_attach: end");
544 adapter->osdep.attached = true; 544 adapter->osdep.attached = true;
545 545
546 return; 546 return;
547 547
548err_late: 548err_late:
549 ixgbe_free_transmit_structures(adapter); 549 ixgbe_free_transmit_structures(adapter);
550 ixgbe_free_receive_structures(adapter); 550 ixgbe_free_receive_structures(adapter);
551 free(adapter->queues, M_DEVBUF); 551 free(adapter->queues, M_DEVBUF);
552err_out: 552err_out:
553 ixv_free_pci_resources(adapter); 553 ixv_free_pci_resources(adapter);
554 IXGBE_CORE_LOCK_DESTROY(adapter); 554 IXGBE_CORE_LOCK_DESTROY(adapter);
555 555
556 return; 556 return;
557} /* ixv_attach */ 557} /* ixv_attach */
558 558
559/************************************************************************ 559/************************************************************************
560 * ixv_detach - Device removal routine 560 * ixv_detach - Device removal routine
561 * 561 *
562 * Called when the driver is being removed. 562 * Called when the driver is being removed.
563 * Stops the adapter and deallocates all the resources 563 * Stops the adapter and deallocates all the resources
564 * that were allocated for driver operation. 564 * that were allocated for driver operation.
565 * 565 *
566 * return 0 on success, positive on failure 566 * return 0 on success, positive on failure
567 ************************************************************************/ 567 ************************************************************************/
568static int 568static int
569ixv_detach(device_t dev, int flags) 569ixv_detach(device_t dev, int flags)
570{ 570{
571 struct adapter *adapter = device_private(dev); 571 struct adapter *adapter = device_private(dev);
572 struct ixgbe_hw *hw = &adapter->hw; 572 struct ixgbe_hw *hw = &adapter->hw;
573 struct ix_queue *que = adapter->queues; 573 struct ix_queue *que = adapter->queues;
574 struct tx_ring *txr = adapter->tx_rings; 574 struct tx_ring *txr = adapter->tx_rings;
575 struct rx_ring *rxr = adapter->rx_rings; 575 struct rx_ring *rxr = adapter->rx_rings;
576 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 576 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
577 577
578 INIT_DEBUGOUT("ixv_detach: begin"); 578 INIT_DEBUGOUT("ixv_detach: begin");
579 if (adapter->osdep.attached == false) 579 if (adapter->osdep.attached == false)
580 return 0; 580 return 0;
581 581
582 /* Stop the interface. Callouts are stopped in it. */ 582 /* Stop the interface. Callouts are stopped in it. */
583 ixv_ifstop(adapter->ifp, 1); 583 ixv_ifstop(adapter->ifp, 1);
584 584
585#if NVLAN > 0 585#if NVLAN > 0
586 /* Make sure VLANs are not using driver */ 586 /* Make sure VLANs are not using driver */
587 if (!VLAN_ATTACHED(&adapter->osdep.ec)) 587 if (!VLAN_ATTACHED(&adapter->osdep.ec))
588 ; /* nothing to do: no VLANs */ 588 ; /* nothing to do: no VLANs */
589 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) 589 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
590 vlan_ifdetach(adapter->ifp); 590 vlan_ifdetach(adapter->ifp);
591 else { 591 else {
592 aprint_error_dev(dev, "VLANs in use, detach first\n"); 592 aprint_error_dev(dev, "VLANs in use, detach first\n");
593 return EBUSY; 593 return EBUSY;
594 } 594 }
595#endif 595#endif
596 596
597 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { 597 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
598 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 598 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
599 softint_disestablish(txr->txr_si); 599 softint_disestablish(txr->txr_si);
600 softint_disestablish(que->que_si); 600 softint_disestablish(que->que_si);
601 } 601 }
602 if (adapter->txr_wq != NULL) 602 if (adapter->txr_wq != NULL)
603 workqueue_destroy(adapter->txr_wq); 603 workqueue_destroy(adapter->txr_wq);
604 if (adapter->txr_wq_enqueued != NULL) 604 if (adapter->txr_wq_enqueued != NULL)
605 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); 605 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
606 if (adapter->que_wq != NULL) 606 if (adapter->que_wq != NULL)
607 workqueue_destroy(adapter->que_wq); 607 workqueue_destroy(adapter->que_wq);
608 608
609 /* Drain the Mailbox(link) queue */ 609 /* Drain the Mailbox(link) queue */
610 softint_disestablish(adapter->link_si); 610 softint_disestablish(adapter->link_si);
611 611
612 ether_ifdetach(adapter->ifp); 612 ether_ifdetach(adapter->ifp);
613 callout_halt(&adapter->timer, NULL); 613 callout_halt(&adapter->timer, NULL);
614 614
615 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 615 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
616 netmap_detach(adapter->ifp); 616 netmap_detach(adapter->ifp);
617 617
618 ixv_free_pci_resources(adapter); 618 ixv_free_pci_resources(adapter);
619#if 0 /* XXX the NetBSD port is probably missing something here */ 619#if 0 /* XXX the NetBSD port is probably missing something here */
620 bus_generic_detach(dev); 620 bus_generic_detach(dev);
621#endif 621#endif
622 if_detach(adapter->ifp); 622 if_detach(adapter->ifp);
623 if_percpuq_destroy(adapter->ipq); 623 if_percpuq_destroy(adapter->ipq);
624 624
625 sysctl_teardown(&adapter->sysctllog); 625 sysctl_teardown(&adapter->sysctllog);
626 evcnt_detach(&adapter->efbig_tx_dma_setup); 626 evcnt_detach(&adapter->efbig_tx_dma_setup);
627 evcnt_detach(&adapter->mbuf_defrag_failed); 627 evcnt_detach(&adapter->mbuf_defrag_failed);
628 evcnt_detach(&adapter->efbig2_tx_dma_setup); 628 evcnt_detach(&adapter->efbig2_tx_dma_setup);
629 evcnt_detach(&adapter->einval_tx_dma_setup); 629 evcnt_detach(&adapter->einval_tx_dma_setup);
630 evcnt_detach(&adapter->other_tx_dma_setup); 630 evcnt_detach(&adapter->other_tx_dma_setup);
631 evcnt_detach(&adapter->eagain_tx_dma_setup); 631 evcnt_detach(&adapter->eagain_tx_dma_setup);
632 evcnt_detach(&adapter->enomem_tx_dma_setup); 632 evcnt_detach(&adapter->enomem_tx_dma_setup);
633 evcnt_detach(&adapter->watchdog_events); 633 evcnt_detach(&adapter->watchdog_events);
634 evcnt_detach(&adapter->tso_err); 634 evcnt_detach(&adapter->tso_err);
635 evcnt_detach(&adapter->link_irq); 635 evcnt_detach(&adapter->link_irq);
636 636
637 txr = adapter->tx_rings; 637 txr = adapter->tx_rings;
638 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 638 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
639 evcnt_detach(&adapter->queues[i].irqs); 639 evcnt_detach(&adapter->queues[i].irqs);
640 evcnt_detach(&adapter->queues[i].handleq); 640 evcnt_detach(&adapter->queues[i].handleq);
641 evcnt_detach(&adapter->queues[i].req); 641 evcnt_detach(&adapter->queues[i].req);
642 evcnt_detach(&txr->no_desc_avail); 642 evcnt_detach(&txr->no_desc_avail);
643 evcnt_detach(&txr->total_packets); 643 evcnt_detach(&txr->total_packets);
644 evcnt_detach(&txr->tso_tx); 644 evcnt_detach(&txr->tso_tx);
645#ifndef IXGBE_LEGACY_TX 645#ifndef IXGBE_LEGACY_TX
646 evcnt_detach(&txr->pcq_drops); 646 evcnt_detach(&txr->pcq_drops);
647#endif 647#endif
648 648
649 evcnt_detach(&rxr->rx_packets); 649 evcnt_detach(&rxr->rx_packets);
650 evcnt_detach(&rxr->rx_bytes); 650 evcnt_detach(&rxr->rx_bytes);
651 evcnt_detach(&rxr->rx_copies); 651 evcnt_detach(&rxr->rx_copies);
652 evcnt_detach(&rxr->no_jmbuf); 652 evcnt_detach(&rxr->no_jmbuf);
653 evcnt_detach(&rxr->rx_discarded); 653 evcnt_detach(&rxr->rx_discarded);
654 } 654 }
655 evcnt_detach(&stats->ipcs); 655 evcnt_detach(&stats->ipcs);
656 evcnt_detach(&stats->l4cs); 656 evcnt_detach(&stats->l4cs);
657 evcnt_detach(&stats->ipcs_bad); 657 evcnt_detach(&stats->ipcs_bad);
658 evcnt_detach(&stats->l4cs_bad); 658 evcnt_detach(&stats->l4cs_bad);
659 659
660 /* Packet Reception Stats */ 660 /* Packet Reception Stats */
661 evcnt_detach(&stats->vfgorc); 661 evcnt_detach(&stats->vfgorc);
662 evcnt_detach(&stats->vfgprc); 662 evcnt_detach(&stats->vfgprc);
663 evcnt_detach(&stats->vfmprc); 663 evcnt_detach(&stats->vfmprc);
664 664
665 /* Packet Transmission Stats */ 665 /* Packet Transmission Stats */
666 evcnt_detach(&stats->vfgotc); 666 evcnt_detach(&stats->vfgotc);
667 evcnt_detach(&stats->vfgptc); 667 evcnt_detach(&stats->vfgptc);
668 668
669 /* Mailbox Stats */ 669 /* Mailbox Stats */
670 evcnt_detach(&hw->mbx.stats.msgs_tx); 670 evcnt_detach(&hw->mbx.stats.msgs_tx);
671 evcnt_detach(&hw->mbx.stats.msgs_rx); 671 evcnt_detach(&hw->mbx.stats.msgs_rx);
672 evcnt_detach(&hw->mbx.stats.acks); 672 evcnt_detach(&hw->mbx.stats.acks);
673 evcnt_detach(&hw->mbx.stats.reqs); 673 evcnt_detach(&hw->mbx.stats.reqs);
674 evcnt_detach(&hw->mbx.stats.rsts); 674 evcnt_detach(&hw->mbx.stats.rsts);
675 675
676 ixgbe_free_transmit_structures(adapter); 676 ixgbe_free_transmit_structures(adapter);
677 ixgbe_free_receive_structures(adapter); 677 ixgbe_free_receive_structures(adapter);
678 for (int i = 0; i < adapter->num_queues; i++) { 678 for (int i = 0; i < adapter->num_queues; i++) {
679 struct ix_queue *lque = &adapter->queues[i]; 679 struct ix_queue *lque = &adapter->queues[i];
680 mutex_destroy(&lque->dc_mtx); 680 mutex_destroy(&lque->dc_mtx);
681 } 681 }
682 free(adapter->queues, M_DEVBUF); 682 free(adapter->queues, M_DEVBUF);
683 683
684 IXGBE_CORE_LOCK_DESTROY(adapter); 684 IXGBE_CORE_LOCK_DESTROY(adapter);
685 685
686 return (0); 686 return (0);
687} /* ixv_detach */ 687} /* ixv_detach */
688 688
689/************************************************************************ 689/************************************************************************
690 * ixv_init_locked - Init entry point 690 * ixv_init_locked - Init entry point
691 * 691 *
692 * Used in two ways: It is used by the stack as an init entry 692 * Used in two ways: It is used by the stack as an init entry
693 * point in network interface structure. It is also used 693 * point in network interface structure. It is also used
694 * by the driver as a hw/sw initialization routine to get 694 * by the driver as a hw/sw initialization routine to get
695 * to a consistent state. 695 * to a consistent state.
696 * 696 *
697 * return 0 on success, positive on failure 697 * return 0 on success, positive on failure
698 ************************************************************************/ 698 ************************************************************************/
699static void 699static void
700ixv_init_locked(struct adapter *adapter) 700ixv_init_locked(struct adapter *adapter)
701{ 701{
702 struct ifnet *ifp = adapter->ifp; 702 struct ifnet *ifp = adapter->ifp;
703 device_t dev = adapter->dev; 703 device_t dev = adapter->dev;
704 struct ixgbe_hw *hw = &adapter->hw; 704 struct ixgbe_hw *hw = &adapter->hw;
705 struct ix_queue *que; 705 struct ix_queue *que;
706 int error = 0; 706 int error = 0;
707 uint32_t mask; 707 uint32_t mask;
708 int i; 708 int i;
709 709
710 INIT_DEBUGOUT("ixv_init_locked: begin"); 710 INIT_DEBUGOUT("ixv_init_locked: begin");
711 KASSERT(mutex_owned(&adapter->core_mtx)); 711 KASSERT(mutex_owned(&adapter->core_mtx));
712 hw->adapter_stopped = FALSE; 712 hw->adapter_stopped = FALSE;
713 hw->mac.ops.stop_adapter(hw); 713 hw->mac.ops.stop_adapter(hw);
714 callout_stop(&adapter->timer); 714 callout_stop(&adapter->timer);
715 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 715 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
716 que->disabled_count = 0; 716 que->disabled_count = 0;
717 717
718 /* reprogram the RAR[0] in case user changed it. */ 718 /* reprogram the RAR[0] in case user changed it. */
719 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 719 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
720 720
721 /* Get the latest mac address, User can use a LAA */ 721 /* Get the latest mac address, User can use a LAA */
722 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), 722 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
723 IXGBE_ETH_LENGTH_OF_ADDRESS); 723 IXGBE_ETH_LENGTH_OF_ADDRESS);
724 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); 724 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
725 725
726 /* Prepare transmit descriptors and buffers */ 726 /* Prepare transmit descriptors and buffers */
727 if (ixgbe_setup_transmit_structures(adapter)) { 727 if (ixgbe_setup_transmit_structures(adapter)) {
728 aprint_error_dev(dev, "Could not setup transmit structures\n"); 728 aprint_error_dev(dev, "Could not setup transmit structures\n");
729 ixv_stop(adapter); 729 ixv_stop(adapter);
730 return; 730 return;
731 } 731 }
732 732
733 /* Reset VF and renegotiate mailbox API version */ 733 /* Reset VF and renegotiate mailbox API version */
734 hw->mac.ops.reset_hw(hw); 734 hw->mac.ops.reset_hw(hw);
735 hw->mac.ops.start_hw(hw); 735 hw->mac.ops.start_hw(hw);
736 error = ixv_negotiate_api(adapter); 736 error = ixv_negotiate_api(adapter);
737 if (error) 737 if (error)
738 device_printf(dev, 738 device_printf(dev,
739 "Mailbox API negotiation failed in init_locked!\n"); 739 "Mailbox API negotiation failed in init_locked!\n");
740 740
741 ixv_initialize_transmit_units(adapter); 741 ixv_initialize_transmit_units(adapter);
742 742
743 /* Setup Multicast table */ 743 /* Setup Multicast table */
744 ixv_set_multi(adapter); 744 ixv_set_multi(adapter);
745 745
746 /* 746 /*
747 * Determine the correct mbuf pool 747 * Determine the correct mbuf pool
748 * for doing jumbo/headersplit 748 * for doing jumbo/headersplit
749 */ 749 */
750 if (ifp->if_mtu > ETHERMTU) 750 if (ifp->if_mtu > ETHERMTU)
751 adapter->rx_mbuf_sz = MJUMPAGESIZE; 751 adapter->rx_mbuf_sz = MJUMPAGESIZE;
752 else 752 else
753 adapter->rx_mbuf_sz = MCLBYTES; 753 adapter->rx_mbuf_sz = MCLBYTES;
754 754
755 /* Prepare receive descriptors and buffers */ 755 /* Prepare receive descriptors and buffers */
756 if (ixgbe_setup_receive_structures(adapter)) { 756 if (ixgbe_setup_receive_structures(adapter)) {
757 device_printf(dev, "Could not setup receive structures\n"); 757 device_printf(dev, "Could not setup receive structures\n");
758 ixv_stop(adapter); 758 ixv_stop(adapter);
759 return; 759 return;
760 } 760 }
761 761
762 /* Configure RX settings */ 762 /* Configure RX settings */
763 ixv_initialize_receive_units(adapter); 763 ixv_initialize_receive_units(adapter);
764 764
765#if 0 /* XXX isn't it required? -- msaitoh */ 765#if 0 /* XXX isn't it required? -- msaitoh */
766 /* Set the various hardware offload abilities */ 766 /* Set the various hardware offload abilities */
767 ifp->if_hwassist = 0; 767 ifp->if_hwassist = 0;
768 if (ifp->if_capenable & IFCAP_TSO4) 768 if (ifp->if_capenable & IFCAP_TSO4)
769 ifp->if_hwassist |= CSUM_TSO; 769 ifp->if_hwassist |= CSUM_TSO;
770 if (ifp->if_capenable & IFCAP_TXCSUM) { 770 if (ifp->if_capenable & IFCAP_TXCSUM) {
771 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 771 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
772#if __FreeBSD_version >= 800000 772#if __FreeBSD_version >= 800000
773 ifp->if_hwassist |= CSUM_SCTP; 773 ifp->if_hwassist |= CSUM_SCTP;
774#endif 774#endif
775 } 775 }
776#endif 776#endif
777 777
778 /* Set up VLAN offload and filter */ 778 /* Set up VLAN offload and filter */
779 ixv_setup_vlan_support(adapter); 779 ixv_setup_vlan_support(adapter);
780 780
781 /* Set up MSI-X routing */ 781 /* Set up MSI-X routing */
782 ixv_configure_ivars(adapter); 782 ixv_configure_ivars(adapter);
783 783
784 /* Set up auto-mask */ 784 /* Set up auto-mask */
785 mask = (1 << adapter->vector); 785 mask = (1 << adapter->vector);
786 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 786 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
787 mask |= (1 << que->msix); 787 mask |= (1 << que->msix);
788 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask); 788 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
789 789
790 /* Set moderation on the Link interrupt */ 790 /* Set moderation on the Link interrupt */
791 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); 791 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
792 792
793 /* Stats init */ 793 /* Stats init */
794 ixv_init_stats(adapter); 794 ixv_init_stats(adapter);
795 795
796 /* Config/Enable Link */ 796 /* Config/Enable Link */
797 hw->mac.get_link_status = TRUE; 797 hw->mac.get_link_status = TRUE;
798 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, 798 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
799 FALSE); 799 FALSE);
800 800
801 /* Start watchdog */ 801 /* Start watchdog */
802 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); 802 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
803 803
804 /* And now turn on interrupts */ 804 /* And now turn on interrupts */
805 ixv_enable_intr(adapter); 805 ixv_enable_intr(adapter);
806 806
807 /* Update saved flags. See ixgbe_ifflags_cb() */ 807 /* Update saved flags. See ixgbe_ifflags_cb() */
808 adapter->if_flags = ifp->if_flags; 808 adapter->if_flags = ifp->if_flags;
809 adapter->ec_capenable = adapter->osdep.ec.ec_capenable; 809 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
810 810
811 /* Now inform the stack we're ready */ 811 /* Now inform the stack we're ready */
812 ifp->if_flags |= IFF_RUNNING; 812 ifp->if_flags |= IFF_RUNNING;
813 ifp->if_flags &= ~IFF_OACTIVE; 813 ifp->if_flags &= ~IFF_OACTIVE;
814 814
815 return; 815 return;
816} /* ixv_init_locked */ 816} /* ixv_init_locked */
817 817
818/************************************************************************ 818/************************************************************************
819 * ixv_enable_queue 819 * ixv_enable_queue
820 ************************************************************************/ 820 ************************************************************************/
821static inline void 821static inline void
822ixv_enable_queue(struct adapter *adapter, u32 vector) 822ixv_enable_queue(struct adapter *adapter, u32 vector)
823{ 823{
824 struct ixgbe_hw *hw = &adapter->hw; 824 struct ixgbe_hw *hw = &adapter->hw;
825 struct ix_queue *que = &adapter->queues[vector]; 825 struct ix_queue *que = &adapter->queues[vector];
826 u32 queue = 1UL << vector; 826 u32 queue = 1UL << vector;
827 u32 mask; 827 u32 mask;
828 828
829 mutex_enter(&que->dc_mtx); 829 mutex_enter(&que->dc_mtx);
830 if (que->disabled_count > 0 && --que->disabled_count > 0) 830 if (que->disabled_count > 0 && --que->disabled_count > 0)
831 goto out; 831 goto out;
832 832
833 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 833 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
834 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 834 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
835out: 835out:
836 mutex_exit(&que->dc_mtx); 836 mutex_exit(&que->dc_mtx);
837} /* ixv_enable_queue */ 837} /* ixv_enable_queue */
838 838
839/************************************************************************ 839/************************************************************************
840 * ixv_disable_queue 840 * ixv_disable_queue
841 ************************************************************************/ 841 ************************************************************************/
842static inline void 842static inline void
843ixv_disable_queue(struct adapter *adapter, u32 vector) 843ixv_disable_queue(struct adapter *adapter, u32 vector)
844{ 844{
845 struct ixgbe_hw *hw = &adapter->hw; 845 struct ixgbe_hw *hw = &adapter->hw;
846 struct ix_queue *que = &adapter->queues[vector]; 846 struct ix_queue *que = &adapter->queues[vector];
847 u32 queue = 1UL << vector; 847 u32 queue = 1UL << vector;
848 u32 mask; 848 u32 mask;
849 849
850 mutex_enter(&que->dc_mtx); 850 mutex_enter(&que->dc_mtx);
851 if (que->disabled_count++ > 0) 851 if (que->disabled_count++ > 0)
852 goto out; 852 goto out;
853 853
854 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 854 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
855 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); 855 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
856out: 856out:
857 mutex_exit(&que->dc_mtx); 857 mutex_exit(&que->dc_mtx);
858} /* ixv_disable_queue */ 858} /* ixv_disable_queue */
859 859
#if 0
/*
 * Trigger a software interrupt for the queues in the given bitmap
 * by writing VTEICS.  Currently compiled out (unused in this port).
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif
868 868
869 869
870/************************************************************************ 870/************************************************************************
871 * ixv_msix_que - MSI-X Queue Interrupt Service routine 871 * ixv_msix_que - MSI-X Queue Interrupt Service routine
872 ************************************************************************/ 872 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Mask this vector until the deferred work has completed. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *   - Write out last calculated setting
	 *   - Calculate based on average size over
	 *     the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/*
	 * Estimate the average frame size seen since the last interrupt
	 * and derive the next EITR setting from it: bigger frames =>
	 * a larger interval (less aggressive moderation).
	 */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * If there is (or may be) pending RX work, defer it to the
	 * queue softint; the softint re-enables the vector when done.
	 * Otherwise re-enable the vector here.
	 */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
960 960
961/************************************************************************ 961/************************************************************************
962 * ixv_msix_mbx 962 * ixv_msix_mbx
963 ************************************************************************/ 963 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	++adapter->link_irq.ev_count;
	/* NetBSD: We use auto-clear, so it's not required to write VTEICR */

	/* Link status change */
	hw->mac.get_link_status = TRUE;
	/* The actual (mailbox-based) link check runs in softint context. */
	softint_schedule(adapter->link_si);

	/* Re-enable the mailbox/link vector. */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));

	return 1;
} /* ixv_msix_mbx */
981 981
982static void 982static void
983ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) 983ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
984{ 984{
985 985
986 /* 986 /*
987 * Newer devices than 82598 have VF function, so this function is 987 * Newer devices than 82598 have VF function, so this function is
988 * simple. 988 * simple.
989 */ 989 */
990 itr |= IXGBE_EITR_CNT_WDIS; 990 itr |= IXGBE_EITR_CNT_WDIS;
991 991
992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr); 992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
993} 993}
994 994
995 995
996/************************************************************************ 996/************************************************************************
997 * ixv_media_status - Media Ioctl callback 997 * ixv_media_status - Media Ioctl callback
998 * 998 *
999 * Called whenever the user queries the status of 999 * Called whenever the user queries the status of
1000 * the interface using ifconfig. 1000 * the interface using ifconfig.
1001 ************************************************************************/ 1001 ************************************************************************/
1002static void 1002static void
1003ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1003ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1004{ 1004{
1005 struct adapter *adapter = ifp->if_softc; 1005 struct adapter *adapter = ifp->if_softc;
1006 1006
1007 INIT_DEBUGOUT("ixv_media_status: begin"); 1007 INIT_DEBUGOUT("ixv_media_status: begin");
1008 IXGBE_CORE_LOCK(adapter); 1008 IXGBE_CORE_LOCK(adapter);
1009 ixv_update_link_status(adapter); 1009 ixv_update_link_status(adapter);
1010 1010
1011 ifmr->ifm_status = IFM_AVALID; 1011 ifmr->ifm_status = IFM_AVALID;
1012 ifmr->ifm_active = IFM_ETHER; 1012 ifmr->ifm_active = IFM_ETHER;
1013 1013
1014 if (adapter->link_active != LINK_STATE_UP) { 1014 if (adapter->link_active != LINK_STATE_UP) {
1015 ifmr->ifm_active |= IFM_NONE; 1015 ifmr->ifm_active |= IFM_NONE;
1016 IXGBE_CORE_UNLOCK(adapter); 1016 IXGBE_CORE_UNLOCK(adapter);
1017 return; 1017 return;
1018 } 1018 }
1019 1019
1020 ifmr->ifm_status |= IFM_ACTIVE; 1020 ifmr->ifm_status |= IFM_ACTIVE;
1021 1021
1022 switch (adapter->link_speed) { 1022 switch (adapter->link_speed) {
1023 case IXGBE_LINK_SPEED_10GB_FULL: 1023 case IXGBE_LINK_SPEED_10GB_FULL:
1024 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 1024 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1025 break; 1025 break;
1026 case IXGBE_LINK_SPEED_5GB_FULL: 1026 case IXGBE_LINK_SPEED_5GB_FULL:
1027 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 1027 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1028 break; 1028 break;
1029 case IXGBE_LINK_SPEED_2_5GB_FULL: 1029 case IXGBE_LINK_SPEED_2_5GB_FULL:
1030 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 1030 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1031 break; 1031 break;
1032 case IXGBE_LINK_SPEED_1GB_FULL: 1032 case IXGBE_LINK_SPEED_1GB_FULL:
1033 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 1033 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1034 break; 1034 break;
1035 case IXGBE_LINK_SPEED_100_FULL: 1035 case IXGBE_LINK_SPEED_100_FULL:
1036 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 1036 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1037 break; 1037 break;
1038 case IXGBE_LINK_SPEED_10_FULL: 1038 case IXGBE_LINK_SPEED_10_FULL:
1039 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 1039 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1040 break; 1040 break;
1041 } 1041 }
1042 1042
1043 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); 1043 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1044 1044
1045 IXGBE_CORE_UNLOCK(adapter); 1045 IXGBE_CORE_UNLOCK(adapter);
1046} /* ixv_media_status */ 1046} /* ixv_media_status */
1047 1047
1048/************************************************************************ 1048/************************************************************************
1049 * ixv_media_change - Media Ioctl callback 1049 * ixv_media_change - Media Ioctl callback
1050 * 1050 *
1051 * Called when the user changes speed/duplex using 1051 * Called when the user changes speed/duplex using
1052 * media/mediopt option with ifconfig. 1052 * media/mediopt option with ifconfig.
1053 ************************************************************************/ 1053 ************************************************************************/
1054static int 1054static int
1055ixv_media_change(struct ifnet *ifp) 1055ixv_media_change(struct ifnet *ifp)
1056{ 1056{
1057 struct adapter *adapter = ifp->if_softc; 1057 struct adapter *adapter = ifp->if_softc;
1058 struct ifmedia *ifm = &adapter->media; 1058 struct ifmedia *ifm = &adapter->media;
1059 1059
1060 INIT_DEBUGOUT("ixv_media_change: begin"); 1060 INIT_DEBUGOUT("ixv_media_change: begin");
1061 1061
1062 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1062 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1063 return (EINVAL); 1063 return (EINVAL);
1064 1064
1065 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1065 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1066 case IFM_AUTO: 1066 case IFM_AUTO:
1067 break; 1067 break;
1068 default: 1068 default:
1069 device_printf(adapter->dev, "Only auto media type\n"); 1069 device_printf(adapter->dev, "Only auto media type\n");
1070 return (EINVAL); 1070 return (EINVAL);
1071 } 1071 }
1072 1072
1073 return (0); 1073 return (0);
1074} /* ixv_media_change */ 1074} /* ixv_media_change */
1075 1075
1076 1076
1077/************************************************************************ 1077/************************************************************************
1078 * ixv_negotiate_api 1078 * ixv_negotiate_api
1079 * 1079 *
1080 * Negotiate the Mailbox API with the PF; 1080 * Negotiate the Mailbox API with the PF;
1081 * start with the most featured API first. 1081 * start with the most featured API first.
1082 ************************************************************************/ 1082 ************************************************************************/
1083static int 1083static int
1084ixv_negotiate_api(struct adapter *adapter) 1084ixv_negotiate_api(struct adapter *adapter)
1085{ 1085{
1086 struct ixgbe_hw *hw = &adapter->hw; 1086 struct ixgbe_hw *hw = &adapter->hw;
1087 int mbx_api[] = { ixgbe_mbox_api_11, 1087 int mbx_api[] = { ixgbe_mbox_api_11,
1088 ixgbe_mbox_api_10, 1088 ixgbe_mbox_api_10,
1089 ixgbe_mbox_api_unknown }; 1089 ixgbe_mbox_api_unknown };
1090 int i = 0; 1090 int i = 0;
1091 1091
1092 while (mbx_api[i] != ixgbe_mbox_api_unknown) { 1092 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1093 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) 1093 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1094 return (0); 1094 return (0);
1095 i++; 1095 i++;
1096 } 1096 }
1097 1097
1098 return (EINVAL); 1098 return (EINVAL);
1099} /* ixv_negotiate_api */ 1099} /* ixv_negotiate_api */
1100 1100
1101 1101
1102/************************************************************************ 1102/************************************************************************
1103 * ixv_set_multi - Multicast Update 1103 * ixv_set_multi - Multicast Update
1104 * 1104 *
1105 * Called whenever multicast address list is updated. 1105 * Called whenever multicast address list is updated.
1106 ************************************************************************/ 1106 ************************************************************************/
1107static void 1107static void
1108ixv_set_multi(struct adapter *adapter) 1108ixv_set_multi(struct adapter *adapter)
1109{ 1109{
1110 struct ether_multi *enm; 1110 struct ether_multi *enm;
1111 struct ether_multistep step; 1111 struct ether_multistep step;
1112 struct ethercom *ec = &adapter->osdep.ec; 1112 struct ethercom *ec = &adapter->osdep.ec;
1113 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; 1113 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1114 u8 *update_ptr; 1114 u8 *update_ptr;
1115 int mcnt = 0; 1115 int mcnt = 0;
1116 1116
1117 KASSERT(mutex_owned(&adapter->core_mtx)); 1117 KASSERT(mutex_owned(&adapter->core_mtx));
1118 IOCTL_DEBUGOUT("ixv_set_multi: begin"); 1118 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1119 1119
1120 ETHER_LOCK(ec); 1120 ETHER_LOCK(ec);
1121 ETHER_FIRST_MULTI(step, ec, enm); 1121 ETHER_FIRST_MULTI(step, ec, enm);
1122 while (enm != NULL) { 1122 while (enm != NULL) {
1123 bcopy(enm->enm_addrlo, 1123 bcopy(enm->enm_addrlo,
1124 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], 1124 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1125 IXGBE_ETH_LENGTH_OF_ADDRESS); 1125 IXGBE_ETH_LENGTH_OF_ADDRESS);
1126 mcnt++; 1126 mcnt++;
1127 /* XXX This might be required --msaitoh */ 1127 /* XXX This might be required --msaitoh */
1128 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) 1128 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1129 break; 1129 break;
1130 ETHER_NEXT_MULTI(step, enm); 1130 ETHER_NEXT_MULTI(step, enm);
1131 } 1131 }
1132 ETHER_UNLOCK(ec); 1132 ETHER_UNLOCK(ec);
1133 1133
1134 update_ptr = mta; 1134 update_ptr = mta;
1135 1135
1136 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 1136 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1137 ixv_mc_array_itr, TRUE); 1137 ixv_mc_array_itr, TRUE);
1138} /* ixv_set_multi */ 1138} /* ixv_set_multi */
1139 1139
1140/************************************************************************ 1140/************************************************************************
1141 * ixv_mc_array_itr 1141 * ixv_mc_array_itr
1142 * 1142 *
1143 * An iterator function needed by the multicast shared code. 1143 * An iterator function needed by the multicast shared code.
1144 * It feeds the shared code routine the addresses in the 1144 * It feeds the shared code routine the addresses in the
1145 * array of ixv_set_multi() one by one. 1145 * array of ixv_set_multi() one by one.
1146 ************************************************************************/ 1146 ************************************************************************/
1147static u8 * 1147static u8 *
1148ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 1148ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1149{ 1149{
1150 u8 *addr = *update_ptr; 1150 u8 *addr = *update_ptr;
1151 u8 *newptr; 1151 u8 *newptr;
1152 1152
1153 *vmdq = 0; 1153 *vmdq = 0;
1154 1154
1155 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 1155 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1156 *update_ptr = newptr; 1156 *update_ptr = newptr;
1157 1157
1158 return addr; 1158 return addr;
1159} /* ixv_mc_array_itr */ 1159} /* ixv_mc_array_itr */
1160 1160
1161/************************************************************************ 1161/************************************************************************
1162 * ixv_local_timer - Timer routine 1162 * ixv_local_timer - Timer routine
1163 * 1163 *
1164 * Checks for link status, updates statistics, 1164 * Checks for link status, updates statistics,
1165 * and runs the watchdog check. 1165 * and runs the watchdog check.
1166 ************************************************************************/ 1166 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	/* Callout entry point: take the core lock and run the body. */
	IXGBE_CORE_LOCK(sc);
	ixv_local_timer_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1176 1176
static void
ixv_local_timer_locked(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	u64		v0, v1, v2, v3, v4, v5, v6, v7;
	int		hung = 0;
	int		i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* A detected link change forces a full reinitialization. */
	if (ixv_check_link(adapter)) {
		ixv_init_locked(adapter);
		return;
	}

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	 * Update some event counters: fold the per-queue TX setup/TSO
	 * error counters into the adapter-wide event counters.
	 */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *      - mark hung queues so we don't schedule on them
	 *      - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		 * Each time txeof runs without cleaning, but there
		 * are uncleaned descriptors it increments busy. If
		 * we get to the MAX we declare it hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}
#endif

	/* Healthy: rearm ourselves for one second from now. */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	return;

watchdog:

	/* All queues hung: log, count the event, and reinitialize. */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixv_init_locked(adapter);
} /* ixv_local_timer */
1275 1275
1276/************************************************************************ 1276/************************************************************************
1277 * ixv_update_link_status - Update OS on link state 1277 * ixv_update_link_status - Update OS on link state
1278 * 1278 *
1279 * Note: Only updates the OS on the cached link state. 1279 * Note: Only updates the OS on the cached link state.
1280 * The real check of the hardware only happens with 1280 * The real check of the hardware only happens with
1281 * a link interrupt. 1281 * a link interrupt.
1282 ************************************************************************/ 1282 ************************************************************************/
1283static void 1283static void
1284ixv_update_link_status(struct adapter *adapter) 1284ixv_update_link_status(struct adapter *adapter)
1285{ 1285{
1286 struct ifnet *ifp = adapter->ifp; 1286 struct ifnet *ifp = adapter->ifp;
1287 device_t dev = adapter->dev; 1287 device_t dev = adapter->dev;
1288 1288
1289 KASSERT(mutex_owned(&adapter->core_mtx)); 1289 KASSERT(mutex_owned(&adapter->core_mtx));
1290 1290
1291 if (adapter->link_up) { 1291 if (adapter->link_up) {
1292 if (adapter->link_active != LINK_STATE_UP) { 1292 if (adapter->link_active != LINK_STATE_UP) {
1293 if (bootverbose) { 1293 if (bootverbose) {
1294 const char *bpsmsg; 1294 const char *bpsmsg;
1295 1295
1296 switch (adapter->link_speed) { 1296 switch (adapter->link_speed) {
1297 case IXGBE_LINK_SPEED_10GB_FULL: 1297 case IXGBE_LINK_SPEED_10GB_FULL:
1298 bpsmsg = "10 Gbps"; 1298 bpsmsg = "10 Gbps";
1299 break; 1299 break;
1300 case IXGBE_LINK_SPEED_5GB_FULL: 1300 case IXGBE_LINK_SPEED_5GB_FULL:
1301 bpsmsg = "5 Gbps"; 1301 bpsmsg = "5 Gbps";
1302 break; 1302 break;
1303 case IXGBE_LINK_SPEED_2_5GB_FULL: 1303 case IXGBE_LINK_SPEED_2_5GB_FULL:
1304 bpsmsg = "2.5 Gbps"; 1304 bpsmsg = "2.5 Gbps";
1305 break; 1305 break;
1306 case IXGBE_LINK_SPEED_1GB_FULL: 1306 case IXGBE_LINK_SPEED_1GB_FULL:
1307 bpsmsg = "1 Gbps"; 1307 bpsmsg = "1 Gbps";
1308 break; 1308 break;
1309 case IXGBE_LINK_SPEED_100_FULL: 1309 case IXGBE_LINK_SPEED_100_FULL:
1310 bpsmsg = "100 Mbps"; 1310 bpsmsg = "100 Mbps";
1311 break; 1311 break;
1312 case IXGBE_LINK_SPEED_10_FULL: 1312 case IXGBE_LINK_SPEED_10_FULL:
1313 bpsmsg = "10 Mbps"; 1313 bpsmsg = "10 Mbps";
1314 break; 1314 break;
1315 default: 1315 default:
1316 bpsmsg = "unknown speed"; 1316 bpsmsg = "unknown speed";
1317 break; 1317 break;
1318 } 1318 }
1319 device_printf(dev, "Link is up %s %s \n", 1319 device_printf(dev, "Link is up %s %s \n",
1320 bpsmsg, "Full Duplex"); 1320 bpsmsg, "Full Duplex");
1321 } 1321 }
1322 adapter->link_active = LINK_STATE_UP; 1322 adapter->link_active = LINK_STATE_UP;
1323 if_link_state_change(ifp, LINK_STATE_UP); 1323 if_link_state_change(ifp, LINK_STATE_UP);
1324 } 1324 }
1325 } else { 1325 } else {
1326 /* 1326 /*
1327 * Do it when link active changes to DOWN. i.e. 1327 * Do it when link active changes to DOWN. i.e.
1328 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN 1328 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
1329 * b) LINK_STATE_UP -> LINK_STATE_DOWN 1329 * b) LINK_STATE_UP -> LINK_STATE_DOWN
1330 */ 1330 */
1331 if (adapter->link_active != LINK_STATE_DOWN) { 1331 if (adapter->link_active != LINK_STATE_DOWN) {
1332 if (bootverbose) 1332 if (bootverbose)
1333 device_printf(dev, "Link is Down\n"); 1333 device_printf(dev, "Link is Down\n");
1334 if_link_state_change(ifp, LINK_STATE_DOWN); 1334 if_link_state_change(ifp, LINK_STATE_DOWN);
1335 adapter->link_active = LINK_STATE_DOWN; 1335 adapter->link_active = LINK_STATE_DOWN;
1336 } 1336 }
1337 } 1337 }
1338} /* ixv_update_link_status */ 1338} /* ixv_update_link_status */
1339 1339
1340 1340
1341/************************************************************************ 1341/************************************************************************
1342 * ixv_stop - Stop the hardware 1342 * ixv_stop - Stop the hardware
1343 * 1343 *
1344 * Disables all traffic on the adapter by issuing a 1344 * Disables all traffic on the adapter by issuing a
1345 * global reset on the MAC and deallocates TX/RX buffers. 1345 * global reset on the MAC and deallocates TX/RX buffers.
1346 ************************************************************************/ 1346 ************************************************************************/
1347static void 1347static void
1348ixv_ifstop(struct ifnet *ifp, int disable) 1348ixv_ifstop(struct ifnet *ifp, int disable)
1349{ 1349{
1350 struct adapter *adapter = ifp->if_softc; 1350 struct adapter *adapter = ifp->if_softc;
1351 1351
1352 IXGBE_CORE_LOCK(adapter); 1352 IXGBE_CORE_LOCK(adapter);
1353 ixv_stop(adapter); 1353 ixv_stop(adapter);
1354 IXGBE_CORE_UNLOCK(adapter); 1354 IXGBE_CORE_UNLOCK(adapter);
1355} 1355}
1356 1356
static void
ixv_stop(void *arg)
{
	struct ifnet	*ifp;
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped after the reset so the following
	 * stop_adapter call actually performs its stop sequence
	 * (it is a no-op while the flag is set).
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1384 1384
1385 1385
1386/************************************************************************ 1386/************************************************************************
1387 * ixv_allocate_pci_resources 1387 * ixv_allocate_pci_resources
1388 ************************************************************************/ 1388 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	/* BAR0 holds the device registers and must be a memory BAR. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Device registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		      adapter->osdep.mem_size, flags,
		      &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1438 1438
1439/************************************************************************ 1439/************************************************************************
1440 * ixv_free_pci_resources 1440 * ixv_free_pci_resources
1441 ************************************************************************/ 1441 ************************************************************************/
1442static void 1442static void
1443ixv_free_pci_resources(struct adapter * adapter) 1443ixv_free_pci_resources(struct adapter * adapter)
1444{ 1444{
1445 struct ix_queue *que = adapter->queues; 1445 struct ix_queue *que = adapter->queues;
1446 int rid; 1446 int rid;
1447 1447
1448 /* 1448 /*
1449 * Release all msix queue resources: 1449 * Release all msix queue resources:
1450 */ 1450 */
1451 for (int i = 0; i < adapter->num_queues; i++, que++) { 1451 for (int i = 0; i < adapter->num_queues; i++, que++) {
1452 if (que->res != NULL) 1452 if (que->res != NULL)
1453 pci_intr_disestablish(adapter->osdep.pc, 1453 pci_intr_disestablish(adapter->osdep.pc,
1454 adapter->osdep.ihs[i]); 1454 adapter->osdep.ihs[i]);
1455 } 1455 }
1456 1456
1457 1457
1458 /* Clean the Mailbox interrupt last */ 1458 /* Clean the Mailbox interrupt last */
1459 rid = adapter->vector; 1459 rid = adapter->vector;
1460 1460
1461 if (adapter->osdep.ihs[rid] != NULL) { 1461 if (adapter->osdep.ihs[rid] != NULL) {
1462 pci_intr_disestablish(adapter->osdep.pc, 1462 pci_intr_disestablish(adapter->osdep.pc,
1463 adapter->osdep.ihs[rid]); 1463 adapter->osdep.ihs[rid]);
1464 adapter->osdep.ihs[rid] = NULL; 1464 adapter->osdep.ihs[rid] = NULL;
1465 } 1465 }
1466 1466
1467 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1467 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1468 adapter->osdep.nintrs); 1468 adapter->osdep.nintrs);
1469 1469
1470 if (adapter->osdep.mem_size != 0) { 1470 if (adapter->osdep.mem_size != 0) {
1471 bus_space_unmap(adapter->osdep.mem_bus_space_tag, 1471 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1472 adapter->osdep.mem_bus_space_handle, 1472 adapter->osdep.mem_bus_space_handle,
1473 adapter->osdep.mem_size); 1473 adapter->osdep.mem_size);
1474 } 1474 }
1475 1475
1476 return; 1476 return;
1477} /* ixv_free_pci_resources */ 1477} /* ixv_free_pci_resources */
1478 1478
1479/************************************************************************ 1479/************************************************************************
1480 * ixv_setup_interface 1480 * ixv_setup_interface
1481 * 1481 *
1482 * Setup networking device structure and register an interface. 1482 * Setup networking device structure and register an interface.
1483 ************************************************************************/ 1483 ************************************************************************/
1484static int 1484static int
1485ixv_setup_interface(device_t dev, struct adapter *adapter) 1485ixv_setup_interface(device_t dev, struct adapter *adapter)
1486{ 1486{
1487 struct ethercom *ec = &adapter->osdep.ec; 1487 struct ethercom *ec = &adapter->osdep.ec;
1488 struct ifnet *ifp; 1488 struct ifnet *ifp;
1489 int rv; 1489 int rv;
1490 1490
1491 INIT_DEBUGOUT("ixv_setup_interface: begin"); 1491 INIT_DEBUGOUT("ixv_setup_interface: begin");
1492 1492
1493 ifp = adapter->ifp = &ec->ec_if; 1493 ifp = adapter->ifp = &ec->ec_if;
1494 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); 1494 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1495 ifp->if_baudrate = IF_Gbps(10); 1495 ifp->if_baudrate = IF_Gbps(10);
1496 ifp->if_init = ixv_init; 1496 ifp->if_init = ixv_init;
1497 ifp->if_stop = ixv_ifstop; 1497 ifp->if_stop = ixv_ifstop;
1498 ifp->if_softc = adapter; 1498 ifp->if_softc = adapter;
1499 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1499 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1500#ifdef IXGBE_MPSAFE 1500#ifdef IXGBE_MPSAFE
1501 ifp->if_extflags = IFEF_MPSAFE; 1501 ifp->if_extflags = IFEF_MPSAFE;
1502#endif 1502#endif
1503 ifp->if_ioctl = ixv_ioctl; 1503 ifp->if_ioctl = ixv_ioctl;
1504 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { 1504 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1505#if 0 1505#if 0
1506 ixv_start_locked = ixgbe_legacy_start_locked; 1506 ixv_start_locked = ixgbe_legacy_start_locked;
1507#endif 1507#endif
1508 } else { 1508 } else {
1509 ifp->if_transmit = ixgbe_mq_start; 1509 ifp->if_transmit = ixgbe_mq_start;
1510#if 0 1510#if 0
1511 ixv_start_locked = ixgbe_mq_start_locked; 1511 ixv_start_locked = ixgbe_mq_start_locked;
1512#endif 1512#endif
1513 } 1513 }
1514 ifp->if_start = ixgbe_legacy_start; 1514 ifp->if_start = ixgbe_legacy_start;
1515 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 1515 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1516 IFQ_SET_READY(&ifp->if_snd); 1516 IFQ_SET_READY(&ifp->if_snd);
1517 1517
1518 rv = if_initialize(ifp); 1518 rv = if_initialize(ifp);
1519 if (rv != 0) { 1519 if (rv != 0) {
1520 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); 1520 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1521 return rv; 1521 return rv;
1522 } 1522 }
1523 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); 1523 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1524 ether_ifattach(ifp, adapter->hw.mac.addr); 1524 ether_ifattach(ifp, adapter->hw.mac.addr);
1525 /* 1525 /*
1526 * We use per TX queue softint, so if_deferred_start_init() isn't 1526 * We use per TX queue softint, so if_deferred_start_init() isn't
1527 * used. 1527 * used.
1528 */ 1528 */
1529 ether_set_ifflags_cb(ec, ixv_ifflags_cb); 1529 ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1530 1530
1531 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; 1531 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1532 1532
1533 /* 1533 /*
1534 * Tell the upper layer(s) we support long frames. 1534 * Tell the upper layer(s) we support long frames.
1535 */ 1535 */
1536 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1536 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1537 1537
1538 /* Set capability flags */ 1538 /* Set capability flags */
1539 ifp->if_capabilities |= IFCAP_HWCSUM 1539 ifp->if_capabilities |= IFCAP_HWCSUM
1540 | IFCAP_TSOv4 1540 | IFCAP_TSOv4
1541 | IFCAP_TSOv6; 1541 | IFCAP_TSOv6;
1542 ifp->if_capenable = 0; 1542 ifp->if_capenable = 0;
1543 1543
1544 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER 1544 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
1545 | ETHERCAP_VLAN_HWTAGGING 1545 | ETHERCAP_VLAN_HWTAGGING
1546 | ETHERCAP_VLAN_HWCSUM 1546 | ETHERCAP_VLAN_HWCSUM
1547 | ETHERCAP_JUMBO_MTU 1547 | ETHERCAP_JUMBO_MTU
1548 | ETHERCAP_VLAN_MTU; 1548 | ETHERCAP_VLAN_MTU;
1549 1549
1550 /* Enable the above capabilities by default */ 1550 /* Enable the above capabilities by default */
1551 ec->ec_capenable = ec->ec_capabilities; 1551 ec->ec_capenable = ec->ec_capabilities;
1552 1552
1553 /* Don't enable LRO by default */ 1553 /* Don't enable LRO by default */
1554#if 0 1554#if 0
1555 /* NetBSD doesn't support LRO yet */ 1555 /* NetBSD doesn't support LRO yet */
1556 ifp->if_capabilities |= IFCAP_LRO; 1556 ifp->if_capabilities |= IFCAP_LRO;
1557#endif 1557#endif
1558 1558
1559 /* 1559 /*
1560 * Specify the media types supported by this adapter and register 1560 * Specify the media types supported by this adapter and register
1561 * callbacks to update media and link information 1561 * callbacks to update media and link information
1562 */ 1562 */
1563 ec->ec_ifmedia = &adapter->media; 1563 ec->ec_ifmedia = &adapter->media;
1564 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, 1564 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1565 ixv_media_status); 1565 ixv_media_status);
1566 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1566 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1567 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1567 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1568 1568
1569 if_register(ifp); 1569 if_register(ifp);
1570 1570
1571 return 0; 1571 return 0;
1572} /* ixv_setup_interface */ 1572} /* ixv_setup_interface */
1573 1573
1574 1574
1575/************************************************************************ 1575/************************************************************************
1576 * ixv_initialize_transmit_units - Enable transmit unit. 1576 * ixv_initialize_transmit_units - Enable transmit unit.
1577 ************************************************************************/ 1577 ************************************************************************/
1578static void 1578static void
1579ixv_initialize_transmit_units(struct adapter *adapter) 1579ixv_initialize_transmit_units(struct adapter *adapter)
1580{ 1580{
1581 struct tx_ring *txr = adapter->tx_rings; 1581 struct tx_ring *txr = adapter->tx_rings;
1582 struct ixgbe_hw *hw = &adapter->hw; 1582 struct ixgbe_hw *hw = &adapter->hw;
1583 int i; 1583 int i;
1584 1584
1585 for (i = 0; i < adapter->num_queues; i++, txr++) { 1585 for (i = 0; i < adapter->num_queues; i++, txr++) {
1586 u64 tdba = txr->txdma.dma_paddr; 1586 u64 tdba = txr->txdma.dma_paddr;
1587 u32 txctrl, txdctl; 1587 u32 txctrl, txdctl;
1588 int j = txr->me; 1588 int j = txr->me;
1589 1589
1590 /* Set WTHRESH to 8, burst writeback */ 1590 /* Set WTHRESH to 8, burst writeback */
1591 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1591 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1592 txdctl |= (8 << 16); 1592 txdctl |= (8 << 16);
1593 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1593 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1594 1594
1595 /* Set the HW Tx Head and Tail indices */ 1595 /* Set the HW Tx Head and Tail indices */
1596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0); 1596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0); 1597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1598 1598
1599 /* Set Tx Tail register */ 1599 /* Set Tx Tail register */
1600 txr->tail = IXGBE_VFTDT(j); 1600 txr->tail = IXGBE_VFTDT(j);
1601 1601
1602 txr->txr_no_space = false; 1602 txr->txr_no_space = false;
1603 1603
1604 /* Set Ring parameters */ 1604 /* Set Ring parameters */
1605 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1605 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1606 (tdba & 0x00000000ffffffffULL)); 1606 (tdba & 0x00000000ffffffffULL));
1607 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1607 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1608 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), 1608 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1609 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc)); 1609 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1610 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1610 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1611 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 1611 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1612 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1612 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1613 1613
1614 /* Now enable */ 1614 /* Now enable */
1615 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1615 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1616 txdctl |= IXGBE_TXDCTL_ENABLE; 1616 txdctl |= IXGBE_TXDCTL_ENABLE;
1617 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1617 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1618 } 1618 }
1619 1619
1620 return; 1620 return;
1621} /* ixv_initialize_transmit_units */ 1621} /* ixv_initialize_transmit_units */
1622 1622
1623 1623
1624/************************************************************************ 1624/************************************************************************
1625 * ixv_initialize_rss_mapping 1625 * ixv_initialize_rss_mapping
1626 ************************************************************************/ 1626 ************************************************************************/
1627static void 1627static void
1628ixv_initialize_rss_mapping(struct adapter *adapter) 1628ixv_initialize_rss_mapping(struct adapter *adapter)
1629{ 1629{
1630 struct ixgbe_hw *hw = &adapter->hw; 1630 struct ixgbe_hw *hw = &adapter->hw;
1631 u32 reta = 0, mrqc, rss_key[10]; 1631 u32 reta = 0, mrqc, rss_key[10];
1632 int queue_id; 1632 int queue_id;
1633 int i, j; 1633 int i, j;
1634 u32 rss_hash_config; 1634 u32 rss_hash_config;
1635 1635
1636 /* force use default RSS key. */ 1636 /* force use default RSS key. */
1637#ifdef __NetBSD__ 1637#ifdef __NetBSD__
1638 rss_getkey((uint8_t *) &rss_key); 1638 rss_getkey((uint8_t *) &rss_key);
1639#else 1639#else
1640 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 1640 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1641 /* Fetch the configured RSS key */ 1641 /* Fetch the configured RSS key */
1642 rss_getkey((uint8_t *)&rss_key); 1642 rss_getkey((uint8_t *)&rss_key);
1643 } else { 1643 } else {
1644 /* set up random bits */ 1644 /* set up random bits */
1645 cprng_fast(&rss_key, sizeof(rss_key)); 1645 cprng_fast(&rss_key, sizeof(rss_key));
1646 } 1646 }
1647#endif 1647#endif
1648 1648
1649 /* Now fill out hash function seeds */ 1649 /* Now fill out hash function seeds */
1650 for (i = 0; i < 10; i++) 1650 for (i = 0; i < 10; i++)
1651 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); 1651 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1652 1652
1653 /* Set up the redirection table */ 1653 /* Set up the redirection table */
1654 for (i = 0, j = 0; i < 64; i++, j++) { 1654 for (i = 0, j = 0; i < 64; i++, j++) {
1655 if (j == adapter->num_queues) 1655 if (j == adapter->num_queues)
1656 j = 0; 1656 j = 0;
1657 1657
1658 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 1658 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1659 /* 1659 /*
1660 * Fetch the RSS bucket id for the given indirection 1660 * Fetch the RSS bucket id for the given indirection
1661 * entry. Cap it at the number of configured buckets 1661 * entry. Cap it at the number of configured buckets
1662 * (which is num_queues.) 1662 * (which is num_queues.)
1663 */ 1663 */
1664 queue_id = rss_get_indirection_to_bucket(i); 1664 queue_id = rss_get_indirection_to_bucket(i);
1665 queue_id = queue_id % adapter->num_queues; 1665 queue_id = queue_id % adapter->num_queues;
1666 } else 1666 } else
1667 queue_id = j; 1667 queue_id = j;
1668 1668
1669 /* 1669 /*
1670 * The low 8 bits are for hash value (n+0); 1670 * The low 8 bits are for hash value (n+0);
1671 * The next 8 bits are for hash value (n+1), etc. 1671 * The next 8 bits are for hash value (n+1), etc.
1672 */ 1672 */
1673 reta >>= 8; 1673 reta >>= 8;
1674 reta |= ((uint32_t)queue_id) << 24; 1674 reta |= ((uint32_t)queue_id) << 24;
1675 if ((i & 3) == 3) { 1675 if ((i & 3) == 3) {
1676 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta); 1676 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1677 reta = 0; 1677 reta = 0;
1678 } 1678 }
1679 } 1679 }
1680 1680
1681 /* Perform hash on these packet types */ 1681 /* Perform hash on these packet types */
1682 if (adapter->feat_en & IXGBE_FEATURE_RSS) 1682 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1683 rss_hash_config = rss_gethashconfig(); 1683 rss_hash_config = rss_gethashconfig();
1684 else { 1684 else {
1685 /* 1685 /*
1686 * Disable UDP - IP fragments aren't currently being handled 1686 * Disable UDP - IP fragments aren't currently being handled
1687 * and so we end up with a mix of 2-tuple and 4-tuple 1687 * and so we end up with a mix of 2-tuple and 4-tuple
1688 * traffic. 1688 * traffic.
1689 */ 1689 */
1690 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 1690 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1691 | RSS_HASHTYPE_RSS_TCP_IPV4 1691 | RSS_HASHTYPE_RSS_TCP_IPV4
1692 | RSS_HASHTYPE_RSS_IPV6 1692 | RSS_HASHTYPE_RSS_IPV6
1693 | RSS_HASHTYPE_RSS_TCP_IPV6; 1693 | RSS_HASHTYPE_RSS_TCP_IPV6;
1694 } 1694 }
1695 1695
1696 mrqc = IXGBE_MRQC_RSSEN; 1696 mrqc = IXGBE_MRQC_RSSEN;
1697 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 1697 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1698 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 1698 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1699 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 1699 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1700 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 1700 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1701 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 1701 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1702 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 1702 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1703 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 1703 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1704 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 1704 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1705 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 1705 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1706 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n", 1706 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1707 __func__); 1707 __func__);
1708 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 1708 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1709 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n", 1709 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1710 __func__); 1710 __func__);
1711 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 1711 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1712 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 1712 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1713 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 1713 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1714 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 1714 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1715 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 1715 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1716 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n", 1716 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1717 __func__); 1717 __func__);
1718 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc); 1718 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1719} /* ixv_initialize_rss_mapping */ 1719} /* ixv_initialize_rss_mapping */
1720 1720
1721 1721
1722/************************************************************************ 1722/************************************************************************
1723 * ixv_initialize_receive_units - Setup receive registers and features. 1723 * ixv_initialize_receive_units - Setup receive registers and features.
1724 ************************************************************************/ 1724 ************************************************************************/
1725static void 1725static void
1726ixv_initialize_receive_units(struct adapter *adapter) 1726ixv_initialize_receive_units(struct adapter *adapter)
1727{ 1727{
1728 struct rx_ring *rxr = adapter->rx_rings; 1728 struct rx_ring *rxr = adapter->rx_rings;
1729 struct ixgbe_hw *hw = &adapter->hw; 1729 struct ixgbe_hw *hw = &adapter->hw;
1730 struct ifnet *ifp = adapter->ifp; 1730 struct ifnet *ifp = adapter->ifp;
1731 u32 bufsz, psrtype; 1731 u32 bufsz, psrtype;
1732 1732
1733 if (ifp->if_mtu > ETHERMTU) 1733 if (ifp->if_mtu > ETHERMTU)
1734 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1734 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1735 else 1735 else
1736 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1736 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1737 1737
1738 psrtype = IXGBE_PSRTYPE_TCPHDR 1738 psrtype = IXGBE_PSRTYPE_TCPHDR
1739 | IXGBE_PSRTYPE_UDPHDR 1739 | IXGBE_PSRTYPE_UDPHDR
1740 | IXGBE_PSRTYPE_IPV4HDR 1740 | IXGBE_PSRTYPE_IPV4HDR
1741 | IXGBE_PSRTYPE_IPV6HDR 1741 | IXGBE_PSRTYPE_IPV6HDR
1742 | IXGBE_PSRTYPE_L2HDR; 1742 | IXGBE_PSRTYPE_L2HDR;
1743 1743
1744 if (adapter->num_queues > 1) 1744 if (adapter->num_queues > 1)
1745 psrtype |= 1 << 29; 1745 psrtype |= 1 << 29;
1746 1746
1747 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1747 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1748 1748
1749 /* Tell PF our max_frame size */ 1749 /* Tell PF our max_frame size */
1750 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) { 1750 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1751 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n"); 1751 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1752 } 1752 }
1753 1753
1754 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1754 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1755 u64 rdba = rxr->rxdma.dma_paddr; 1755 u64 rdba = rxr->rxdma.dma_paddr;
1756 u32 reg, rxdctl; 1756 u32 reg, rxdctl;
1757 int j = rxr->me; 1757 int j = rxr->me;
1758 1758
1759 /* Disable the queue */ 1759 /* Disable the queue */
1760 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1760 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1761 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1761 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1762 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1762 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1763 for (int k = 0; k < 10; k++) { 1763 for (int k = 0; k < 10; k++) {
1764 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1764 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1765 IXGBE_RXDCTL_ENABLE) 1765 IXGBE_RXDCTL_ENABLE)
1766 msec_delay(1); 1766 msec_delay(1);
1767 else 1767 else
1768 break; 1768 break;
1769 } 1769 }
1770 wmb(); 1770 wmb();
1771 /* Setup the Base and Length of the Rx Descriptor Ring */ 1771 /* Setup the Base and Length of the Rx Descriptor Ring */
1772 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1772 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1773 (rdba & 0x00000000ffffffffULL)); 1773 (rdba & 0x00000000ffffffffULL));
1774 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1774 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1775 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), 1775 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1776 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 1776 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1777 1777
1778 /* Reset the ring indices */ 1778 /* Reset the ring indices */
1779 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); 1779 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1780 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); 1780 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1781 1781
1782 /* Set up the SRRCTL register */ 1782 /* Set up the SRRCTL register */
1783 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j)); 1783 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1784 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1784 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1785 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 1785 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1786 reg |= bufsz; 1786 reg |= bufsz;
1787 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1787 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1788 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg); 1788 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1789 1789
1790 /* Capture Rx Tail index */ 1790 /* Capture Rx Tail index */
1791 rxr->tail = IXGBE_VFRDT(rxr->me); 1791 rxr->tail = IXGBE_VFRDT(rxr->me);
1792 1792
1793 /* Do the queue enabling last */ 1793 /* Do the queue enabling last */
1794 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1794 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1795 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1795 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1796 for (int k = 0; k < 10; k++) { 1796 for (int k = 0; k < 10; k++) {
1797 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1797 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1798 IXGBE_RXDCTL_ENABLE) 1798 IXGBE_RXDCTL_ENABLE)
1799 break; 1799 break;
1800 msec_delay(1); 1800 msec_delay(1);
1801 } 1801 }
1802 wmb(); 1802 wmb();
1803 1803
1804 /* Set the Tail Pointer */ 1804 /* Set the Tail Pointer */
1805#ifdef DEV_NETMAP 1805#ifdef DEV_NETMAP
1806 /* 1806 /*
1807 * In netmap mode, we must preserve the buffers made 1807 * In netmap mode, we must preserve the buffers made
1808 * available to userspace before the if_init() 1808 * available to userspace before the if_init()
1809 * (this is true by default on the TX side, because 1809 * (this is true by default on the TX side, because
1810 * init makes all buffers available to userspace). 1810 * init makes all buffers available to userspace).
1811 * 1811 *
1812 * netmap_reset() and the device specific routines 1812 * netmap_reset() and the device specific routines
1813 * (e.g. ixgbe_setup_receive_rings()) map these 1813 * (e.g. ixgbe_setup_receive_rings()) map these
1814 * buffers at the end of the NIC ring, so here we 1814 * buffers at the end of the NIC ring, so here we
1815 * must set the RDT (tail) register to make sure 1815 * must set the RDT (tail) register to make sure
1816 * they are not overwritten. 1816 * they are not overwritten.
1817 * 1817 *
1818 * In this driver the NIC ring starts at RDH = 0, 1818 * In this driver the NIC ring starts at RDH = 0,
1819 * RDT points to the last slot available for reception (?), 1819 * RDT points to the last slot available for reception (?),
1820 * so RDT = num_rx_desc - 1 means the whole ring is available. 1820 * so RDT = num_rx_desc - 1 means the whole ring is available.
1821 */ 1821 */
1822 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 1822 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1823 (ifp->if_capenable & IFCAP_NETMAP)) { 1823 (ifp->if_capenable & IFCAP_NETMAP)) {
1824 struct netmap_adapter *na = NA(adapter->ifp); 1824 struct netmap_adapter *na = NA(adapter->ifp);
1825 struct netmap_kring *kring = na->rx_rings[i]; 1825 struct netmap_kring *kring = na->rx_rings[i];
1826 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 1826 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1827 1827
1828 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t); 1828 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1829 } else 1829 } else
1830#endif /* DEV_NETMAP */ 1830#endif /* DEV_NETMAP */
1831 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 1831 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1832 adapter->num_rx_desc - 1); 1832 adapter->num_rx_desc - 1);
1833 } 1833 }
1834 1834
1835 ixv_initialize_rss_mapping(adapter); 1835 ixv_initialize_rss_mapping(adapter);
1836} /* ixv_initialize_receive_units */ 1836} /* ixv_initialize_receive_units */
1837 1837
1838/************************************************************************ 1838/************************************************************************
1839 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function 1839 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1840 * 1840 *
1841 * Retrieves the TDH value from the hardware 1841 * Retrieves the TDH value from the hardware
1842 ************************************************************************/ 1842 ************************************************************************/
1843static int 1843static int
1844ixv_sysctl_tdh_handler(SYSCTLFN_ARGS) 1844ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1845{ 1845{
1846 struct sysctlnode node = *rnode; 1846 struct sysctlnode node = *rnode;
1847 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 1847 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1848 uint32_t val; 1848 uint32_t val;
1849 1849
1850 if (!txr) 1850 if (!txr)
1851 return (0); 1851 return (0);
1852 1852
1853 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me)); 1853 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1854 node.sysctl_data = &val; 1854 node.sysctl_data = &val;
1855 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1855 return sysctl_lookup(SYSCTLFN_CALL(&node));
1856} /* ixv_sysctl_tdh_handler */ 1856} /* ixv_sysctl_tdh_handler */
1857 1857
1858/************************************************************************ 1858/************************************************************************
1859 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1859 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1860 * 1860 *
1861 * Retrieves the TDT value from the hardware 1861 * Retrieves the TDT value from the hardware
1862 ************************************************************************/ 1862 ************************************************************************/
1863static int 1863static int
1864ixv_sysctl_tdt_handler(SYSCTLFN_ARGS) 1864ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1865{ 1865{
1866 struct sysctlnode node = *rnode; 1866 struct sysctlnode node = *rnode;
1867 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 1867 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1868 uint32_t val; 1868 uint32_t val;
1869 1869
1870 if (!txr) 1870 if (!txr)
1871 return (0); 1871 return (0);
1872 1872
1873 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me)); 1873 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1874 node.sysctl_data = &val; 1874 node.sysctl_data = &val;
1875 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1875 return sysctl_lookup(SYSCTLFN_CALL(&node));
1876} /* ixv_sysctl_tdt_handler */ 1876} /* ixv_sysctl_tdt_handler */
1877 1877
1878/************************************************************************ 1878/************************************************************************
1879 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check 1879 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
1880 * handler function 1880 * handler function
1881 * 1881 *
1882 * Retrieves the next_to_check value 1882 * Retrieves the next_to_check value
1883 ************************************************************************/ 1883 ************************************************************************/
1884static int 1884static int
1885ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS) 1885ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
1886{ 1886{
1887 struct sysctlnode node = *rnode; 1887 struct sysctlnode node = *rnode;
1888 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1888 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1889 uint32_t val; 1889 uint32_t val;
1890 1890
1891 if (!rxr) 1891 if (!rxr)
1892 return (0); 1892 return (0);
1893 1893
1894 val = rxr->next_to_check; 1894 val = rxr->next_to_check;
1895 node.sysctl_data = &val; 1895 node.sysctl_data = &val;
1896 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1896 return sysctl_lookup(SYSCTLFN_CALL(&node));
1897} /* ixv_sysctl_next_to_check_handler */ 1897} /* ixv_sysctl_next_to_check_handler */
1898 1898
1899/************************************************************************ 1899/************************************************************************
1900 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function 1900 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1901 * 1901 *
1902 * Retrieves the RDH value from the hardware 1902 * Retrieves the RDH value from the hardware
1903 ************************************************************************/ 1903 ************************************************************************/
1904static int 1904static int
1905ixv_sysctl_rdh_handler(SYSCTLFN_ARGS) 1905ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1906{ 1906{
1907 struct sysctlnode node = *rnode; 1907 struct sysctlnode node = *rnode;
1908 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1908 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1909 uint32_t val; 1909 uint32_t val;
1910 1910
1911 if (!rxr) 1911 if (!rxr)
1912 return (0); 1912 return (0);
1913 1913
1914 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me)); 1914 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1915 node.sysctl_data = &val; 1915 node.sysctl_data = &val;
1916 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1916 return sysctl_lookup(SYSCTLFN_CALL(&node));
1917} /* ixv_sysctl_rdh_handler */ 1917} /* ixv_sysctl_rdh_handler */
1918 1918
1919/************************************************************************ 1919/************************************************************************
1920 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function 1920 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1921 * 1921 *
1922 * Retrieves the RDT value from the hardware 1922 * Retrieves the RDT value from the hardware
1923 ************************************************************************/ 1923 ************************************************************************/
1924static int 1924static int
1925ixv_sysctl_rdt_handler(SYSCTLFN_ARGS) 1925ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1926{ 1926{
1927 struct sysctlnode node = *rnode; 1927 struct sysctlnode node = *rnode;
1928 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1928 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1929 uint32_t val; 1929 uint32_t val;
1930 1930
1931 if (!rxr) 1931 if (!rxr)
1932 return (0); 1932 return (0);
1933 1933
1934 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me)); 1934 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
1935 node.sysctl_data = &val; 1935 node.sysctl_data = &val;
1936 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1936 return sysctl_lookup(SYSCTLFN_CALL(&node));
1937} /* ixv_sysctl_rdt_handler */ 1937} /* ixv_sysctl_rdt_handler */
1938 1938
1939static void 1939static void
1940ixv_setup_vlan_tagging(struct adapter *adapter) 1940ixv_setup_vlan_tagging(struct adapter *adapter)
1941{ 1941{
1942 struct ethercom *ec = &adapter->osdep.ec; 1942 struct ethercom *ec = &adapter->osdep.ec;
1943 struct ixgbe_hw *hw = &adapter->hw; 1943 struct ixgbe_hw *hw = &adapter->hw;
1944 struct rx_ring *rxr; 1944 struct rx_ring *rxr;
1945 u32 ctrl; 1945 u32 ctrl;
1946 int i; 1946 int i;
1947 bool hwtagging; 1947 bool hwtagging;
1948 1948
1949 /* Enable HW tagging only if any vlan is attached */ 1949 /* Enable HW tagging only if any vlan is attached */
1950 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) 1950 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
1951 && VLAN_ATTACHED(ec); 1951 && VLAN_ATTACHED(ec);
1952 1952
1953 /* Enable the queues */ 1953 /* Enable the queues */
1954 for (i = 0; i < adapter->num_queues; i++) { 1954 for (i = 0; i < adapter->num_queues; i++) {
1955 rxr = &adapter->rx_rings[i]; 1955 rxr = &adapter->rx_rings[i];
1956 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me)); 1956 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
1957 if (hwtagging) 1957 if (hwtagging)
1958 ctrl |= IXGBE_RXDCTL_VME; 1958 ctrl |= IXGBE_RXDCTL_VME;
1959 else 1959 else
1960 ctrl &= ~IXGBE_RXDCTL_VME; 1960 ctrl &= ~IXGBE_RXDCTL_VME;
1961 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl); 1961 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
1962 /* 1962 /*
1963 * Let Rx path know that it needs to store VLAN tag 1963 * Let Rx path know that it needs to store VLAN tag
1964 * as part of extra mbuf info. 1964 * as part of extra mbuf info.
1965 */ 1965 */
1966 rxr->vtag_strip = hwtagging ? TRUE : FALSE; 1966 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
1967 } 1967 }
1968} /* ixv_setup_vlan_tagging */ 1968} /* ixv_setup_vlan_tagging */
1969 1969
1970/************************************************************************ 1970/************************************************************************
1971 * ixv_setup_vlan_support 1971 * ixv_setup_vlan_support
1972 ************************************************************************/ 1972 ************************************************************************/
1973static int 1973static int
1974ixv_setup_vlan_support(struct adapter *adapter) 1974ixv_setup_vlan_support(struct adapter *adapter)
1975{ 1975{
1976 struct ethercom *ec = &adapter->osdep.ec; 1976 struct ethercom *ec = &adapter->osdep.ec;
1977 struct ixgbe_hw *hw = &adapter->hw; 1977 struct ixgbe_hw *hw = &adapter->hw;
1978 u32 vid, vfta, retry; 1978 u32 vid, vfta, retry;
1979 struct vlanid_list *vlanidp; 1979 struct vlanid_list *vlanidp;
1980 int rv, error = 0; 1980 int rv, error = 0;
1981 1981
1982 /* 1982 /*
1983 * This function is called from both if_init and ifflags_cb() 1983 * This function is called from both if_init and ifflags_cb()
1984 * on NetBSD. 1984 * on NetBSD.
1985 */ 1985 */
1986 1986
1987 /* 1987 /*
1988 * Part 1: 1988 * Part 1:
1989 * Setup VLAN HW tagging 1989 * Setup VLAN HW tagging
1990 */ 1990 */
1991 ixv_setup_vlan_tagging(adapter); 1991 ixv_setup_vlan_tagging(adapter);
1992 1992
1993 if (!VLAN_ATTACHED(ec)) 1993 if (!VLAN_ATTACHED(ec))
1994 return 0; 1994 return 0;
1995 1995
1996 /* 1996 /*
1997 * Part 2: 1997 * Part 2:
1998 * Setup VLAN HW filter 1998 * Setup VLAN HW filter
1999 */ 1999 */
2000 /* Cleanup shadow_vfta */ 2000 /* Cleanup shadow_vfta */
2001 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 2001 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
2002 adapter->shadow_vfta[i] = 0; 2002 adapter->shadow_vfta[i] = 0;
2003 /* Generate shadow_vfta from ec_vids */ 2003 /* Generate shadow_vfta from ec_vids */
2004 ETHER_LOCK(ec); 2004 ETHER_LOCK(ec);
2005 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 2005 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2006 uint32_t idx; 2006 uint32_t idx;
2007 2007
2008 idx = vlanidp->vid / 32; 2008 idx = vlanidp->vid / 32;
2009 KASSERT(idx < IXGBE_VFTA_SIZE); 2009 KASSERT(idx < IXGBE_VFTA_SIZE);
2010 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); 2010 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2011 } 2011 }
2012 ETHER_UNLOCK(ec); 2012 ETHER_UNLOCK(ec);
2013  2013
2014 /* 2014 /*
2015 * A soft reset zero's out the VFTA, so 2015 * A soft reset zero's out the VFTA, so
2016 * we need to repopulate it now. 2016 * we need to repopulate it now.
2017 */ 2017 */
2018 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { 2018 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2019 if (adapter->shadow_vfta[i] == 0) 2019 if (adapter->shadow_vfta[i] == 0)
2020 continue; 2020 continue;
2021 vfta = adapter->shadow_vfta[i]; 2021 vfta = adapter->shadow_vfta[i];
2022 /* 2022 /*
2023 * Reconstruct the vlan id's 2023 * Reconstruct the vlan id's
2024 * based on the bits set in each 2024 * based on the bits set in each
2025 * of the array ints. 2025 * of the array ints.
2026 */ 2026 */
2027 for (int j = 0; j < 32; j++) { 2027 for (int j = 0; j < 32; j++) {
2028 retry = 0; 2028 retry = 0;
2029 if ((vfta & ((u32)1 << j)) == 0) 2029 if ((vfta & ((u32)1 << j)) == 0)
2030 continue; 2030 continue;
2031 vid = (i * 32) + j; 2031 vid = (i * 32) + j;
2032  2032
2033 /* Call the shared code mailbox routine */ 2033 /* Call the shared code mailbox routine */
2034 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE, 2034 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2035 FALSE)) != 0) { 2035 FALSE)) != 0) {
2036 if (++retry > 5) { 2036 if (++retry > 5) {
2037 device_printf(adapter->dev, 2037 device_printf(adapter->dev,
2038 "%s: max retry exceeded\n", 2038 "%s: max retry exceeded\n",
2039 __func__); 2039 __func__);
2040 break; 2040 break;
2041 } 2041 }
2042 } 2042 }
2043 if (rv != 0) { 2043 if (rv != 0) {
2044 device_printf(adapter->dev, 2044 device_printf(adapter->dev,
2045 "failed to set vlan %d\n", vid); 2045 "failed to set vlan %d\n", vid);
2046 error = EACCES; 2046 error = EACCES;
2047 } 2047 }
2048 } 2048 }
2049 } 2049 }
2050 return error; 2050 return error;
2051} /* ixv_setup_vlan_support */ 2051} /* ixv_setup_vlan_support */
2052 2052
2053static int 2053static int
2054ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 2054ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2055{ 2055{
2056 struct ifnet *ifp = &ec->ec_if; 2056 struct ifnet *ifp = &ec->ec_if;
2057 struct adapter *adapter = ifp->if_softc; 2057 struct adapter *adapter = ifp->if_softc;
2058 int rv; 2058 int rv;
2059 2059
2060 if (set) 2060 if (set)
2061 rv = ixv_register_vlan(ifp->if_softc, ifp, vid); 2061 rv = ixv_register_vlan(adapter, vid);
2062 else 2062 else
2063 rv = ixv_unregister_vlan(ifp->if_softc, ifp, vid); 2063 rv = ixv_unregister_vlan(adapter, vid);
2064 2064
2065 if (rv != 0) 2065 if (rv != 0)
2066 return rv; 2066 return rv;
2067 2067
2068 /* 2068 /*
2069 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0 2069 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
2070 * or 0 to 1. 2070 * or 0 to 1.
2071 */ 2071 */
2072 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0))) 2072 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2073 ixv_setup_vlan_tagging(adapter); 2073 ixv_setup_vlan_tagging(adapter);
2074 2074
2075 return rv; 2075 return rv;
2076} 2076}
2077 2077
2078/************************************************************************ 2078/************************************************************************
2079 * ixv_register_vlan 2079 * ixv_register_vlan
2080 * 2080 *
2081 * Run via a vlan config EVENT, it enables us to use the 2081 * Run via a vlan config EVENT, it enables us to use the
2082 * HW Filter table since we can get the vlan id. This just 2082 * HW Filter table since we can get the vlan id. This just
2083 * creates the entry in the soft version of the VFTA, init 2083 * creates the entry in the soft version of the VFTA, init
2084 * will repopulate the real table. 2084 * will repopulate the real table.
2085 ************************************************************************/ 2085 ************************************************************************/
2086static int 2086static int
2087ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2087ixv_register_vlan(struct adapter *adapter, u16 vtag)
2088{ 2088{
2089 struct adapter *adapter = ifp->if_softc; 
2090 struct ixgbe_hw *hw = &adapter->hw; 2089 struct ixgbe_hw *hw = &adapter->hw;
2091 u16 index, bit; 2090 u16 index, bit;
2092 int error; 2091 int error;
2093 2092
2094 if (ifp->if_softc != arg) /* Not our event */ 
2095 return EINVAL; 
2096 
2097 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2093 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2098 return EINVAL; 2094 return EINVAL;
2099 IXGBE_CORE_LOCK(adapter); 2095 IXGBE_CORE_LOCK(adapter);
2100 index = (vtag >> 5) & 0x7F; 2096 index = (vtag >> 5) & 0x7F;
2101 bit = vtag & 0x1F; 2097 bit = vtag & 0x1F;
2102 adapter->shadow_vfta[index] |= ((u32)1 << bit); 2098 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2103 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false); 2099 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2104 IXGBE_CORE_UNLOCK(adapter); 2100 IXGBE_CORE_UNLOCK(adapter);
2105 2101
2106 if (error != 0) { 2102 if (error != 0) {
2107 device_printf(adapter->dev, "failed to register vlan %hu\n", 2103 device_printf(adapter->dev, "failed to register vlan %hu\n",
2108 vtag); 2104 vtag);
2109 error = EACCES; 2105 error = EACCES;
2110 } 2106 }
2111 return error; 2107 return error;
2112} /* ixv_register_vlan */ 2108} /* ixv_register_vlan */
2113 2109
2114/************************************************************************ 2110/************************************************************************
2115 * ixv_unregister_vlan 2111 * ixv_unregister_vlan
2116 * 2112 *
2117 * Run via a vlan unconfig EVENT, remove our entry 2113 * Run via a vlan unconfig EVENT, remove our entry
2118 * in the soft vfta. 2114 * in the soft vfta.
2119 ************************************************************************/ 2115 ************************************************************************/
2120static int 2116static int
2121ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2117ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
2122{ 2118{
2123 struct adapter *adapter = ifp->if_softc; 
2124 struct ixgbe_hw *hw = &adapter->hw; 2119 struct ixgbe_hw *hw = &adapter->hw;
2125 u16 index, bit; 2120 u16 index, bit;
2126 int error; 2121 int error;
2127 2122
2128 if (ifp->if_softc != arg) 
2129 return EINVAL; 
2130 
2131 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2123 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2132 return EINVAL; 2124 return EINVAL;
2133 2125
2134 IXGBE_CORE_LOCK(adapter); 2126 IXGBE_CORE_LOCK(adapter);
2135 index = (vtag >> 5) & 0x7F; 2127 index = (vtag >> 5) & 0x7F;
2136 bit = vtag & 0x1F; 2128 bit = vtag & 0x1F;
2137 adapter->shadow_vfta[index] &= ~((u32)1 << bit); 2129 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2138 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false); 2130 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2139 IXGBE_CORE_UNLOCK(adapter); 2131 IXGBE_CORE_UNLOCK(adapter);
2140 2132
2141 if (error != 0) { 2133 if (error != 0) {
2142 device_printf(adapter->dev, "failed to unregister vlan %hu\n", 2134 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2143 vtag); 2135 vtag);
2144 error = EIO; 2136 error = EIO;
2145 } 2137 }
2146 return error; 2138 return error;
2147} /* ixv_unregister_vlan */ 2139} /* ixv_unregister_vlan */
2148 2140
2149/************************************************************************ 2141/************************************************************************
2150 * ixv_enable_intr 2142 * ixv_enable_intr
2151 ************************************************************************/ 2143 ************************************************************************/
2152static void 2144static void
2153ixv_enable_intr(struct adapter *adapter) 2145ixv_enable_intr(struct adapter *adapter)
2154{ 2146{
2155 struct ixgbe_hw *hw = &adapter->hw; 2147 struct ixgbe_hw *hw = &adapter->hw;
2156 struct ix_queue *que = adapter->queues; 2148 struct ix_queue *que = adapter->queues;
2157 u32 mask; 2149 u32 mask;
2158 int i; 2150 int i;
2159 2151
2160 /* For VTEIAC */ 2152 /* For VTEIAC */
2161 mask = (1 << adapter->vector); 2153 mask = (1 << adapter->vector);
2162 for (i = 0; i < adapter->num_queues; i++, que++) 2154 for (i = 0; i < adapter->num_queues; i++, que++)
2163 mask |= (1 << que->msix); 2155 mask |= (1 << que->msix);
2164 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); 2156 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2165 2157
2166 /* For VTEIMS */ 2158 /* For VTEIMS */
2167 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); 2159 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2168 que = adapter->queues; 2160 que = adapter->queues;
2169 for (i = 0; i < adapter->num_queues; i++, que++) 2161 for (i = 0; i < adapter->num_queues; i++, que++)
2170 ixv_enable_queue(adapter, que->msix); 2162 ixv_enable_queue(adapter, que->msix);
2171 2163
2172 IXGBE_WRITE_FLUSH(hw); 2164 IXGBE_WRITE_FLUSH(hw);
2173} /* ixv_enable_intr */ 2165} /* ixv_enable_intr */
2174 2166
2175/************************************************************************ 2167/************************************************************************
2176 * ixv_disable_intr 2168 * ixv_disable_intr
2177 ************************************************************************/ 2169 ************************************************************************/
2178static void 2170static void
2179ixv_disable_intr(struct adapter *adapter) 2171ixv_disable_intr(struct adapter *adapter)
2180{ 2172{
2181 struct ix_queue *que = adapter->queues; 2173 struct ix_queue *que = adapter->queues;
2182 2174
2183 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); 2175 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2184 2176
2185 /* disable interrupts other than queues */ 2177 /* disable interrupts other than queues */
2186 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector); 2178 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2187 2179
2188 for (int i = 0; i < adapter->num_queues; i++, que++) 2180 for (int i = 0; i < adapter->num_queues; i++, que++)
2189 ixv_disable_queue(adapter, que->msix); 2181 ixv_disable_queue(adapter, que->msix);
2190 2182
2191 IXGBE_WRITE_FLUSH(&adapter->hw); 2183 IXGBE_WRITE_FLUSH(&adapter->hw);
2192} /* ixv_disable_intr */ 2184} /* ixv_disable_intr */
2193 2185
2194/************************************************************************ 2186/************************************************************************
2195 * ixv_set_ivar 2187 * ixv_set_ivar
2196 * 2188 *
2197 * Setup the correct IVAR register for a particular MSI-X interrupt 2189 * Setup the correct IVAR register for a particular MSI-X interrupt
2198 * - entry is the register array entry 2190 * - entry is the register array entry
2199 * - vector is the MSI-X vector for this queue 2191 * - vector is the MSI-X vector for this queue
2200 * - type is RX/TX/MISC 2192 * - type is RX/TX/MISC
2201 ************************************************************************/ 2193 ************************************************************************/
2202static void 2194static void
2203ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 2195ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2204{ 2196{
2205 struct ixgbe_hw *hw = &adapter->hw; 2197 struct ixgbe_hw *hw = &adapter->hw;
2206 u32 ivar, index; 2198 u32 ivar, index;
2207 2199
2208 vector |= IXGBE_IVAR_ALLOC_VAL; 2200 vector |= IXGBE_IVAR_ALLOC_VAL;
2209 2201
2210 if (type == -1) { /* MISC IVAR */ 2202 if (type == -1) { /* MISC IVAR */
2211 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 2203 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2212 ivar &= ~0xFF; 2204 ivar &= ~0xFF;
2213 ivar |= vector; 2205 ivar |= vector;
2214 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 2206 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2215 } else { /* RX/TX IVARS */ 2207 } else { /* RX/TX IVARS */
2216 index = (16 * (entry & 1)) + (8 * type); 2208 index = (16 * (entry & 1)) + (8 * type);
2217 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); 2209 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2218 ivar &= ~(0xffUL << index); 2210 ivar &= ~(0xffUL << index);
2219 ivar |= ((u32)vector << index); 2211 ivar |= ((u32)vector << index);
2220 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); 2212 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2221 } 2213 }
2222} /* ixv_set_ivar */ 2214} /* ixv_set_ivar */
2223 2215
2224/************************************************************************ 2216/************************************************************************
2225 * ixv_configure_ivars 2217 * ixv_configure_ivars
2226 ************************************************************************/ 2218 ************************************************************************/
2227static void 2219static void
2228ixv_configure_ivars(struct adapter *adapter) 2220ixv_configure_ivars(struct adapter *adapter)
2229{ 2221{
2230 struct ix_queue *que = adapter->queues; 2222 struct ix_queue *que = adapter->queues;
2231 2223
2232 /* XXX We should sync EITR value calculation with ixgbe.c? */ 2224 /* XXX We should sync EITR value calculation with ixgbe.c? */
2233 2225
2234 for (int i = 0; i < adapter->num_queues; i++, que++) { 2226 for (int i = 0; i < adapter->num_queues; i++, que++) {
2235 /* First the RX queue entry */ 2227 /* First the RX queue entry */
2236 ixv_set_ivar(adapter, i, que->msix, 0); 2228 ixv_set_ivar(adapter, i, que->msix, 0);
2237 /* ... and the TX */ 2229 /* ... and the TX */
2238 ixv_set_ivar(adapter, i, que->msix, 1); 2230 ixv_set_ivar(adapter, i, que->msix, 1);
2239 /* Set an initial value in EITR */ 2231 /* Set an initial value in EITR */
2240 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT); 2232 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2241 } 2233 }
2242 2234
2243 /* For the mailbox interrupt */ 2235 /* For the mailbox interrupt */
2244 ixv_set_ivar(adapter, 1, adapter->vector, -1); 2236 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2245} /* ixv_configure_ivars */ 2237} /* ixv_configure_ivars */
2246 2238
2247 2239
2248/************************************************************************ 2240/************************************************************************
2249 * ixv_save_stats 2241 * ixv_save_stats
2250 * 2242 *
2251 * The VF stats registers never have a truly virgin 2243 * The VF stats registers never have a truly virgin
2252 * starting point, so this routine tries to make an 2244 * starting point, so this routine tries to make an
2253 * artificial one, marking ground zero on attach as 2245 * artificial one, marking ground zero on attach as
2254 * it were. 2246 * it were.
2255 ************************************************************************/ 2247 ************************************************************************/
2256static void 2248static void
2257ixv_save_stats(struct adapter *adapter) 2249ixv_save_stats(struct adapter *adapter)
2258{ 2250{
2259 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2251 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2260 2252
2261 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) { 2253 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2262 stats->saved_reset_vfgprc += 2254 stats->saved_reset_vfgprc +=
2263 stats->vfgprc.ev_count - stats->base_vfgprc; 2255 stats->vfgprc.ev_count - stats->base_vfgprc;
2264 stats->saved_reset_vfgptc += 2256 stats->saved_reset_vfgptc +=
2265 stats->vfgptc.ev_count - stats->base_vfgptc; 2257 stats->vfgptc.ev_count - stats->base_vfgptc;
2266 stats->saved_reset_vfgorc += 2258 stats->saved_reset_vfgorc +=
2267 stats->vfgorc.ev_count - stats->base_vfgorc; 2259 stats->vfgorc.ev_count - stats->base_vfgorc;
2268 stats->saved_reset_vfgotc += 2260 stats->saved_reset_vfgotc +=
2269 stats->vfgotc.ev_count - stats->base_vfgotc; 2261 stats->vfgotc.ev_count - stats->base_vfgotc;
2270 stats->saved_reset_vfmprc += 2262 stats->saved_reset_vfmprc +=
2271 stats->vfmprc.ev_count - stats->base_vfmprc; 2263 stats->vfmprc.ev_count - stats->base_vfmprc;
2272 } 2264 }
2273} /* ixv_save_stats */ 2265} /* ixv_save_stats */
2274 2266
2275/************************************************************************ 2267/************************************************************************
2276 * ixv_init_stats 2268 * ixv_init_stats
2277 ************************************************************************/ 2269 ************************************************************************/
2278static void 2270static void
2279ixv_init_stats(struct adapter *adapter) 2271ixv_init_stats(struct adapter *adapter)
2280{ 2272{
2281 struct ixgbe_hw *hw = &adapter->hw; 2273 struct ixgbe_hw *hw = &adapter->hw;
2282 2274
2283 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 2275 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2284 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 2276 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2285 adapter->stats.vf.last_vfgorc |= 2277 adapter->stats.vf.last_vfgorc |=
2286 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 2278 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2287 2279
2288 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 2280 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2289 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 2281 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2290 adapter->stats.vf.last_vfgotc |= 2282 adapter->stats.vf.last_vfgotc |=
2291 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 2283 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2292 2284
2293 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 2285 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2294 2286
2295 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; 2287 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2296 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; 2288 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2297 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; 2289 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2298 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; 2290 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2299 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; 2291 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2300} /* ixv_init_stats */ 2292} /* ixv_init_stats */
2301 2293
2302#define UPDATE_STAT_32(reg, last, count) \ 2294#define UPDATE_STAT_32(reg, last, count) \
2303{ \ 2295{ \
2304 u32 current = IXGBE_READ_REG(hw, (reg)); \ 2296 u32 current = IXGBE_READ_REG(hw, (reg)); \
2305 if (current < (last)) \ 2297 if (current < (last)) \
2306 count.ev_count += 0x100000000LL; \ 2298 count.ev_count += 0x100000000LL; \
2307 (last) = current; \ 2299 (last) = current; \
2308 count.ev_count &= 0xFFFFFFFF00000000LL; \ 2300 count.ev_count &= 0xFFFFFFFF00000000LL; \
2309 count.ev_count |= current; \ 2301 count.ev_count |= current; \
2310} 2302}
2311 2303
2312#define UPDATE_STAT_36(lsb, msb, last, count) \ 2304#define UPDATE_STAT_36(lsb, msb, last, count) \
2313{ \ 2305{ \
2314 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \ 2306 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
2315 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \ 2307 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
2316 u64 current = ((cur_msb << 32) | cur_lsb); \ 2308 u64 current = ((cur_msb << 32) | cur_lsb); \
2317 if (current < (last)) \ 2309 if (current < (last)) \
2318 count.ev_count += 0x1000000000LL; \ 2310 count.ev_count += 0x1000000000LL; \
2319 (last) = current; \ 2311 (last) = current; \
2320 count.ev_count &= 0xFFFFFFF000000000LL; \ 2312 count.ev_count &= 0xFFFFFFF000000000LL; \
2321 count.ev_count |= current; \ 2313 count.ev_count |= current; \
2322} 2314}
2323 2315
2324/************************************************************************ 2316/************************************************************************
2325 * ixv_update_stats - Update the board statistics counters. 2317 * ixv_update_stats - Update the board statistics counters.
2326 ************************************************************************/ 2318 ************************************************************************/
2327void 2319void
2328ixv_update_stats(struct adapter *adapter) 2320ixv_update_stats(struct adapter *adapter)
2329{ 2321{
2330 struct ixgbe_hw *hw = &adapter->hw; 2322 struct ixgbe_hw *hw = &adapter->hw;
2331 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2323 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2332 2324
2333 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc); 2325 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2334 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc); 2326 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2335 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc, 2327 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2336 stats->vfgorc); 2328 stats->vfgorc);
2337 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc, 2329 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2338 stats->vfgotc); 2330 stats->vfgotc);
2339 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc); 2331 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2340 2332
2341 /* Fill out the OS statistics structure */ 2333 /* Fill out the OS statistics structure */
2342 /* 2334 /*
2343 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with 2335 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
2344 * adapter->stats counters. It's required to make ifconfig -z 2336 * adapter->stats counters. It's required to make ifconfig -z
2345 * (SOICZIFDATA) work. 2337 * (SOICZIFDATA) work.
2346 */ 2338 */
2347} /* ixv_update_stats */ 2339} /* ixv_update_stats */
2348 2340
2349/************************************************************************ 2341/************************************************************************
2350 * ixv_sysctl_interrupt_rate_handler 2342 * ixv_sysctl_interrupt_rate_handler
2351 ************************************************************************/ 2343 ************************************************************************/
2352static int 2344static int
2353ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) 2345ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2354{ 2346{
2355 struct sysctlnode node = *rnode; 2347 struct sysctlnode node = *rnode;
2356 struct ix_queue *que = (struct ix_queue *)node.sysctl_data; 2348 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2357 struct adapter *adapter = que->adapter; 2349 struct adapter *adapter = que->adapter;
2358 uint32_t reg, usec, rate; 2350 uint32_t reg, usec, rate;
2359 int error; 2351 int error;
2360 2352
2361 if (que == NULL) 2353 if (que == NULL)
2362 return 0; 2354 return 0;
2363 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix)); 2355 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2364 usec = ((reg & 0x0FF8) >> 3); 2356 usec = ((reg & 0x0FF8) >> 3);
2365 if (usec > 0) 2357 if (usec > 0)
2366 rate = 500000 / usec; 2358 rate = 500000 / usec;
2367 else 2359 else
2368 rate = 0; 2360 rate = 0;
2369 node.sysctl_data = &rate; 2361 node.sysctl_data = &rate;
2370 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2362 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2371 if (error || newp == NULL) 2363 if (error || newp == NULL)
2372 return error; 2364 return error;
2373 reg &= ~0xfff; /* default, no limitation */ 2365 reg &= ~0xfff; /* default, no limitation */
2374 if (rate > 0 && rate < 500000) { 2366 if (rate > 0 && rate < 500000) {
2375 if (rate < 1000) 2367 if (rate < 1000)
2376 rate = 1000; 2368 rate = 1000;
2377 reg |= ((4000000/rate) & 0xff8); 2369 reg |= ((4000000/rate) & 0xff8);
2378 /* 2370 /*
2379 * When RSC is used, ITR interval must be larger than 2371 * When RSC is used, ITR interval must be larger than
2380 * RSC_DELAY. Currently, we use 2us for RSC_DELAY. 2372 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2381 * The minimum value is always greater than 2us on 100M 2373 * The minimum value is always greater than 2us on 100M
2382 * (and 10M?(not documented)), but it's not on 1G and higher. 2374 * (and 10M?(not documented)), but it's not on 1G and higher.
2383 */ 2375 */
2384 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 2376 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2385 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 2377 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2386 if ((adapter->num_queues > 1) 2378 if ((adapter->num_queues > 1)
2387 && (reg < IXGBE_MIN_RSC_EITR_10G1G)) 2379 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2388 return EINVAL; 2380 return EINVAL;
2389 } 2381 }
2390 ixv_max_interrupt_rate = rate; 2382 ixv_max_interrupt_rate = rate;
2391 } else 2383 } else
2392 ixv_max_interrupt_rate = 0; 2384 ixv_max_interrupt_rate = 0;
2393 ixv_eitr_write(adapter, que->msix, reg); 2385 ixv_eitr_write(adapter, que->msix, reg);
2394 2386
2395 return (0); 2387 return (0);
2396} /* ixv_sysctl_interrupt_rate_handler */ 2388} /* ixv_sysctl_interrupt_rate_handler */
2397 2389
2398const struct sysctlnode * 2390const struct sysctlnode *
2399ixv_sysctl_instance(struct adapter *adapter) 2391ixv_sysctl_instance(struct adapter *adapter)
2400{ 2392{
2401 const char *dvname; 2393 const char *dvname;
2402 struct sysctllog **log; 2394 struct sysctllog **log;
2403 int rc; 2395 int rc;
2404 const struct sysctlnode *rnode; 2396 const struct sysctlnode *rnode;
2405 2397
2406 log = &adapter->sysctllog; 2398 log = &adapter->sysctllog;
2407 dvname = device_xname(adapter->dev); 2399 dvname = device_xname(adapter->dev);
2408 2400
2409 if ((rc = sysctl_createv(log, 0, NULL, &rnode, 2401 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2410 0, CTLTYPE_NODE, dvname, 2402 0, CTLTYPE_NODE, dvname,
2411 SYSCTL_DESCR("ixv information and settings"), 2403 SYSCTL_DESCR("ixv information and settings"),
2412 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 2404 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2413 goto err; 2405 goto err;
2414 2406
2415 return rnode; 2407 return rnode;
2416err: 2408err:
2417 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc); 2409 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2418 return NULL; 2410 return NULL;
2419} 2411}
2420 2412
2421static void 2413static void
2422ixv_add_device_sysctls(struct adapter *adapter) 2414ixv_add_device_sysctls(struct adapter *adapter)
2423{ 2415{
2424 struct sysctllog **log; 2416 struct sysctllog **log;
2425 const struct sysctlnode *rnode, *cnode; 2417 const struct sysctlnode *rnode, *cnode;
2426 device_t dev; 2418 device_t dev;
2427 2419
2428 dev = adapter->dev; 2420 dev = adapter->dev;
2429 log = &adapter->sysctllog; 2421 log = &adapter->sysctllog;
2430 2422
2431 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2423 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2432 aprint_error_dev(dev, "could not create sysctl root\n"); 2424 aprint_error_dev(dev, "could not create sysctl root\n");
2433 return; 2425 return;
2434 } 2426 }
2435 2427
2436 if (sysctl_createv(log, 0, &rnode, &cnode, 2428 if (sysctl_createv(log, 0, &rnode, &cnode,
2437 CTLFLAG_READWRITE, CTLTYPE_INT, 2429 CTLFLAG_READWRITE, CTLTYPE_INT,
2438 "debug", SYSCTL_DESCR("Debug Info"), 2430 "debug", SYSCTL_DESCR("Debug Info"),
2439 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) 2431 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2440 aprint_error_dev(dev, "could not create sysctl\n"); 2432 aprint_error_dev(dev, "could not create sysctl\n");
2441 2433
2442 if (sysctl_createv(log, 0, &rnode, &cnode, 2434 if (sysctl_createv(log, 0, &rnode, &cnode,
2443 CTLFLAG_READWRITE, CTLTYPE_BOOL, 2435 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2444 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), 2436 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2445 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) 2437 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2446 aprint_error_dev(dev, "could not create sysctl\n"); 2438 aprint_error_dev(dev, "could not create sysctl\n");
2447 2439
2448 if (sysctl_createv(log, 0, &rnode, &cnode, 2440 if (sysctl_createv(log, 0, &rnode, &cnode,
2449 CTLFLAG_READWRITE, CTLTYPE_BOOL, 2441 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2450 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), 2442 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
2451 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0) 2443 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
2452 aprint_error_dev(dev, "could not create sysctl\n"); 2444 aprint_error_dev(dev, "could not create sysctl\n");
2453} 2445}
2454 2446
/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 *
 * Attaches driver-wide event counters, then a per-queue sysctl node
 * plus per-queue event counters for every queue, and finally the MAC
 * and mailbox statistics.  If creating any per-queue sysctl fails the
 * queue loop is abandoned (break), but the MAC/mailbox counters below
 * it are still attached.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
	    NULL, xname, "Link MSI-X IRQ Handled");

	/* Per-queue nodes and counters; txr/rxr advance in step with i. */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* "qN" node; rnode is re-pointed at the new child node. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		/*
		 * NOTE(review): ctx/queue_list are not declared anywhere in
		 * this function; this block looks like FreeBSD sysctl
		 * leftovers and would not compile with LRO defined — confirm
		 * before enabling LRO.
		 */
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2639 2631
2640/************************************************************************ 2632/************************************************************************
2641 * ixv_set_sysctl_value 2633 * ixv_set_sysctl_value
2642 ************************************************************************/ 2634 ************************************************************************/
2643static void 2635static void
2644ixv_set_sysctl_value(struct adapter *adapter, const char *name, 2636ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2645 const char *description, int *limit, int value) 2637 const char *description, int *limit, int value)
2646{ 2638{
2647 device_t dev = adapter->dev; 2639 device_t dev = adapter->dev;
2648 struct sysctllog **log; 2640 struct sysctllog **log;
2649 const struct sysctlnode *rnode, *cnode; 2641 const struct sysctlnode *rnode, *cnode;
2650 2642
2651 log = &adapter->sysctllog; 2643 log = &adapter->sysctllog;
2652 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2644 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2653 aprint_error_dev(dev, "could not create sysctl root\n"); 2645 aprint_error_dev(dev, "could not create sysctl root\n");
2654 return; 2646 return;
2655 } 2647 }
2656 if (sysctl_createv(log, 0, &rnode, &cnode, 2648 if (sysctl_createv(log, 0, &rnode, &cnode,
2657 CTLFLAG_READWRITE, CTLTYPE_INT, 2649 CTLFLAG_READWRITE, CTLTYPE_INT,
2658 name, SYSCTL_DESCR(description), 2650 name, SYSCTL_DESCR(description),
2659 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0) 2651 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2660 aprint_error_dev(dev, "could not create sysctl\n"); 2652 aprint_error_dev(dev, "could not create sysctl\n");
2661 *limit = value; 2653 *limit = value;
2662} /* ixv_set_sysctl_value */ 2654} /* ixv_set_sysctl_value */
2663 2655
2664/************************************************************************ 2656/************************************************************************
2665 * ixv_print_debug_info 2657 * ixv_print_debug_info
2666 * 2658 *
2667 * Called only when em_display_debug_stats is enabled. 2659 * Called only when em_display_debug_stats is enabled.
2668 * Provides a way to take a look at important statistics 2660 * Provides a way to take a look at important statistics
2669 * maintained by the driver and hardware. 2661 * maintained by the driver and hardware.
2670 ************************************************************************/ 2662 ************************************************************************/
2671static void 2663static void
2672ixv_print_debug_info(struct adapter *adapter) 2664ixv_print_debug_info(struct adapter *adapter)
2673{ 2665{
2674 device_t dev = adapter->dev; 2666 device_t dev = adapter->dev;
2675 struct ix_queue *que = adapter->queues; 2667 struct ix_queue *que = adapter->queues;
2676 struct rx_ring *rxr; 2668 struct rx_ring *rxr;
2677 struct tx_ring *txr; 2669 struct tx_ring *txr;
2678#ifdef LRO 2670#ifdef LRO
2679 struct lro_ctrl *lro; 2671 struct lro_ctrl *lro;
2680#endif /* LRO */ 2672#endif /* LRO */
2681 2673
2682 for (int i = 0; i < adapter->num_queues; i++, que++) { 2674 for (int i = 0; i < adapter->num_queues; i++, que++) {
2683 txr = que->txr; 2675 txr = que->txr;
2684 rxr = que->rxr; 2676 rxr = que->rxr;
2685#ifdef LRO 2677#ifdef LRO
2686 lro = &rxr->lro; 2678 lro = &rxr->lro;
2687#endif /* LRO */ 2679#endif /* LRO */
2688 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n", 2680 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2689 que->msix, (long)que->irqs.ev_count); 2681 que->msix, (long)que->irqs.ev_count);
2690 device_printf(dev, "RX(%d) Packets Received: %lld\n", 2682 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2691 rxr->me, (long long)rxr->rx_packets.ev_count); 2683 rxr->me, (long long)rxr->rx_packets.ev_count);
2692 device_printf(dev, "RX(%d) Bytes Received: %lu\n", 2684 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2693 rxr->me, (long)rxr->rx_bytes.ev_count); 2685 rxr->me, (long)rxr->rx_bytes.ev_count);
2694#ifdef LRO 2686#ifdef LRO
2695 device_printf(dev, "RX(%d) LRO Queued= %ju\n", 2687 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2696 rxr->me, (uintmax_t)lro->lro_queued); 2688 rxr->me, (uintmax_t)lro->lro_queued);
2697 device_printf(dev, "RX(%d) LRO Flushed= %ju\n", 2689 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2698 rxr->me, (uintmax_t)lro->lro_flushed); 2690 rxr->me, (uintmax_t)lro->lro_flushed);
2699#endif /* LRO */ 2691#endif /* LRO */
2700 device_printf(dev, "TX(%d) Packets Sent: %lu\n", 2692 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2701 txr->me, (long)txr->total_packets.ev_count); 2693 txr->me, (long)txr->total_packets.ev_count);
2702 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n", 2694 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2703 txr->me, (long)txr->no_desc_avail.ev_count); 2695 txr->me, (long)txr->no_desc_avail.ev_count);
2704 } 2696 }
2705 2697
2706 device_printf(dev, "MBX IRQ Handled: %lu\n", 2698 device_printf(dev, "MBX IRQ Handled: %lu\n",
2707 (long)adapter->link_irq.ev_count); 2699 (long)adapter->link_irq.ev_count);
2708} /* ixv_print_debug_info */ 2700} /* ixv_print_debug_info */
2709 2701
2710/************************************************************************ 2702/************************************************************************
2711 * ixv_sysctl_debug 2703 * ixv_sysctl_debug
2712 ************************************************************************/ 2704 ************************************************************************/
2713static int 2705static int
2714ixv_sysctl_debug(SYSCTLFN_ARGS) 2706ixv_sysctl_debug(SYSCTLFN_ARGS)
2715{ 2707{
2716 struct sysctlnode node = *rnode; 2708 struct sysctlnode node = *rnode;
2717 struct adapter *adapter = (struct adapter *)node.sysctl_data; 2709 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2718 int error, result; 2710 int error, result;
2719 2711
2720 node.sysctl_data = &result; 2712 node.sysctl_data = &result;
2721 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2713 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2722 2714
2723 if (error || newp == NULL) 2715 if (error || newp == NULL)
2724 return error; 2716 return error;
2725 2717
2726 if (result == 1) 2718 if (result == 1)
2727 ixv_print_debug_info(adapter); 2719 ixv_print_debug_info(adapter);
2728 2720
2729 return 0; 2721 return 0;
2730} /* ixv_sysctl_debug */ 2722} /* ixv_sysctl_debug */
2731 2723
2732/************************************************************************ 2724/************************************************************************
2733 * ixv_init_device_features 2725 * ixv_init_device_features
2734 ************************************************************************/ 2726 ************************************************************************/
2735static void 2727static void
2736ixv_init_device_features(struct adapter *adapter) 2728ixv_init_device_features(struct adapter *adapter)
2737{ 2729{
2738 adapter->feat_cap = IXGBE_FEATURE_NETMAP 2730 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2739 | IXGBE_FEATURE_VF 2731 | IXGBE_FEATURE_VF
2740 | IXGBE_FEATURE_RSS 2732 | IXGBE_FEATURE_RSS
2741 | IXGBE_FEATURE_LEGACY_TX; 2733 | IXGBE_FEATURE_LEGACY_TX;
2742 2734
2743 /* A tad short on feature flags for VFs, atm. */ 2735 /* A tad short on feature flags for VFs, atm. */
2744 switch (adapter->hw.mac.type) { 2736 switch (adapter->hw.mac.type) {
2745 case ixgbe_mac_82599_vf: 2737 case ixgbe_mac_82599_vf:
2746 break; 2738 break;
2747 case ixgbe_mac_X540_vf: 2739 case ixgbe_mac_X540_vf:
2748 break; 2740 break;
2749 case ixgbe_mac_X550_vf: 2741 case ixgbe_mac_X550_vf:
2750 case ixgbe_mac_X550EM_x_vf: 2742 case ixgbe_mac_X550EM_x_vf:
2751 case ixgbe_mac_X550EM_a_vf: 2743 case ixgbe_mac_X550EM_a_vf:
2752 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; 2744 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2753 break; 2745 break;
2754 default: 2746 default:
2755 break; 2747 break;
2756 } 2748 }
2757 2749
2758 /* Enabled by default... */ 2750 /* Enabled by default... */
2759 /* Is a virtual function (VF) */ 2751 /* Is a virtual function (VF) */
2760 if (adapter->feat_cap & IXGBE_FEATURE_VF) 2752 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2761 adapter->feat_en |= IXGBE_FEATURE_VF; 2753 adapter->feat_en |= IXGBE_FEATURE_VF;
2762 /* Netmap */ 2754 /* Netmap */
2763 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 2755 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2764 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 2756 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2765 /* Receive-Side Scaling (RSS) */ 2757 /* Receive-Side Scaling (RSS) */
2766 if (adapter->feat_cap & IXGBE_FEATURE_RSS) 2758 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2767 adapter->feat_en |= IXGBE_FEATURE_RSS; 2759 adapter->feat_en |= IXGBE_FEATURE_RSS;
2768 /* Needs advanced context descriptor regardless of offloads req'd */ 2760 /* Needs advanced context descriptor regardless of offloads req'd */
2769 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) 2761 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2770 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; 2762 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2771 2763
2772 /* Enabled via sysctl... */ 2764 /* Enabled via sysctl... */
2773 /* Legacy (single queue) transmit */ 2765 /* Legacy (single queue) transmit */
2774 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 2766 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2775 ixv_enable_legacy_tx) 2767 ixv_enable_legacy_tx)
2776 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; 2768 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2777} /* ixv_init_device_features */ 2769} /* ixv_init_device_features */
2778 2770
/************************************************************************
 * ixv_shutdown - Shutdown entry point
 *
 * Stops the adapter under the core lock.  Compiled out for now; on
 * NetBSD this would be registered through pmf(9) rather than as a
 * device method.
 ************************************************************************/
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixv_shutdown */
#endif
2794 2786
2795static int 2787static int
2796ixv_ifflags_cb(struct ethercom *ec) 2788ixv_ifflags_cb(struct ethercom *ec)
2797{ 2789{
2798 struct ifnet *ifp = &ec->ec_if; 2790 struct ifnet *ifp = &ec->ec_if;
2799 struct adapter *adapter = ifp->if_softc; 2791 struct adapter *adapter = ifp->if_softc;
2800 int change, rv = 0; 2792 int change, rv = 0;
2801 2793
2802 IXGBE_CORE_LOCK(adapter); 2794 IXGBE_CORE_LOCK(adapter);
2803 2795
2804 change = ifp->if_flags ^ adapter->if_flags; 2796 change = ifp->if_flags ^ adapter->if_flags;
2805 if (change != 0) 2797 if (change != 0)
2806 adapter->if_flags = ifp->if_flags; 2798 adapter->if_flags = ifp->if_flags;
2807 2799
2808 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 2800 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2809 rv = ENETRESET; 2801 rv = ENETRESET;
2810 goto out; 2802 goto out;
2811 } 2803 }
2812 2804
2813 /* Check for ec_capenable. */ 2805 /* Check for ec_capenable. */
2814 change = ec->ec_capenable ^ adapter->ec_capenable; 2806 change = ec->ec_capenable ^ adapter->ec_capenable;
2815 adapter->ec_capenable = ec->ec_capenable; 2807 adapter->ec_capenable = ec->ec_capenable;
2816 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 2808 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
2817 | ETHERCAP_VLAN_HWFILTER)) != 0) { 2809 | ETHERCAP_VLAN_HWFILTER)) != 0) {
2818 rv = ENETRESET; 2810 rv = ENETRESET;
2819 goto out; 2811 goto out;
2820 } 2812 }
2821 2813
2822 /* 2814 /*
2823 * Special handling is not required for ETHERCAP_VLAN_MTU. 2815 * Special handling is not required for ETHERCAP_VLAN_MTU.
2824 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header. 2816 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
2825 */ 2817 */
2826 2818
2827 /* Set up VLAN support and filter */ 2819 /* Set up VLAN support and filter */
2828 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0) 2820 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
2829 rv = ixv_setup_vlan_support(adapter); 2821 rv = ixv_setup_vlan_support(adapter);
2830 2822
2831out: 2823out:
2832 IXGBE_CORE_UNLOCK(adapter); 2824 IXGBE_CORE_UNLOCK(adapter);
2833 2825
2834 return rv; 2826 return rv;
2835} 2827}
2836 2828
2837 2829
2838/************************************************************************ 2830/************************************************************************
2839 * ixv_ioctl - Ioctl entry point 2831 * ixv_ioctl - Ioctl entry point
2840 * 2832 *
2841 * Called when the user wants to configure the interface. 2833 * Called when the user wants to configure the interface.
2842 * 2834 *
2843 * return 0 on success, positive on failure 2835 * return 0 on success, positive on failure
2844 ************************************************************************/ 2836 ************************************************************************/
2845static int 2837static int
2846ixv_ioctl(struct ifnet *ifp, u_long command, void *data) 2838ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
2847{ 2839{
2848 struct adapter *adapter = ifp->if_softc; 2840 struct adapter *adapter = ifp->if_softc;
2849 struct ifcapreq *ifcr = data; 2841 struct ifcapreq *ifcr = data;
2850 int error = 0; 2842 int error = 0;
2851 int l4csum_en; 2843 int l4csum_en;
2852 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 2844 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
2853 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 2845 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2854 2846
2855 switch (command) { 2847 switch (command) {
2856 case SIOCSIFFLAGS: 2848 case SIOCSIFFLAGS:
2857 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 2849 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2858 break; 2850 break;
2859 case SIOCADDMULTI: 2851 case SIOCADDMULTI:
2860 case SIOCDELMULTI: 2852 case SIOCDELMULTI:
2861 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 2853 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
2862 break; 2854 break;
2863 case SIOCSIFMEDIA: 2855 case SIOCSIFMEDIA:
2864 case SIOCGIFMEDIA: 2856 case SIOCGIFMEDIA:
2865 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 2857 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2866 break; 2858 break;
2867 case SIOCSIFCAP: 2859 case SIOCSIFCAP:
2868 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 2860 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2869 break; 2861 break;
2870 case SIOCSIFMTU: 2862 case SIOCSIFMTU:
2871 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 2863 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2872 break; 2864 break;
2873 default: 2865 default:
2874 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); 2866 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
2875 break; 2867 break;
2876 } 2868 }
2877 2869
2878 switch (command) { 2870 switch (command) {
2879 case SIOCSIFCAP: 2871 case SIOCSIFCAP:
2880 /* Layer-4 Rx checksum offload has to be turned on and 2872 /* Layer-4 Rx checksum offload has to be turned on and
2881 * off as a unit. 2873 * off as a unit.
2882 */ 2874 */
2883 l4csum_en = ifcr->ifcr_capenable & l4csum; 2875 l4csum_en = ifcr->ifcr_capenable & l4csum;
2884 if (l4csum_en != l4csum && l4csum_en != 0) 2876 if (l4csum_en != l4csum && l4csum_en != 0)
2885 return EINVAL; 2877 return EINVAL;
2886 /*FALLTHROUGH*/ 2878 /*FALLTHROUGH*/
2887 case SIOCADDMULTI: 2879 case SIOCADDMULTI:
2888 case SIOCDELMULTI: 2880 case SIOCDELMULTI:
2889 case SIOCSIFFLAGS: 2881 case SIOCSIFFLAGS:
2890 case SIOCSIFMTU: 2882 case SIOCSIFMTU:
2891 default: 2883 default:
2892 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 2884 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2893 return error; 2885 return error;
2894 if ((ifp->if_flags & IFF_RUNNING) == 0) 2886 if ((ifp->if_flags & IFF_RUNNING) == 0)
2895 ; 2887 ;
2896 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { 2888 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
2897 IXGBE_CORE_LOCK(adapter); 2889 IXGBE_CORE_LOCK(adapter);
2898 ixv_init_locked(adapter); 2890 ixv_init_locked(adapter);
2899 IXGBE_CORE_UNLOCK(adapter); 2891 IXGBE_CORE_UNLOCK(adapter);
2900 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 2892 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
2901 /* 2893 /*
2902 * Multicast list has changed; set the hardware filter 2894 * Multicast list has changed; set the hardware filter
2903 * accordingly. 2895 * accordingly.
2904 */ 2896 */
2905 IXGBE_CORE_LOCK(adapter); 2897 IXGBE_CORE_LOCK(adapter);
2906 ixv_disable_intr(adapter); 2898 ixv_disable_intr(adapter);
2907 ixv_set_multi(adapter); 2899 ixv_set_multi(adapter);
2908 ixv_enable_intr(adapter); 2900 ixv_enable_intr(adapter);
2909 IXGBE_CORE_UNLOCK(adapter); 2901 IXGBE_CORE_UNLOCK(adapter);
2910 } 2902 }
2911 return 0; 2903 return 0;
2912 } 2904 }
2913} /* ixv_ioctl */ 2905} /* ixv_ioctl */
2914 2906
2915/************************************************************************ 2907/************************************************************************
2916 * ixv_init 2908 * ixv_init
2917 ************************************************************************/ 2909 ************************************************************************/
2918static int 2910static int
2919ixv_init(struct ifnet *ifp) 2911ixv_init(struct ifnet *ifp)
2920{ 2912{
2921 struct adapter *adapter = ifp->if_softc; 2913 struct adapter *adapter = ifp->if_softc;
2922 2914
2923 IXGBE_CORE_LOCK(adapter); 2915 IXGBE_CORE_LOCK(adapter);
2924 ixv_init_locked(adapter); 2916 ixv_init_locked(adapter);
2925 IXGBE_CORE_UNLOCK(adapter); 2917 IXGBE_CORE_UNLOCK(adapter);
2926 2918
2927 return 0; 2919 return 0;
2928} /* ixv_init */ 2920} /* ixv_init */
2929 2921
2930/************************************************************************ 2922/************************************************************************
2931 * ixv_handle_que 2923 * ixv_handle_que
2932 ************************************************************************/ 2924 ************************************************************************/
2933static void 2925static void
2934ixv_handle_que(void *context) 2926ixv_handle_que(void *context)
2935{ 2927{
2936 struct ix_queue *que = context; 2928 struct ix_queue *que = context;
2937 struct adapter *adapter = que->adapter; 2929 struct adapter *adapter = que->adapter;
2938 struct tx_ring *txr = que->txr; 2930 struct tx_ring *txr = que->txr;
2939 struct ifnet *ifp = adapter->ifp; 2931 struct ifnet *ifp = adapter->ifp;
2940 bool more; 2932 bool more;
2941 2933
2942 que->handleq.ev_count++; 2934 que->handleq.ev_count++;
2943 2935
2944 if (ifp->if_flags & IFF_RUNNING) { 2936 if (ifp->if_flags & IFF_RUNNING) {
2945 more = ixgbe_rxeof(que); 2937 more = ixgbe_rxeof(que);
2946 IXGBE_TX_LOCK(txr); 2938 IXGBE_TX_LOCK(txr);
2947 more |= ixgbe_txeof(txr); 2939 more |= ixgbe_txeof(txr);
2948 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 2940 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2949 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) 2941 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
2950 ixgbe_mq_start_locked(ifp, txr); 2942 ixgbe_mq_start_locked(ifp, txr);
2951 /* Only for queue 0 */ 2943 /* Only for queue 0 */
2952 /* NetBSD still needs this for CBQ */ 2944 /* NetBSD still needs this for CBQ */
2953 if ((&adapter->queues[0] == que) 2945 if ((&adapter->queues[0] == que)
2954 && (!ixgbe_legacy_ring_empty(ifp, NULL))) 2946 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
2955 ixgbe_legacy_start_locked(ifp, txr); 2947 ixgbe_legacy_start_locked(ifp, txr);
2956 IXGBE_TX_UNLOCK(txr); 2948 IXGBE_TX_UNLOCK(txr);
2957 if (more) { 2949 if (more) {
2958 que->req.ev_count++; 2950 que->req.ev_count++;
2959 if (adapter->txrx_use_workqueue) { 2951 if (adapter->txrx_use_workqueue) {
2960 /* 2952 /*
2961 * "enqueued flag" is not required here 2953 * "enqueued flag" is not required here
2962 * the same as ixg(4). See ixgbe_msix_que(). 2954 * the same as ixg(4). See ixgbe_msix_que().
2963 */ 2955 */
2964 workqueue_enqueue(adapter->que_wq, 2956 workqueue_enqueue(adapter->que_wq,
2965 &que->wq_cookie, curcpu()); 2957 &que->wq_cookie, curcpu());
2966 } else 2958 } else
2967 softint_schedule(que->que_si); 2959 softint_schedule(que->que_si);
2968 return; 2960 return;
2969 } 2961 }
2970 } 2962 }
2971 2963
2972 /* Re-enable this interrupt */ 2964 /* Re-enable this interrupt */
2973 ixv_enable_queue(adapter, que->msix); 2965 ixv_enable_queue(adapter, que->msix);
2974 2966
2975 return; 2967 return;
2976} /* ixv_handle_que */ 2968} /* ixv_handle_que */
2977 2969
2978/************************************************************************ 2970/************************************************************************
2979 * ixv_handle_que_work 2971 * ixv_handle_que_work
2980 ************************************************************************/ 2972 ************************************************************************/
2981static void 2973static void
2982ixv_handle_que_work(struct work *wk, void *context) 2974ixv_handle_que_work(struct work *wk, void *context)
2983{ 2975{
2984 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); 2976 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
2985 2977
2986 /* 2978 /*
2987 * "enqueued flag" is not required here the same as ixg(4). 2979 * "enqueued flag" is not required here the same as ixg(4).
2988 * See ixgbe_msix_que(). 2980 * See ixgbe_msix_que().
2989 */ 2981 */
2990 ixv_handle_que(que); 2982 ixv_handle_que(que);
2991} 2983}
2992 2984
2993/************************************************************************ 2985/************************************************************************
2994 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers 2986 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2995 ************************************************************************/ 2987 ************************************************************************/
2996static int 2988static int
2997ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa) 2989ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2998{ 2990{
2999 device_t dev = adapter->dev; 2991 device_t dev = adapter->dev;
3000 struct ix_queue *que = adapter->queues; 2992 struct ix_queue *que = adapter->queues;
3001 struct tx_ring *txr = adapter->tx_rings; 2993 struct tx_ring *txr = adapter->tx_rings;
3002 int error, msix_ctrl, rid, vector = 0; 2994 int error, msix_ctrl, rid, vector = 0;
3003 pci_chipset_tag_t pc; 2995 pci_chipset_tag_t pc;
3004 pcitag_t tag; 2996 pcitag_t tag;
3005 char intrbuf[PCI_INTRSTR_LEN]; 2997 char intrbuf[PCI_INTRSTR_LEN];
3006 char wqname[MAXCOMLEN]; 2998 char wqname[MAXCOMLEN];
3007 char intr_xname[32]; 2999 char intr_xname[32];
3008 const char *intrstr = NULL; 3000 const char *intrstr = NULL;
3009 kcpuset_t *affinity; 3001 kcpuset_t *affinity;
3010 int cpu_id = 0; 3002 int cpu_id = 0;
3011 3003
3012 pc = adapter->osdep.pc; 3004 pc = adapter->osdep.pc;
3013 tag = adapter->osdep.tag; 3005 tag = adapter->osdep.tag;
3014 3006
3015 adapter->osdep.nintrs = adapter->num_queues + 1; 3007 adapter->osdep.nintrs = adapter->num_queues + 1;
3016 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs, 3008 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
3017 adapter->osdep.nintrs) != 0) { 3009 adapter->osdep.nintrs) != 0) {
3018 aprint_error_dev(dev, 3010 aprint_error_dev(dev,
3019 "failed to allocate MSI-X interrupt\n"); 3011 "failed to allocate MSI-X interrupt\n");
3020 return (ENXIO); 3012 return (ENXIO);
3021 } 3013 }
3022 3014
3023 kcpuset_create(&affinity, false); 3015 kcpuset_create(&affinity, false);
3024 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 3016 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
3025 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", 3017 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3026 device_xname(dev), i); 3018 device_xname(dev), i);
3027 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf, 3019 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
3028 sizeof(intrbuf)); 3020 sizeof(intrbuf));
3029#ifdef IXGBE_MPSAFE 3021#ifdef IXGBE_MPSAFE
3030 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE, 3022 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3031 true); 3023 true);
3032#endif 3024#endif
3033 /* Set the handler function */ 3025 /* Set the handler function */
3034 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc, 3026 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3035 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que, 3027 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3036 intr_xname); 3028 intr_xname);
3037 if (que->res == NULL) { 3029 if (que->res == NULL) {
3038 pci_intr_release(pc, adapter->osdep.intrs, 3030 pci_intr_release(pc, adapter->osdep.intrs,
3039 adapter->osdep.nintrs); 3031 adapter->osdep.nintrs);
3040 aprint_error_dev(dev, 3032 aprint_error_dev(dev,
3041 "Failed to register QUE handler\n"); 3033 "Failed to register QUE handler\n");
3042 kcpuset_destroy(affinity); 3034 kcpuset_destroy(affinity);
3043 return (ENXIO); 3035 return (ENXIO);
3044 } 3036 }
3045 que->msix = vector; 3037 que->msix = vector;
3046 adapter->active_queues |= (u64)(1 << que->msix); 3038 adapter->active_queues |= (u64)(1 << que->msix);
3047 3039
3048 cpu_id = i; 3040 cpu_id = i;
3049 /* Round-robin affinity */ 3041 /* Round-robin affinity */
3050 kcpuset_zero(affinity); 3042 kcpuset_zero(affinity);
3051 kcpuset_set(affinity, cpu_id % ncpu); 3043 kcpuset_set(affinity, cpu_id % ncpu);
3052 error = interrupt_distribute(adapter->osdep.ihs[i], affinity, 3044 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3053 NULL); 3045 NULL);
3054 aprint_normal_dev(dev, "for TX/RX, interrupting at %s", 3046 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3055 intrstr); 3047 intrstr);
3056 if (error == 0) 3048 if (error == 0)
3057 aprint_normal(", bound queue %d to cpu %d\n", 3049 aprint_normal(", bound queue %d to cpu %d\n",
3058 i, cpu_id % ncpu); 3050 i, cpu_id % ncpu);
3059 else 3051 else
3060 aprint_normal("\n"); 3052 aprint_normal("\n");
3061 3053
3062#ifndef IXGBE_LEGACY_TX 3054#ifndef IXGBE_LEGACY_TX
3063 txr->txr_si 3055 txr->txr_si
3064 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 3056 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3065 ixgbe_deferred_mq_start, txr); 3057 ixgbe_deferred_mq_start, txr);
3066#endif 3058#endif
3067 que->que_si 3059 que->que_si
3068 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 3060 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3069 ixv_handle_que, que); 3061 ixv_handle_que, que);
3070 if (que->que_si == NULL) { 3062 if (que->que_si == NULL) {
3071 aprint_error_dev(dev, 3063 aprint_error_dev(dev,
3072 "could not establish software interrupt\n"); 3064 "could not establish software interrupt\n");
3073 } 3065 }
3074 } 3066 }
3075 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev)); 3067 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3076 error = workqueue_create(&adapter->txr_wq, wqname, 3068 error = workqueue_create(&adapter->txr_wq, wqname,
3077 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 3069 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3078 IXGBE_WORKQUEUE_FLAGS); 3070 IXGBE_WORKQUEUE_FLAGS);
3079 if (error) { 3071 if (error) {
3080 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n"); 3072 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
3081 } 3073 }
3082 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); 3074 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3083 3075
3084 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); 3076 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3085 error = workqueue_create(&adapter->que_wq, wqname, 3077 error = workqueue_create(&adapter->que_wq, wqname,
3086 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 3078 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3087 IXGBE_WORKQUEUE_FLAGS); 3079 IXGBE_WORKQUEUE_FLAGS);
3088 if (error) { 3080 if (error) {
3089 aprint_error_dev(dev, 3081 aprint_error_dev(dev,
3090 "couldn't create workqueue\n"); 3082 "couldn't create workqueue\n");
3091 } 3083 }
3092 3084
3093 /* and Mailbox */ 3085 /* and Mailbox */
3094 cpu_id++; 3086 cpu_id++;
3095 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev)); 3087 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3096 adapter->vector = vector; 3088 adapter->vector = vector;
3097 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf, 3089 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3098 sizeof(intrbuf)); 3090 sizeof(intrbuf));
3099#ifdef IXGBE_MPSAFE 3091#ifdef IXGBE_MPSAFE
3100 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, 3092 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3101 true); 3093 true);
3102#endif 3094#endif
3103 /* Set the mbx handler function */ 3095 /* Set the mbx handler function */
3104 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc, 3096 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3105 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter, 3097 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3106 intr_xname); 3098 intr_xname);
3107 if (adapter->osdep.ihs[vector] == NULL) { 3099 if (adapter->osdep.ihs[vector] == NULL) {
3108 aprint_error_dev(dev, "Failed to register LINK handler\n"); 3100 aprint_error_dev(dev, "Failed to register LINK handler\n");
3109 kcpuset_destroy(affinity); 3101 kcpuset_destroy(affinity);
3110 return (ENXIO); 3102 return (ENXIO);
3111 } 3103 }
3112 /* Round-robin affinity */ 3104 /* Round-robin affinity */
3113 kcpuset_zero(affinity); 3105 kcpuset_zero(affinity);
3114 kcpuset_set(affinity, cpu_id % ncpu); 3106 kcpuset_set(affinity, cpu_id % ncpu);
3115 error = interrupt_distribute(adapter->osdep.ihs[vector], 3107 error = interrupt_distribute(adapter->osdep.ihs[vector],
3116 affinity, NULL); 3108 affinity, NULL);
3117 3109
3118 aprint_normal_dev(dev, 3110 aprint_normal_dev(dev,
3119 "for link, interrupting at %s", intrstr); 3111 "for link, interrupting at %s", intrstr);
3120 if (error == 0) 3112 if (error == 0)
3121 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu); 3113 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3122 else 3114 else
3123 aprint_normal("\n"); 3115 aprint_normal("\n");
3124 3116
3125 /* Tasklets for Mailbox */ 3117 /* Tasklets for Mailbox */
3126 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS, 3118 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
3127 ixv_handle_link, adapter); 3119 ixv_handle_link, adapter);
3128 /* 3120 /*
3129 * Due to a broken design QEMU will fail to properly 3121 * Due to a broken design QEMU will fail to properly