Sun Sep 1 11:07:06 2019 UTC ()
Pull up following revision(s) (requested by msaitoh in ticket #133):

	sys/dev/pci/ixgbe/ixgbe.c: revision 1.200
	sys/dev/pci/ixgbe/ixgbe.c: revision 1.201
	sys/dev/pci/ixgbe/ixv.c: revision 1.126
	sys/dev/pci/ixgbe/ixv.c: revision 1.127
	sys/net/if_vlan.c: revision 1.142
	sys/net/if_vlan.c: revision 1.143
	sys/net/if_vlan.c: revision 1.144
	sys/net/if_vlan.c: revision 1.145
	sys/net/if_vlan.c: revision 1.146

 Check ec_capenable instead of ec_capabilities to control TX side of VLAN HW
tagging correctly.
XXX pullup-9

 Add missing IFNET_LOCK() and IFNET_UNLOCK() in vlan_config().
XXX pullup-9

 Fix a bug that VLAN HW "tagging" enable/disable may not reflect correctly.
  - Always call ec_vlan_cb() if it exists.
 Some (or all?) ethernet drivers don't enable HW tagging if no vlan is
    attached. ixgbe is one of them. Check the transition and update the
    VLAN HW tagging function.
XXX pullup-9

 Use ETHER_LOCK()/ETHER_UNLOCK() suggested by knakahara.
- kmem_alloc(,KM_SLEEP) never returns NULL, so remove the NULL check.
- VLAN ID is never duplicated, so break the loop when found. Also move
  kmem_free() outside of ETHER_LOCK(ec)/ETHER_UNLOCK(ec) to reduce the hold
  time. Suggested by ozaki-r.
- Whitespace fix.


(martin)
diff -r1.199 -r1.199.2.1 src/sys/dev/pci/ixgbe/ixgbe.c
diff -r1.125 -r1.125.2.1 src/sys/dev/pci/ixgbe/ixv.c
diff -r1.141 -r1.141.2.1 src/sys/net/if_vlan.c

cvs diff -r1.199 -r1.199.2.1 src/sys/dev/pci/ixgbe/ixgbe.c (switch to unified diff)

--- src/sys/dev/pci/ixgbe/ixgbe.c 2019/07/30 08:44:28 1.199
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2019/09/01 11:07:05 1.199.2.1
@@ -1,1220 +1,1221 @@ @@ -1,1220 +1,1221 @@
1/* $NetBSD: ixgbe.c,v 1.199 2019/07/30 08:44:28 msaitoh Exp $ */ 1/* $NetBSD: ixgbe.c,v 1.199.2.1 2019/09/01 11:07:05 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37/* 37/*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc. 38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved. 39 * All rights reserved.
40 * 40 *
41 * This code is derived from software contributed to The NetBSD Foundation 41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc. 42 * by Coyote Point Systems, Inc.
43 * 43 *
44 * Redistribution and use in source and binary forms, with or without 44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions 45 * modification, are permitted provided that the following conditions
46 * are met: 46 * are met:
47 * 1. Redistributions of source code must retain the above copyright 47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer. 48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright 49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the 50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution. 51 * documentation and/or other materials provided with the distribution.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#ifdef _KERNEL_OPT 66#ifdef _KERNEL_OPT
67#include "opt_inet.h" 67#include "opt_inet.h"
68#include "opt_inet6.h" 68#include "opt_inet6.h"
69#include "opt_net_mpsafe.h" 69#include "opt_net_mpsafe.h"
70#endif 70#endif
71 71
72#include "ixgbe.h" 72#include "ixgbe.h"
73#include "ixgbe_sriov.h" 73#include "ixgbe_sriov.h"
74#include "vlan.h" 74#include "vlan.h"
75 75
76#include <sys/cprng.h> 76#include <sys/cprng.h>
77#include <dev/mii/mii.h> 77#include <dev/mii/mii.h>
78#include <dev/mii/miivar.h> 78#include <dev/mii/miivar.h>
79 79
80/************************************************************************ 80/************************************************************************
81 * Driver version 81 * Driver version
82 ************************************************************************/ 82 ************************************************************************/
83static const char ixgbe_driver_version[] = "4.0.1-k"; 83static const char ixgbe_driver_version[] = "4.0.1-k";
84/* XXX NetBSD: + 3.3.10 */ 84/* XXX NetBSD: + 3.3.10 */
85 85
86/************************************************************************ 86/************************************************************************
87 * PCI Device ID Table 87 * PCI Device ID Table
88 * 88 *
89 * Used by probe to select devices to load on 89 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings 90 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s 91 * Last entry must be all 0s
92 * 92 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/ 94 ************************************************************************/
95static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] = 95static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96{ 96{
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, 97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, 98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, 99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, 100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, 101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, 102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0}, 103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, 104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, 105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, 106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, 107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, 108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0}, 109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, 110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, 111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, 112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0}, 113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, 114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, 115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, 116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, 117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, 118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, 119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, 120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, 121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, 122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, 123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, 124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, 125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, 126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0}, 127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, 128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, 129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, 130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0}, 131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0}, 132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0}, 133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0}, 134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0}, 135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0}, 136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0}, 137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0}, 138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0}, 139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0}, 140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0}, 141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0}, 142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0}, 143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0}, 144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0}, 145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0}, 146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
147 /* required last entry */ 147 /* required last entry */
148 {0, 0, 0, 0, 0} 148 {0, 0, 0, 0, 0}
149}; 149};
150 150
151/************************************************************************ 151/************************************************************************
152 * Table of branding strings 152 * Table of branding strings
153 ************************************************************************/ 153 ************************************************************************/
154static const char *ixgbe_strings[] = { 154static const char *ixgbe_strings[] = {
155 "Intel(R) PRO/10GbE PCI-Express Network Driver" 155 "Intel(R) PRO/10GbE PCI-Express Network Driver"
156}; 156};
157 157
158/************************************************************************ 158/************************************************************************
159 * Function prototypes 159 * Function prototypes
160 ************************************************************************/ 160 ************************************************************************/
161static int ixgbe_probe(device_t, cfdata_t, void *); 161static int ixgbe_probe(device_t, cfdata_t, void *);
162static void ixgbe_attach(device_t, device_t, void *); 162static void ixgbe_attach(device_t, device_t, void *);
163static int ixgbe_detach(device_t, int); 163static int ixgbe_detach(device_t, int);
164#if 0 164#if 0
165static int ixgbe_shutdown(device_t); 165static int ixgbe_shutdown(device_t);
166#endif 166#endif
167static bool ixgbe_suspend(device_t, const pmf_qual_t *); 167static bool ixgbe_suspend(device_t, const pmf_qual_t *);
168static bool ixgbe_resume(device_t, const pmf_qual_t *); 168static bool ixgbe_resume(device_t, const pmf_qual_t *);
169static int ixgbe_ifflags_cb(struct ethercom *); 169static int ixgbe_ifflags_cb(struct ethercom *);
170static int ixgbe_ioctl(struct ifnet *, u_long, void *); 170static int ixgbe_ioctl(struct ifnet *, u_long, void *);
171static void ixgbe_ifstop(struct ifnet *, int); 171static void ixgbe_ifstop(struct ifnet *, int);
172static int ixgbe_init(struct ifnet *); 172static int ixgbe_init(struct ifnet *);
173static void ixgbe_init_locked(struct adapter *); 173static void ixgbe_init_locked(struct adapter *);
174static void ixgbe_stop(void *); 174static void ixgbe_stop(void *);
175static void ixgbe_init_device_features(struct adapter *); 175static void ixgbe_init_device_features(struct adapter *);
176static void ixgbe_check_fan_failure(struct adapter *, u32, bool); 176static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
177static void ixgbe_add_media_types(struct adapter *); 177static void ixgbe_add_media_types(struct adapter *);
178static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); 178static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
179static int ixgbe_media_change(struct ifnet *); 179static int ixgbe_media_change(struct ifnet *);
180static int ixgbe_allocate_pci_resources(struct adapter *, 180static int ixgbe_allocate_pci_resources(struct adapter *,
181 const struct pci_attach_args *); 181 const struct pci_attach_args *);
182static void ixgbe_free_softint(struct adapter *); 182static void ixgbe_free_softint(struct adapter *);
183static void ixgbe_get_slot_info(struct adapter *); 183static void ixgbe_get_slot_info(struct adapter *);
184static int ixgbe_allocate_msix(struct adapter *, 184static int ixgbe_allocate_msix(struct adapter *,
185 const struct pci_attach_args *); 185 const struct pci_attach_args *);
186static int ixgbe_allocate_legacy(struct adapter *, 186static int ixgbe_allocate_legacy(struct adapter *,
187 const struct pci_attach_args *); 187 const struct pci_attach_args *);
188static int ixgbe_configure_interrupts(struct adapter *); 188static int ixgbe_configure_interrupts(struct adapter *);
189static void ixgbe_free_pciintr_resources(struct adapter *); 189static void ixgbe_free_pciintr_resources(struct adapter *);
190static void ixgbe_free_pci_resources(struct adapter *); 190static void ixgbe_free_pci_resources(struct adapter *);
191static void ixgbe_local_timer(void *); 191static void ixgbe_local_timer(void *);
192static void ixgbe_local_timer1(void *); 192static void ixgbe_local_timer1(void *);
193static void ixgbe_recovery_mode_timer(void *); 193static void ixgbe_recovery_mode_timer(void *);
194static int ixgbe_setup_interface(device_t, struct adapter *); 194static int ixgbe_setup_interface(device_t, struct adapter *);
195static void ixgbe_config_gpie(struct adapter *); 195static void ixgbe_config_gpie(struct adapter *);
196static void ixgbe_config_dmac(struct adapter *); 196static void ixgbe_config_dmac(struct adapter *);
197static void ixgbe_config_delay_values(struct adapter *); 197static void ixgbe_config_delay_values(struct adapter *);
198static void ixgbe_config_link(struct adapter *); 198static void ixgbe_config_link(struct adapter *);
199static void ixgbe_check_wol_support(struct adapter *); 199static void ixgbe_check_wol_support(struct adapter *);
200static int ixgbe_setup_low_power_mode(struct adapter *); 200static int ixgbe_setup_low_power_mode(struct adapter *);
201#if 0 201#if 0
202static void ixgbe_rearm_queues(struct adapter *, u64); 202static void ixgbe_rearm_queues(struct adapter *, u64);
203#endif 203#endif
204 204
205static void ixgbe_initialize_transmit_units(struct adapter *); 205static void ixgbe_initialize_transmit_units(struct adapter *);
206static void ixgbe_initialize_receive_units(struct adapter *); 206static void ixgbe_initialize_receive_units(struct adapter *);
207static void ixgbe_enable_rx_drop(struct adapter *); 207static void ixgbe_enable_rx_drop(struct adapter *);
208static void ixgbe_disable_rx_drop(struct adapter *); 208static void ixgbe_disable_rx_drop(struct adapter *);
209static void ixgbe_initialize_rss_mapping(struct adapter *); 209static void ixgbe_initialize_rss_mapping(struct adapter *);
210 210
211static void ixgbe_enable_intr(struct adapter *); 211static void ixgbe_enable_intr(struct adapter *);
212static void ixgbe_disable_intr(struct adapter *); 212static void ixgbe_disable_intr(struct adapter *);
213static void ixgbe_update_stats_counters(struct adapter *); 213static void ixgbe_update_stats_counters(struct adapter *);
214static void ixgbe_set_promisc(struct adapter *); 214static void ixgbe_set_promisc(struct adapter *);
215static void ixgbe_set_multi(struct adapter *); 215static void ixgbe_set_multi(struct adapter *);
216static void ixgbe_update_link_status(struct adapter *); 216static void ixgbe_update_link_status(struct adapter *);
217static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); 217static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
218static void ixgbe_configure_ivars(struct adapter *); 218static void ixgbe_configure_ivars(struct adapter *);
219static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 219static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
220static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t); 220static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
221 221
 222static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
222static void ixgbe_setup_vlan_hw_support(struct adapter *); 223static void ixgbe_setup_vlan_hw_support(struct adapter *);
223static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool); 224static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
224static int ixgbe_register_vlan(void *, struct ifnet *, u16); 225static int ixgbe_register_vlan(void *, struct ifnet *, u16);
225static int ixgbe_unregister_vlan(void *, struct ifnet *, u16); 226static int ixgbe_unregister_vlan(void *, struct ifnet *, u16);
226 227
227static void ixgbe_add_device_sysctls(struct adapter *); 228static void ixgbe_add_device_sysctls(struct adapter *);
228static void ixgbe_add_hw_stats(struct adapter *); 229static void ixgbe_add_hw_stats(struct adapter *);
229static void ixgbe_clear_evcnt(struct adapter *); 230static void ixgbe_clear_evcnt(struct adapter *);
230static int ixgbe_set_flowcntl(struct adapter *, int); 231static int ixgbe_set_flowcntl(struct adapter *, int);
231static int ixgbe_set_advertise(struct adapter *, int); 232static int ixgbe_set_advertise(struct adapter *, int);
232static int ixgbe_get_advertise(struct adapter *); 233static int ixgbe_get_advertise(struct adapter *);
233 234
234/* Sysctl handlers */ 235/* Sysctl handlers */
235static void ixgbe_set_sysctl_value(struct adapter *, const char *, 236static void ixgbe_set_sysctl_value(struct adapter *, const char *,
236 const char *, int *, int); 237 const char *, int *, int);
237static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO); 238static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
238static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO); 239static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
239static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); 240static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
240static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO); 241static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
241static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO); 242static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
242static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO); 243static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
243#ifdef IXGBE_DEBUG 244#ifdef IXGBE_DEBUG
244static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO); 245static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
245static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO); 246static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
246#endif 247#endif
247static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO); 248static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
248static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO); 249static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
249static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO); 250static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
250static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO); 251static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
251static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO); 252static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
252static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO); 253static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
253static int ixgbe_sysctl_debug(SYSCTLFN_PROTO); 254static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
254static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO); 255static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
255static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO); 256static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
256 257
257/* Support for pluggable optic modules */ 258/* Support for pluggable optic modules */
258static bool ixgbe_sfp_probe(struct adapter *); 259static bool ixgbe_sfp_probe(struct adapter *);
259 260
260/* Legacy (single vector) interrupt handler */ 261/* Legacy (single vector) interrupt handler */
261static int ixgbe_legacy_irq(void *); 262static int ixgbe_legacy_irq(void *);
262 263
263/* The MSI/MSI-X Interrupt handlers */ 264/* The MSI/MSI-X Interrupt handlers */
264static int ixgbe_msix_que(void *); 265static int ixgbe_msix_que(void *);
265static int ixgbe_msix_link(void *); 266static int ixgbe_msix_link(void *);
266 267
267/* Software interrupts for deferred work */ 268/* Software interrupts for deferred work */
268static void ixgbe_handle_que(void *); 269static void ixgbe_handle_que(void *);
269static void ixgbe_handle_link(void *); 270static void ixgbe_handle_link(void *);
270static void ixgbe_handle_msf(void *); 271static void ixgbe_handle_msf(void *);
271static void ixgbe_handle_mod(void *); 272static void ixgbe_handle_mod(void *);
272static void ixgbe_handle_phy(void *); 273static void ixgbe_handle_phy(void *);
273 274
274/* Workqueue handler for deferred work */ 275/* Workqueue handler for deferred work */
275static void ixgbe_handle_que_work(struct work *, void *); 276static void ixgbe_handle_que_work(struct work *, void *);
276 277
277static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *); 278static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
278 279
279/************************************************************************ 280/************************************************************************
280 * NetBSD Device Interface Entry Points 281 * NetBSD Device Interface Entry Points
281 ************************************************************************/ 282 ************************************************************************/
282CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter), 283CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
283 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL, 284 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
284 DVF_DETACH_SHUTDOWN); 285 DVF_DETACH_SHUTDOWN);
285 286
286#if 0 287#if 0
287devclass_t ix_devclass; 288devclass_t ix_devclass;
288DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); 289DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
289 290
290MODULE_DEPEND(ix, pci, 1, 1, 1); 291MODULE_DEPEND(ix, pci, 1, 1, 1);
291MODULE_DEPEND(ix, ether, 1, 1, 1); 292MODULE_DEPEND(ix, ether, 1, 1, 1);
292#ifdef DEV_NETMAP 293#ifdef DEV_NETMAP
293MODULE_DEPEND(ix, netmap, 1, 1, 1); 294MODULE_DEPEND(ix, netmap, 1, 1, 1);
294#endif 295#endif
295#endif 296#endif
296 297
297/* 298/*
298 * TUNEABLE PARAMETERS: 299 * TUNEABLE PARAMETERS:
299 */ 300 */
300 301
301/* 302/*
302 * AIM: Adaptive Interrupt Moderation 303 * AIM: Adaptive Interrupt Moderation
303 * which means that the interrupt rate 304 * which means that the interrupt rate
304 * is varied over time based on the 305 * is varied over time based on the
305 * traffic for that interrupt vector 306 * traffic for that interrupt vector
306 */ 307 */
307static bool ixgbe_enable_aim = true; 308static bool ixgbe_enable_aim = true;
308#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7) 309#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
309SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0, 310SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
310 "Enable adaptive interrupt moderation"); 311 "Enable adaptive interrupt moderation");
311 312
312static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 313static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
313SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 314SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
314 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); 315 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
315 316
316/* How many packets rxeof tries to clean at a time */ 317/* How many packets rxeof tries to clean at a time */
317static int ixgbe_rx_process_limit = 256; 318static int ixgbe_rx_process_limit = 256;
318SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, 319SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
319 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); 320 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
320 321
321/* How many packets txeof tries to clean at a time */ 322/* How many packets txeof tries to clean at a time */
322static int ixgbe_tx_process_limit = 256; 323static int ixgbe_tx_process_limit = 256;
323SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, 324SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
324 &ixgbe_tx_process_limit, 0, 325 &ixgbe_tx_process_limit, 0,
325 "Maximum number of sent packets to process at a time, -1 means unlimited"); 326 "Maximum number of sent packets to process at a time, -1 means unlimited");
326 327
327/* Flow control setting, default to full */ 328/* Flow control setting, default to full */
328static int ixgbe_flow_control = ixgbe_fc_full; 329static int ixgbe_flow_control = ixgbe_fc_full;
329SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, 330SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
330 &ixgbe_flow_control, 0, "Default flow control used for all adapters"); 331 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
331 332
332/* Which packet processing uses workqueue or softint */ 333/* Which packet processing uses workqueue or softint */
333static bool ixgbe_txrx_workqueue = false; 334static bool ixgbe_txrx_workqueue = false;
334 335
335/* 336/*
336 * Smart speed setting, default to on 337 * Smart speed setting, default to on
337 * this only works as a compile option 338 * this only works as a compile option
338 * right now as its during attach, set 339 * right now as its during attach, set
339 * this to 'ixgbe_smart_speed_off' to 340 * this to 'ixgbe_smart_speed_off' to
340 * disable. 341 * disable.
341 */ 342 */
342static int ixgbe_smart_speed = ixgbe_smart_speed_on; 343static int ixgbe_smart_speed = ixgbe_smart_speed_on;
343 344
344/* 345/*
345 * MSI-X should be the default for best performance, 346 * MSI-X should be the default for best performance,
346 * but this allows it to be forced off for testing. 347 * but this allows it to be forced off for testing.
347 */ 348 */
348static int ixgbe_enable_msix = 1; 349static int ixgbe_enable_msix = 1;
349SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, 350SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
350 "Enable MSI-X interrupts"); 351 "Enable MSI-X interrupts");
351 352
352/* 353/*
353 * Number of Queues, can be set to 0, 354 * Number of Queues, can be set to 0,
354 * it then autoconfigures based on the 355 * it then autoconfigures based on the
355 * number of cpus with a max of 8. This 356 * number of cpus with a max of 8. This
356 * can be overriden manually here. 357 * can be overriden manually here.
357 */ 358 */
358static int ixgbe_num_queues = 0; 359static int ixgbe_num_queues = 0;
359SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, 360SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
360 "Number of queues to configure, 0 indicates autoconfigure"); 361 "Number of queues to configure, 0 indicates autoconfigure");
361 362
362/* 363/*
363 * Number of TX descriptors per ring, 364 * Number of TX descriptors per ring,
364 * setting higher than RX as this seems 365 * setting higher than RX as this seems
365 * the better performing choice. 366 * the better performing choice.
366 */ 367 */
367static int ixgbe_txd = PERFORM_TXD; 368static int ixgbe_txd = PERFORM_TXD;
368SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0, 369SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
369 "Number of transmit descriptors per queue"); 370 "Number of transmit descriptors per queue");
370 371
371/* Number of RX descriptors per ring */ 372/* Number of RX descriptors per ring */
372static int ixgbe_rxd = PERFORM_RXD; 373static int ixgbe_rxd = PERFORM_RXD;
373SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, 374SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
374 "Number of receive descriptors per queue"); 375 "Number of receive descriptors per queue");
375 376
376/* 377/*
377 * Defining this on will allow the use 378 * Defining this on will allow the use
378 * of unsupported SFP+ modules, note that 379 * of unsupported SFP+ modules, note that
379 * doing so you are on your own :) 380 * doing so you are on your own :)
380 */ 381 */
381static int allow_unsupported_sfp = false; 382static int allow_unsupported_sfp = false;
382#define TUNABLE_INT(__x, __y) 383#define TUNABLE_INT(__x, __y)
383TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp); 384TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
384 385
385/* 386/*
386 * Not sure if Flow Director is fully baked, 387 * Not sure if Flow Director is fully baked,
387 * so we'll default to turning it off. 388 * so we'll default to turning it off.
388 */ 389 */
389static int ixgbe_enable_fdir = 0; 390static int ixgbe_enable_fdir = 0;
390SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, 391SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
391 "Enable Flow Director"); 392 "Enable Flow Director");
392 393
393/* Legacy Transmit (single queue) */ 394/* Legacy Transmit (single queue) */
394static int ixgbe_enable_legacy_tx = 0; 395static int ixgbe_enable_legacy_tx = 0;
395SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN, 396SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
396 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow"); 397 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
397 398
398/* Receive-Side Scaling */ 399/* Receive-Side Scaling */
399static int ixgbe_enable_rss = 1; 400static int ixgbe_enable_rss = 1;
400SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, 401SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
401 "Enable Receive-Side Scaling (RSS)"); 402 "Enable Receive-Side Scaling (RSS)");
402 403
403#if 0 404#if 0
404static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *); 405static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
405static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *); 406static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
406#endif 407#endif
407 408
408#ifdef NET_MPSAFE 409#ifdef NET_MPSAFE
409#define IXGBE_MPSAFE 1 410#define IXGBE_MPSAFE 1
410#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 411#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
411#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE 412#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
412#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 413#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
413#else 414#else
414#define IXGBE_CALLOUT_FLAGS 0 415#define IXGBE_CALLOUT_FLAGS 0
415#define IXGBE_SOFTINFT_FLAGS 0 416#define IXGBE_SOFTINFT_FLAGS 0
416#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 417#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
417#endif 418#endif
418#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 419#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
419 420
420/************************************************************************ 421/************************************************************************
421 * ixgbe_initialize_rss_mapping 422 * ixgbe_initialize_rss_mapping
422 ************************************************************************/ 423 ************************************************************************/
423static void 424static void
424ixgbe_initialize_rss_mapping(struct adapter *adapter) 425ixgbe_initialize_rss_mapping(struct adapter *adapter)
425{ 426{
426 struct ixgbe_hw *hw = &adapter->hw; 427 struct ixgbe_hw *hw = &adapter->hw;
427 u32 reta = 0, mrqc, rss_key[10]; 428 u32 reta = 0, mrqc, rss_key[10];
428 int queue_id, table_size, index_mult; 429 int queue_id, table_size, index_mult;
429 int i, j; 430 int i, j;
430 u32 rss_hash_config; 431 u32 rss_hash_config;
431 432
432 /* force use default RSS key. */ 433 /* force use default RSS key. */
433#ifdef __NetBSD__ 434#ifdef __NetBSD__
434 rss_getkey((uint8_t *) &rss_key); 435 rss_getkey((uint8_t *) &rss_key);
435#else 436#else
436 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 437 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
437 /* Fetch the configured RSS key */ 438 /* Fetch the configured RSS key */
438 rss_getkey((uint8_t *) &rss_key); 439 rss_getkey((uint8_t *) &rss_key);
439 } else { 440 } else {
440 /* set up random bits */ 441 /* set up random bits */
441 cprng_fast(&rss_key, sizeof(rss_key)); 442 cprng_fast(&rss_key, sizeof(rss_key));
442 } 443 }
443#endif 444#endif
444 445
445 /* Set multiplier for RETA setup and table size based on MAC */ 446 /* Set multiplier for RETA setup and table size based on MAC */
446 index_mult = 0x1; 447 index_mult = 0x1;
447 table_size = 128; 448 table_size = 128;
448 switch (adapter->hw.mac.type) { 449 switch (adapter->hw.mac.type) {
449 case ixgbe_mac_82598EB: 450 case ixgbe_mac_82598EB:
450 index_mult = 0x11; 451 index_mult = 0x11;
451 break; 452 break;
452 case ixgbe_mac_X550: 453 case ixgbe_mac_X550:
453 case ixgbe_mac_X550EM_x: 454 case ixgbe_mac_X550EM_x:
454 case ixgbe_mac_X550EM_a: 455 case ixgbe_mac_X550EM_a:
455 table_size = 512; 456 table_size = 512;
456 break; 457 break;
457 default: 458 default:
458 break; 459 break;
459 } 460 }
460 461
461 /* Set up the redirection table */ 462 /* Set up the redirection table */
462 for (i = 0, j = 0; i < table_size; i++, j++) { 463 for (i = 0, j = 0; i < table_size; i++, j++) {
463 if (j == adapter->num_queues) 464 if (j == adapter->num_queues)
464 j = 0; 465 j = 0;
465 466
466 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 467 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
467 /* 468 /*
468 * Fetch the RSS bucket id for the given indirection 469 * Fetch the RSS bucket id for the given indirection
469 * entry. Cap it at the number of configured buckets 470 * entry. Cap it at the number of configured buckets
470 * (which is num_queues.) 471 * (which is num_queues.)
471 */ 472 */
472 queue_id = rss_get_indirection_to_bucket(i); 473 queue_id = rss_get_indirection_to_bucket(i);
473 queue_id = queue_id % adapter->num_queues; 474 queue_id = queue_id % adapter->num_queues;
474 } else 475 } else
475 queue_id = (j * index_mult); 476 queue_id = (j * index_mult);
476 477
477 /* 478 /*
478 * The low 8 bits are for hash value (n+0); 479 * The low 8 bits are for hash value (n+0);
479 * The next 8 bits are for hash value (n+1), etc. 480 * The next 8 bits are for hash value (n+1), etc.
480 */ 481 */
481 reta = reta >> 8; 482 reta = reta >> 8;
482 reta = reta | (((uint32_t) queue_id) << 24); 483 reta = reta | (((uint32_t) queue_id) << 24);
483 if ((i & 3) == 3) { 484 if ((i & 3) == 3) {
484 if (i < 128) 485 if (i < 128)
485 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 486 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
486 else 487 else
487 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 488 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
488 reta); 489 reta);
489 reta = 0; 490 reta = 0;
490 } 491 }
491 } 492 }
492 493
493 /* Now fill our hash function seeds */ 494 /* Now fill our hash function seeds */
494 for (i = 0; i < 10; i++) 495 for (i = 0; i < 10; i++)
495 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 496 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
496 497
497 /* Perform hash on these packet types */ 498 /* Perform hash on these packet types */
498 if (adapter->feat_en & IXGBE_FEATURE_RSS) 499 if (adapter->feat_en & IXGBE_FEATURE_RSS)
499 rss_hash_config = rss_gethashconfig(); 500 rss_hash_config = rss_gethashconfig();
500 else { 501 else {
501 /* 502 /*
502 * Disable UDP - IP fragments aren't currently being handled 503 * Disable UDP - IP fragments aren't currently being handled
503 * and so we end up with a mix of 2-tuple and 4-tuple 504 * and so we end up with a mix of 2-tuple and 4-tuple
504 * traffic. 505 * traffic.
505 */ 506 */
506 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 507 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
507 | RSS_HASHTYPE_RSS_TCP_IPV4 508 | RSS_HASHTYPE_RSS_TCP_IPV4
508 | RSS_HASHTYPE_RSS_IPV6 509 | RSS_HASHTYPE_RSS_IPV6
509 | RSS_HASHTYPE_RSS_TCP_IPV6 510 | RSS_HASHTYPE_RSS_TCP_IPV6
510 | RSS_HASHTYPE_RSS_IPV6_EX 511 | RSS_HASHTYPE_RSS_IPV6_EX
511 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 512 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
512 } 513 }
513 514
514 mrqc = IXGBE_MRQC_RSSEN; 515 mrqc = IXGBE_MRQC_RSSEN;
515 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 516 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
516 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 517 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
517 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 518 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
518 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 519 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
519 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 520 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
520 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
521 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 522 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
522 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
523 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 524 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
524 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
525 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 526 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
526 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
527 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 528 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
528 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
529 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 530 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
530 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
531 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 532 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
532 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
533 mrqc |= ixgbe_get_mrqc(adapter->iov_mode); 534 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
534 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 535 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
535} /* ixgbe_initialize_rss_mapping */ 536} /* ixgbe_initialize_rss_mapping */
536 537
537/************************************************************************ 538/************************************************************************
538 * ixgbe_initialize_receive_units - Setup receive registers and features. 539 * ixgbe_initialize_receive_units - Setup receive registers and features.
539 ************************************************************************/ 540 ************************************************************************/
540#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 541#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
541 542
542static void 543static void
543ixgbe_initialize_receive_units(struct adapter *adapter) 544ixgbe_initialize_receive_units(struct adapter *adapter)
544{ 545{
545 struct rx_ring *rxr = adapter->rx_rings; 546 struct rx_ring *rxr = adapter->rx_rings;
546 struct ixgbe_hw *hw = &adapter->hw; 547 struct ixgbe_hw *hw = &adapter->hw;
547 struct ifnet *ifp = adapter->ifp; 548 struct ifnet *ifp = adapter->ifp;
548 int i, j; 549 int i, j;
549 u32 bufsz, fctrl, srrctl, rxcsum; 550 u32 bufsz, fctrl, srrctl, rxcsum;
550 u32 hlreg; 551 u32 hlreg;
551 552
552 /* 553 /*
553 * Make sure receives are disabled while 554 * Make sure receives are disabled while
554 * setting up the descriptor ring 555 * setting up the descriptor ring
555 */ 556 */
556 ixgbe_disable_rx(hw); 557 ixgbe_disable_rx(hw);
557 558
558 /* Enable broadcasts */ 559 /* Enable broadcasts */
559 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 560 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
560 fctrl |= IXGBE_FCTRL_BAM; 561 fctrl |= IXGBE_FCTRL_BAM;
561 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 562 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
562 fctrl |= IXGBE_FCTRL_DPF; 563 fctrl |= IXGBE_FCTRL_DPF;
563 fctrl |= IXGBE_FCTRL_PMCF; 564 fctrl |= IXGBE_FCTRL_PMCF;
564 } 565 }
565 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 566 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
566 567
567 /* Set for Jumbo Frames? */ 568 /* Set for Jumbo Frames? */
568 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 569 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
569 if (ifp->if_mtu > ETHERMTU) 570 if (ifp->if_mtu > ETHERMTU)
570 hlreg |= IXGBE_HLREG0_JUMBOEN; 571 hlreg |= IXGBE_HLREG0_JUMBOEN;
571 else 572 else
572 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 573 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
573 574
574#ifdef DEV_NETMAP 575#ifdef DEV_NETMAP
575 /* CRC stripping is conditional in Netmap */ 576 /* CRC stripping is conditional in Netmap */
576 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 577 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
577 (ifp->if_capenable & IFCAP_NETMAP) && 578 (ifp->if_capenable & IFCAP_NETMAP) &&
578 !ix_crcstrip) 579 !ix_crcstrip)
579 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP; 580 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
580 else 581 else
581#endif /* DEV_NETMAP */ 582#endif /* DEV_NETMAP */
582 hlreg |= IXGBE_HLREG0_RXCRCSTRP; 583 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
583 584
584 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 585 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
585 586
586 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 587 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
587 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 588 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
588 589
589 for (i = 0; i < adapter->num_queues; i++, rxr++) { 590 for (i = 0; i < adapter->num_queues; i++, rxr++) {
590 u64 rdba = rxr->rxdma.dma_paddr; 591 u64 rdba = rxr->rxdma.dma_paddr;
591 u32 reg; 592 u32 reg;
592 int regnum = i / 4; /* 1 register per 4 queues */ 593 int regnum = i / 4; /* 1 register per 4 queues */
593 int regshift = i % 4; /* 4 bits per 1 queue */ 594 int regshift = i % 4; /* 4 bits per 1 queue */
594 j = rxr->me; 595 j = rxr->me;
595 596
596 /* Setup the Base and Length of the Rx Descriptor Ring */ 597 /* Setup the Base and Length of the Rx Descriptor Ring */
597 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 598 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
598 (rdba & 0x00000000ffffffffULL)); 599 (rdba & 0x00000000ffffffffULL));
599 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 600 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
600 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 601 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
601 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 602 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
602 603
603 /* Set up the SRRCTL register */ 604 /* Set up the SRRCTL register */
604 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 605 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
605 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 606 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
606 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 607 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
607 srrctl |= bufsz; 608 srrctl |= bufsz;
608 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 609 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
609 610
610 /* Set RQSMR (Receive Queue Statistic Mapping) register */ 611 /* Set RQSMR (Receive Queue Statistic Mapping) register */
611 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum)); 612 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
612 reg &= ~(0x000000ffUL << (regshift * 8)); 613 reg &= ~(0x000000ffUL << (regshift * 8));
613 reg |= i << (regshift * 8); 614 reg |= i << (regshift * 8);
614 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg); 615 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
615 616
616 /* 617 /*
617 * Set DROP_EN iff we have no flow control and >1 queue. 618 * Set DROP_EN iff we have no flow control and >1 queue.
618 * Note that srrctl was cleared shortly before during reset, 619 * Note that srrctl was cleared shortly before during reset,
619 * so we do not need to clear the bit, but do it just in case 620 * so we do not need to clear the bit, but do it just in case
620 * this code is moved elsewhere. 621 * this code is moved elsewhere.
621 */ 622 */
622 if (adapter->num_queues > 1 && 623 if (adapter->num_queues > 1 &&
623 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 624 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
624 srrctl |= IXGBE_SRRCTL_DROP_EN; 625 srrctl |= IXGBE_SRRCTL_DROP_EN;
625 } else { 626 } else {
626 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 627 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
627 } 628 }
628 629
629 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 630 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
630 631
631 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 632 /* Setup the HW Rx Head and Tail Descriptor Pointers */
632 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 633 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
633 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 634 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
634 635
635 /* Set the driver rx tail address */ 636 /* Set the driver rx tail address */
636 rxr->tail = IXGBE_RDT(rxr->me); 637 rxr->tail = IXGBE_RDT(rxr->me);
637 } 638 }
638 639
639 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 640 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
640 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 641 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
641 | IXGBE_PSRTYPE_UDPHDR 642 | IXGBE_PSRTYPE_UDPHDR
642 | IXGBE_PSRTYPE_IPV4HDR 643 | IXGBE_PSRTYPE_IPV4HDR
643 | IXGBE_PSRTYPE_IPV6HDR; 644 | IXGBE_PSRTYPE_IPV6HDR;
644 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 645 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
645 } 646 }
646 647
647 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 648 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
648 649
649 ixgbe_initialize_rss_mapping(adapter); 650 ixgbe_initialize_rss_mapping(adapter);
650 651
651 if (adapter->num_queues > 1) { 652 if (adapter->num_queues > 1) {
652 /* RSS and RX IPP Checksum are mutually exclusive */ 653 /* RSS and RX IPP Checksum are mutually exclusive */
653 rxcsum |= IXGBE_RXCSUM_PCSD; 654 rxcsum |= IXGBE_RXCSUM_PCSD;
654 } 655 }
655 656
656 if (ifp->if_capenable & IFCAP_RXCSUM) 657 if (ifp->if_capenable & IFCAP_RXCSUM)
657 rxcsum |= IXGBE_RXCSUM_PCSD; 658 rxcsum |= IXGBE_RXCSUM_PCSD;
658 659
659 /* This is useful for calculating UDP/IP fragment checksums */ 660 /* This is useful for calculating UDP/IP fragment checksums */
660 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 661 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
661 rxcsum |= IXGBE_RXCSUM_IPPCSE; 662 rxcsum |= IXGBE_RXCSUM_IPPCSE;
662 663
663 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 664 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
664 665
665} /* ixgbe_initialize_receive_units */ 666} /* ixgbe_initialize_receive_units */
666 667
667/************************************************************************ 668/************************************************************************
668 * ixgbe_initialize_transmit_units - Enable transmit units. 669 * ixgbe_initialize_transmit_units - Enable transmit units.
669 ************************************************************************/ 670 ************************************************************************/
670static void 671static void
671ixgbe_initialize_transmit_units(struct adapter *adapter) 672ixgbe_initialize_transmit_units(struct adapter *adapter)
672{ 673{
673 struct tx_ring *txr = adapter->tx_rings; 674 struct tx_ring *txr = adapter->tx_rings;
674 struct ixgbe_hw *hw = &adapter->hw; 675 struct ixgbe_hw *hw = &adapter->hw;
675 int i; 676 int i;
676 677
677 /* Setup the Base and Length of the Tx Descriptor Ring */ 678 /* Setup the Base and Length of the Tx Descriptor Ring */
678 for (i = 0; i < adapter->num_queues; i++, txr++) { 679 for (i = 0; i < adapter->num_queues; i++, txr++) {
679 u64 tdba = txr->txdma.dma_paddr; 680 u64 tdba = txr->txdma.dma_paddr;
680 u32 txctrl = 0; 681 u32 txctrl = 0;
681 u32 tqsmreg, reg; 682 u32 tqsmreg, reg;
682 int regnum = i / 4; /* 1 register per 4 queues */ 683 int regnum = i / 4; /* 1 register per 4 queues */
683 int regshift = i % 4; /* 4 bits per 1 queue */ 684 int regshift = i % 4; /* 4 bits per 1 queue */
684 int j = txr->me; 685 int j = txr->me;
685 686
686 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 687 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
687 (tdba & 0x00000000ffffffffULL)); 688 (tdba & 0x00000000ffffffffULL));
688 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 689 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
689 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 690 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
690 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 691 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
691 692
692 /* 693 /*
693 * Set TQSMR (Transmit Queue Statistic Mapping) register. 694 * Set TQSMR (Transmit Queue Statistic Mapping) register.
694 * Register location is different between 82598 and others. 695 * Register location is different between 82598 and others.
695 */ 696 */
696 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 697 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
697 tqsmreg = IXGBE_TQSMR(regnum); 698 tqsmreg = IXGBE_TQSMR(regnum);
698 else 699 else
699 tqsmreg = IXGBE_TQSM(regnum); 700 tqsmreg = IXGBE_TQSM(regnum);
700 reg = IXGBE_READ_REG(hw, tqsmreg); 701 reg = IXGBE_READ_REG(hw, tqsmreg);
701 reg &= ~(0x000000ffUL << (regshift * 8)); 702 reg &= ~(0x000000ffUL << (regshift * 8));
702 reg |= i << (regshift * 8); 703 reg |= i << (regshift * 8);
703 IXGBE_WRITE_REG(hw, tqsmreg, reg); 704 IXGBE_WRITE_REG(hw, tqsmreg, reg);
704 705
705 /* Setup the HW Tx Head and Tail descriptor pointers */ 706 /* Setup the HW Tx Head and Tail descriptor pointers */
706 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 707 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
707 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 708 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
708 709
709 /* Cache the tail address */ 710 /* Cache the tail address */
710 txr->tail = IXGBE_TDT(j); 711 txr->tail = IXGBE_TDT(j);
711 712
712 txr->txr_no_space = false; 713 txr->txr_no_space = false;
713 714
714 /* Disable Head Writeback */ 715 /* Disable Head Writeback */
715 /* 716 /*
716 * Note: for X550 series devices, these registers are actually 717 * Note: for X550 series devices, these registers are actually
717 * prefixed with TPH_ isntead of DCA_, but the addresses and 718 * prefixed with TPH_ isntead of DCA_, but the addresses and
718 * fields remain the same. 719 * fields remain the same.
719 */ 720 */
720 switch (hw->mac.type) { 721 switch (hw->mac.type) {
721 case ixgbe_mac_82598EB: 722 case ixgbe_mac_82598EB:
722 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 723 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
723 break; 724 break;
724 default: 725 default:
725 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 726 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
726 break; 727 break;
727 } 728 }
728 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 729 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
729 switch (hw->mac.type) { 730 switch (hw->mac.type) {
730 case ixgbe_mac_82598EB: 731 case ixgbe_mac_82598EB:
731 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 732 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
732 break; 733 break;
733 default: 734 default:
734 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 735 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
735 break; 736 break;
736 } 737 }
737 738
738 } 739 }
739 740
740 if (hw->mac.type != ixgbe_mac_82598EB) { 741 if (hw->mac.type != ixgbe_mac_82598EB) {
741 u32 dmatxctl, rttdcs; 742 u32 dmatxctl, rttdcs;
742 743
743 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 744 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
744 dmatxctl |= IXGBE_DMATXCTL_TE; 745 dmatxctl |= IXGBE_DMATXCTL_TE;
745 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 746 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
746 /* Disable arbiter to set MTQC */ 747 /* Disable arbiter to set MTQC */
747 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 748 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
748 rttdcs |= IXGBE_RTTDCS_ARBDIS; 749 rttdcs |= IXGBE_RTTDCS_ARBDIS;
749 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 750 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
750 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 751 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
751 ixgbe_get_mtqc(adapter->iov_mode)); 752 ixgbe_get_mtqc(adapter->iov_mode));
752 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 753 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
753 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 754 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
754 } 755 }
755 756
756 return; 757 return;
757} /* ixgbe_initialize_transmit_units */ 758} /* ixgbe_initialize_transmit_units */
758 759
759/************************************************************************ 760/************************************************************************
760 * ixgbe_attach - Device initialization routine 761 * ixgbe_attach - Device initialization routine
761 * 762 *
762 * Called when the driver is being loaded. 763 * Called when the driver is being loaded.
763 * Identifies the type of hardware, allocates all resources 764 * Identifies the type of hardware, allocates all resources
764 * and initializes the hardware. 765 * and initializes the hardware.
765 * 766 *
766 * return 0 on success, positive on failure 767 * return 0 on success, positive on failure
767 ************************************************************************/ 768 ************************************************************************/
768static void 769static void
769ixgbe_attach(device_t parent, device_t dev, void *aux) 770ixgbe_attach(device_t parent, device_t dev, void *aux)
770{ 771{
771 struct adapter *adapter; 772 struct adapter *adapter;
772 struct ixgbe_hw *hw; 773 struct ixgbe_hw *hw;
773 int error = -1; 774 int error = -1;
774 u32 ctrl_ext; 775 u32 ctrl_ext;
775 u16 high, low, nvmreg; 776 u16 high, low, nvmreg;
776 pcireg_t id, subid; 777 pcireg_t id, subid;
777 const ixgbe_vendor_info_t *ent; 778 const ixgbe_vendor_info_t *ent;
778 struct pci_attach_args *pa = aux; 779 struct pci_attach_args *pa = aux;
779 const char *str; 780 const char *str;
780 char buf[256]; 781 char buf[256];
781 782
782 INIT_DEBUGOUT("ixgbe_attach: begin"); 783 INIT_DEBUGOUT("ixgbe_attach: begin");
783 784
784 /* Allocate, clear, and link in our adapter structure */ 785 /* Allocate, clear, and link in our adapter structure */
785 adapter = device_private(dev); 786 adapter = device_private(dev);
786 adapter->hw.back = adapter; 787 adapter->hw.back = adapter;
787 adapter->dev = dev; 788 adapter->dev = dev;
788 hw = &adapter->hw; 789 hw = &adapter->hw;
789 adapter->osdep.pc = pa->pa_pc; 790 adapter->osdep.pc = pa->pa_pc;
790 adapter->osdep.tag = pa->pa_tag; 791 adapter->osdep.tag = pa->pa_tag;
791 if (pci_dma64_available(pa)) 792 if (pci_dma64_available(pa))
792 adapter->osdep.dmat = pa->pa_dmat64; 793 adapter->osdep.dmat = pa->pa_dmat64;
793 else 794 else
794 adapter->osdep.dmat = pa->pa_dmat; 795 adapter->osdep.dmat = pa->pa_dmat;
795 adapter->osdep.attached = false; 796 adapter->osdep.attached = false;
796 797
797 ent = ixgbe_lookup(pa); 798 ent = ixgbe_lookup(pa);
798 799
799 KASSERT(ent != NULL); 800 KASSERT(ent != NULL);
800 801
801 aprint_normal(": %s, Version - %s\n", 802 aprint_normal(": %s, Version - %s\n",
802 ixgbe_strings[ent->index], ixgbe_driver_version); 803 ixgbe_strings[ent->index], ixgbe_driver_version);
803 804
804 /* Core Lock Init*/ 805 /* Core Lock Init*/
805 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 806 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
806 807
807 /* Set up the timer callout */ 808 /* Set up the timer callout */
808 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 809 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
809 810
810 /* Determine hardware revision */ 811 /* Determine hardware revision */
811 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 812 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
812 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 813 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
813 814
814 hw->vendor_id = PCI_VENDOR(id); 815 hw->vendor_id = PCI_VENDOR(id);
815 hw->device_id = PCI_PRODUCT(id); 816 hw->device_id = PCI_PRODUCT(id);
816 hw->revision_id = 817 hw->revision_id =
817 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 818 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
818 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 819 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
819 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 820 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
820 821
821 /* 822 /*
822 * Make sure BUSMASTER is set 823 * Make sure BUSMASTER is set
823 */ 824 */
824 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 825 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
825 826
826 /* Do base PCI setup - map BAR0 */ 827 /* Do base PCI setup - map BAR0 */
827 if (ixgbe_allocate_pci_resources(adapter, pa)) { 828 if (ixgbe_allocate_pci_resources(adapter, pa)) {
828 aprint_error_dev(dev, "Allocation of PCI resources failed\n"); 829 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
829 error = ENXIO; 830 error = ENXIO;
830 goto err_out; 831 goto err_out;
831 } 832 }
832 833
833 /* let hardware know driver is loaded */ 834 /* let hardware know driver is loaded */
834 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 835 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
835 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 836 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
836 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 837 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
837 838
838 /* 839 /*
839 * Initialize the shared code 840 * Initialize the shared code
840 */ 841 */
841 if (ixgbe_init_shared_code(hw) != 0) { 842 if (ixgbe_init_shared_code(hw) != 0) {
842 aprint_error_dev(dev, "Unable to initialize the shared code\n"); 843 aprint_error_dev(dev, "Unable to initialize the shared code\n");
843 error = ENXIO; 844 error = ENXIO;
844 goto err_out; 845 goto err_out;
845 } 846 }
846 847
847 switch (hw->mac.type) { 848 switch (hw->mac.type) {
848 case ixgbe_mac_82598EB: 849 case ixgbe_mac_82598EB:
849 str = "82598EB"; 850 str = "82598EB";
850 break; 851 break;
851 case ixgbe_mac_82599EB: 852 case ixgbe_mac_82599EB:
852 str = "82599EB"; 853 str = "82599EB";
853 break; 854 break;
854 case ixgbe_mac_X540: 855 case ixgbe_mac_X540:
855 str = "X540"; 856 str = "X540";
856 break; 857 break;
857 case ixgbe_mac_X550: 858 case ixgbe_mac_X550:
858 str = "X550"; 859 str = "X550";
859 break; 860 break;
860 case ixgbe_mac_X550EM_x: 861 case ixgbe_mac_X550EM_x:
861 str = "X550EM"; 862 str = "X550EM";
862 break; 863 break;
863 case ixgbe_mac_X550EM_a: 864 case ixgbe_mac_X550EM_a:
864 str = "X550EM A"; 865 str = "X550EM A";
865 break; 866 break;
866 default: 867 default:
867 str = "Unknown"; 868 str = "Unknown";
868 break; 869 break;
869 } 870 }
870 aprint_normal_dev(dev, "device %s\n", str); 871 aprint_normal_dev(dev, "device %s\n", str);
871 872
872 if (hw->mbx.ops.init_params) 873 if (hw->mbx.ops.init_params)
873 hw->mbx.ops.init_params(hw); 874 hw->mbx.ops.init_params(hw);
874 875
875 hw->allow_unsupported_sfp = allow_unsupported_sfp; 876 hw->allow_unsupported_sfp = allow_unsupported_sfp;
876 877
877 /* Pick up the 82599 settings */ 878 /* Pick up the 82599 settings */
878 if (hw->mac.type != ixgbe_mac_82598EB) { 879 if (hw->mac.type != ixgbe_mac_82598EB) {
879 hw->phy.smart_speed = ixgbe_smart_speed; 880 hw->phy.smart_speed = ixgbe_smart_speed;
880 adapter->num_segs = IXGBE_82599_SCATTER; 881 adapter->num_segs = IXGBE_82599_SCATTER;
881 } else 882 } else
882 adapter->num_segs = IXGBE_82598_SCATTER; 883 adapter->num_segs = IXGBE_82598_SCATTER;
883 884
884 /* Ensure SW/FW semaphore is free */ 885 /* Ensure SW/FW semaphore is free */
885 ixgbe_init_swfw_semaphore(hw); 886 ixgbe_init_swfw_semaphore(hw);
886 887
887 hw->mac.ops.set_lan_id(hw); 888 hw->mac.ops.set_lan_id(hw);
888 ixgbe_init_device_features(adapter); 889 ixgbe_init_device_features(adapter);
889 890
890 if (ixgbe_configure_interrupts(adapter)) { 891 if (ixgbe_configure_interrupts(adapter)) {
891 error = ENXIO; 892 error = ENXIO;
892 goto err_out; 893 goto err_out;
893 } 894 }
894 895
895 /* Allocate multicast array memory. */ 896 /* Allocate multicast array memory. */
896 adapter->mta = malloc(sizeof(*adapter->mta) * 897 adapter->mta = malloc(sizeof(*adapter->mta) *
897 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); 898 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
898 if (adapter->mta == NULL) { 899 if (adapter->mta == NULL) {
899 aprint_error_dev(dev, "Cannot allocate multicast setup array\n"); 900 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
900 error = ENOMEM; 901 error = ENOMEM;
901 goto err_out; 902 goto err_out;
902 } 903 }
903 904
904 /* Enable WoL (if supported) */ 905 /* Enable WoL (if supported) */
905 ixgbe_check_wol_support(adapter); 906 ixgbe_check_wol_support(adapter);
906 907
907 /* Register for VLAN events */ 908 /* Register for VLAN events */
908 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); 909 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
909 910
910 /* Verify adapter fan is still functional (if applicable) */ 911 /* Verify adapter fan is still functional (if applicable) */
911 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 912 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
912 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 913 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
913 ixgbe_check_fan_failure(adapter, esdp, FALSE); 914 ixgbe_check_fan_failure(adapter, esdp, FALSE);
914 } 915 }
915 916
916 /* Set an initial default flow control value */ 917 /* Set an initial default flow control value */
917 hw->fc.requested_mode = ixgbe_flow_control; 918 hw->fc.requested_mode = ixgbe_flow_control;
918 919
919 /* Sysctls for limiting the amount of work done in the taskqueues */ 920 /* Sysctls for limiting the amount of work done in the taskqueues */
920 ixgbe_set_sysctl_value(adapter, "rx_processing_limit", 921 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
921 "max number of rx packets to process", 922 "max number of rx packets to process",
922 &adapter->rx_process_limit, ixgbe_rx_process_limit); 923 &adapter->rx_process_limit, ixgbe_rx_process_limit);
923 924
924 ixgbe_set_sysctl_value(adapter, "tx_processing_limit", 925 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
925 "max number of tx packets to process", 926 "max number of tx packets to process",
926 &adapter->tx_process_limit, ixgbe_tx_process_limit); 927 &adapter->tx_process_limit, ixgbe_tx_process_limit);
927 928
928 /* Do descriptor calc and sanity checks */ 929 /* Do descriptor calc and sanity checks */
929 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 930 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
930 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { 931 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
931 aprint_error_dev(dev, "TXD config issue, using default!\n"); 932 aprint_error_dev(dev, "TXD config issue, using default!\n");
932 adapter->num_tx_desc = DEFAULT_TXD; 933 adapter->num_tx_desc = DEFAULT_TXD;
933 } else 934 } else
934 adapter->num_tx_desc = ixgbe_txd; 935 adapter->num_tx_desc = ixgbe_txd;
935 936
936 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 937 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
937 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { 938 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
938 aprint_error_dev(dev, "RXD config issue, using default!\n"); 939 aprint_error_dev(dev, "RXD config issue, using default!\n");
939 adapter->num_rx_desc = DEFAULT_RXD; 940 adapter->num_rx_desc = DEFAULT_RXD;
940 } else 941 } else
941 adapter->num_rx_desc = ixgbe_rxd; 942 adapter->num_rx_desc = ixgbe_rxd;
942 943
943 /* Allocate our TX/RX Queues */ 944 /* Allocate our TX/RX Queues */
944 if (ixgbe_allocate_queues(adapter)) { 945 if (ixgbe_allocate_queues(adapter)) {
945 error = ENOMEM; 946 error = ENOMEM;
946 goto err_out; 947 goto err_out;
947 } 948 }
948 949
949 hw->phy.reset_if_overtemp = TRUE; 950 hw->phy.reset_if_overtemp = TRUE;
950 error = ixgbe_reset_hw(hw); 951 error = ixgbe_reset_hw(hw);
951 hw->phy.reset_if_overtemp = FALSE; 952 hw->phy.reset_if_overtemp = FALSE;
952 if (error == IXGBE_ERR_SFP_NOT_PRESENT) { 953 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
953 /* 954 /*
954 * No optics in this port, set up 955 * No optics in this port, set up
955 * so the timer routine will probe 956 * so the timer routine will probe
956 * for later insertion. 957 * for later insertion.
957 */ 958 */
958 adapter->sfp_probe = TRUE; 959 adapter->sfp_probe = TRUE;
959 error = IXGBE_SUCCESS; 960 error = IXGBE_SUCCESS;
960 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 961 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
961 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); 962 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
962 error = EIO; 963 error = EIO;
963 goto err_late; 964 goto err_late;
964 } else if (error) { 965 } else if (error) {
965 aprint_error_dev(dev, "Hardware initialization failed\n"); 966 aprint_error_dev(dev, "Hardware initialization failed\n");
966 error = EIO; 967 error = EIO;
967 goto err_late; 968 goto err_late;
968 } 969 }
969 970
970 /* Make sure we have a good EEPROM before we read from it */ 971 /* Make sure we have a good EEPROM before we read from it */
971 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { 972 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
972 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n"); 973 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
973 error = EIO; 974 error = EIO;
974 goto err_late; 975 goto err_late;
975 } 976 }
976 977
977 aprint_normal("%s:", device_xname(dev)); 978 aprint_normal("%s:", device_xname(dev));
978 /* NVM Image Version */ 979 /* NVM Image Version */
979 high = low = 0; 980 high = low = 0;
980 switch (hw->mac.type) { 981 switch (hw->mac.type) {
981 case ixgbe_mac_X540: 982 case ixgbe_mac_X540:
982 case ixgbe_mac_X550EM_a: 983 case ixgbe_mac_X550EM_a:
983 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 984 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
984 if (nvmreg == 0xffff) 985 if (nvmreg == 0xffff)
985 break; 986 break;
986 high = (nvmreg >> 12) & 0x0f; 987 high = (nvmreg >> 12) & 0x0f;
987 low = (nvmreg >> 4) & 0xff; 988 low = (nvmreg >> 4) & 0xff;
988 id = nvmreg & 0x0f; 989 id = nvmreg & 0x0f;
989 aprint_normal(" NVM Image Version %u.", high); 990 aprint_normal(" NVM Image Version %u.", high);
990 if (hw->mac.type == ixgbe_mac_X540) 991 if (hw->mac.type == ixgbe_mac_X540)
991 str = "%x"; 992 str = "%x";
992 else 993 else
993 str = "%02x"; 994 str = "%02x";
994 aprint_normal(str, low); 995 aprint_normal(str, low);
995 aprint_normal(" ID 0x%x,", id); 996 aprint_normal(" ID 0x%x,", id);
996 break; 997 break;
997 case ixgbe_mac_X550EM_x: 998 case ixgbe_mac_X550EM_x:
998 case ixgbe_mac_X550: 999 case ixgbe_mac_X550:
999 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 1000 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1000 if (nvmreg == 0xffff) 1001 if (nvmreg == 0xffff)
1001 break; 1002 break;
1002 high = (nvmreg >> 12) & 0x0f; 1003 high = (nvmreg >> 12) & 0x0f;
1003 low = nvmreg & 0xff; 1004 low = nvmreg & 0xff;
1004 aprint_normal(" NVM Image Version %u.%02x,", high, low); 1005 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1005 break; 1006 break;
1006 default: 1007 default:
1007 break; 1008 break;
1008 } 1009 }
1009 hw->eeprom.nvm_image_ver_high = high; 1010 hw->eeprom.nvm_image_ver_high = high;
1010 hw->eeprom.nvm_image_ver_low = low; 1011 hw->eeprom.nvm_image_ver_low = low;
1011 1012
1012 /* PHY firmware revision */ 1013 /* PHY firmware revision */
1013 switch (hw->mac.type) { 1014 switch (hw->mac.type) {
1014 case ixgbe_mac_X540: 1015 case ixgbe_mac_X540:
1015 case ixgbe_mac_X550: 1016 case ixgbe_mac_X550:
1016 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg); 1017 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1017 if (nvmreg == 0xffff) 1018 if (nvmreg == 0xffff)
1018 break; 1019 break;
1019 high = (nvmreg >> 12) & 0x0f; 1020 high = (nvmreg >> 12) & 0x0f;
1020 low = (nvmreg >> 4) & 0xff; 1021 low = (nvmreg >> 4) & 0xff;
1021 id = nvmreg & 0x000f; 1022 id = nvmreg & 0x000f;
1022 aprint_normal(" PHY FW Revision %u.", high); 1023 aprint_normal(" PHY FW Revision %u.", high);
1023 if (hw->mac.type == ixgbe_mac_X540) 1024 if (hw->mac.type == ixgbe_mac_X540)
1024 str = "%x"; 1025 str = "%x";
1025 else 1026 else
1026 str = "%02x"; 1027 str = "%02x";
1027 aprint_normal(str, low); 1028 aprint_normal(str, low);
1028 aprint_normal(" ID 0x%x,", id); 1029 aprint_normal(" ID 0x%x,", id);
1029 break; 1030 break;
1030 default: 1031 default:
1031 break; 1032 break;
1032 } 1033 }
1033 1034
1034 /* NVM Map version & OEM NVM Image version */ 1035 /* NVM Map version & OEM NVM Image version */
1035 switch (hw->mac.type) { 1036 switch (hw->mac.type) {
1036 case ixgbe_mac_X550: 1037 case ixgbe_mac_X550:
1037 case ixgbe_mac_X550EM_x: 1038 case ixgbe_mac_X550EM_x:
1038 case ixgbe_mac_X550EM_a: 1039 case ixgbe_mac_X550EM_a:
1039 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); 1040 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1040 if (nvmreg != 0xffff) { 1041 if (nvmreg != 0xffff) {
1041 high = (nvmreg >> 12) & 0x0f; 1042 high = (nvmreg >> 12) & 0x0f;
1042 low = nvmreg & 0x00ff; 1043 low = nvmreg & 0x00ff;
1043 aprint_normal(" NVM Map version %u.%02x,", high, low); 1044 aprint_normal(" NVM Map version %u.%02x,", high, low);
1044 } 1045 }
1045 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg); 1046 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1046 if (nvmreg != 0xffff) { 1047 if (nvmreg != 0xffff) {
1047 high = (nvmreg >> 12) & 0x0f; 1048 high = (nvmreg >> 12) & 0x0f;
1048 low = nvmreg & 0x00ff; 1049 low = nvmreg & 0x00ff;
1049 aprint_verbose(" OEM NVM Image version %u.%02x,", high, 1050 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1050 low); 1051 low);
1051 } 1052 }
1052 break; 1053 break;
1053 default: 1054 default:
1054 break; 1055 break;
1055 } 1056 }
1056 1057
1057 /* Print the ETrackID */ 1058 /* Print the ETrackID */
1058 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high); 1059 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1059 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low); 1060 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1060 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low); 1061 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1061 1062
1062 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 1063 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1063 error = ixgbe_allocate_msix(adapter, pa); 1064 error = ixgbe_allocate_msix(adapter, pa);
1064 if (error) { 1065 if (error) {
1065 /* Free allocated queue structures first */ 1066 /* Free allocated queue structures first */
1066 ixgbe_free_transmit_structures(adapter); 1067 ixgbe_free_transmit_structures(adapter);
1067 ixgbe_free_receive_structures(adapter); 1068 ixgbe_free_receive_structures(adapter);
1068 free(adapter->queues, M_DEVBUF); 1069 free(adapter->queues, M_DEVBUF);
1069 1070
1070 /* Fallback to legacy interrupt */ 1071 /* Fallback to legacy interrupt */
1071 adapter->feat_en &= ~IXGBE_FEATURE_MSIX; 1072 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1072 if (adapter->feat_cap & IXGBE_FEATURE_MSI) 1073 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1073 adapter->feat_en |= IXGBE_FEATURE_MSI; 1074 adapter->feat_en |= IXGBE_FEATURE_MSI;
1074 adapter->num_queues = 1; 1075 adapter->num_queues = 1;
1075 1076
1076 /* Allocate our TX/RX Queues again */ 1077 /* Allocate our TX/RX Queues again */
1077 if (ixgbe_allocate_queues(adapter)) { 1078 if (ixgbe_allocate_queues(adapter)) {
1078 error = ENOMEM; 1079 error = ENOMEM;
1079 goto err_out; 1080 goto err_out;
1080 } 1081 }
1081 } 1082 }
1082 } 1083 }
1083 /* Recovery mode */ 1084 /* Recovery mode */
1084 switch (adapter->hw.mac.type) { 1085 switch (adapter->hw.mac.type) {
1085 case ixgbe_mac_X550: 1086 case ixgbe_mac_X550:
1086 case ixgbe_mac_X550EM_x: 1087 case ixgbe_mac_X550EM_x:
1087 case ixgbe_mac_X550EM_a: 1088 case ixgbe_mac_X550EM_a:
1088 /* >= 2.00 */ 1089 /* >= 2.00 */
1089 if (hw->eeprom.nvm_image_ver_high >= 2) { 1090 if (hw->eeprom.nvm_image_ver_high >= 2) {
1090 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; 1091 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1091 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE; 1092 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1092 } 1093 }
1093 break; 1094 break;
1094 default: 1095 default:
1095 break; 1096 break;
1096 } 1097 }
1097 1098
1098 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0) 1099 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1099 error = ixgbe_allocate_legacy(adapter, pa); 1100 error = ixgbe_allocate_legacy(adapter, pa);
1100 if (error) 1101 if (error)
1101 goto err_late; 1102 goto err_late;
1102 1103
1103 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */ 1104 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1104 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS, 1105 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS,
1105 ixgbe_handle_link, adapter); 1106 ixgbe_handle_link, adapter);
1106 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1107 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1107 ixgbe_handle_mod, adapter); 1108 ixgbe_handle_mod, adapter);
1108 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1109 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1109 ixgbe_handle_msf, adapter); 1110 ixgbe_handle_msf, adapter);
1110 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1111 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1111 ixgbe_handle_phy, adapter); 1112 ixgbe_handle_phy, adapter);
1112 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 1113 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1113 adapter->fdir_si = 1114 adapter->fdir_si =
1114 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 1115 softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
1115 ixgbe_reinit_fdir, adapter); 1116 ixgbe_reinit_fdir, adapter);
1116 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL) 1117 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1117 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL) 1118 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1118 || ((adapter->feat_en & IXGBE_FEATURE_FDIR) 1119 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1119 && (adapter->fdir_si == NULL))) { 1120 && (adapter->fdir_si == NULL))) {
1120 aprint_error_dev(dev, 1121 aprint_error_dev(dev,
1121 "could not establish software interrupts ()\n"); 1122 "could not establish software interrupts ()\n");
1122 goto err_out; 1123 goto err_out;
1123 } 1124 }
1124 1125
1125 error = ixgbe_start_hw(hw); 1126 error = ixgbe_start_hw(hw);
1126 switch (error) { 1127 switch (error) {
1127 case IXGBE_ERR_EEPROM_VERSION: 1128 case IXGBE_ERR_EEPROM_VERSION:
1128 aprint_error_dev(dev, "This device is a pre-production adapter/" 1129 aprint_error_dev(dev, "This device is a pre-production adapter/"
1129 "LOM. Please be aware there may be issues associated " 1130 "LOM. Please be aware there may be issues associated "
1130 "with your hardware.\nIf you are experiencing problems " 1131 "with your hardware.\nIf you are experiencing problems "
1131 "please contact your Intel or hardware representative " 1132 "please contact your Intel or hardware representative "
1132 "who provided you with this hardware.\n"); 1133 "who provided you with this hardware.\n");
1133 break; 1134 break;
1134 case IXGBE_ERR_SFP_NOT_SUPPORTED: 1135 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1135 aprint_error_dev(dev, "Unsupported SFP+ Module\n"); 1136 aprint_error_dev(dev, "Unsupported SFP+ Module\n");
1136 error = EIO; 1137 error = EIO;
1137 goto err_late; 1138 goto err_late;
1138 case IXGBE_ERR_SFP_NOT_PRESENT: 1139 case IXGBE_ERR_SFP_NOT_PRESENT:
1139 aprint_error_dev(dev, "No SFP+ Module found\n"); 1140 aprint_error_dev(dev, "No SFP+ Module found\n");
1140 /* falls thru */ 1141 /* falls thru */
1141 default: 1142 default:
1142 break; 1143 break;
1143 } 1144 }
1144 1145
1145 /* Setup OS specific network interface */ 1146 /* Setup OS specific network interface */
1146 if (ixgbe_setup_interface(dev, adapter) != 0) 1147 if (ixgbe_setup_interface(dev, adapter) != 0)
1147 goto err_late; 1148 goto err_late;
1148 1149
1149 /* 1150 /*
1150 * Print PHY ID only for copper PHY. On device which has SFP(+) cage 1151 * Print PHY ID only for copper PHY. On device which has SFP(+) cage
1151 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID. 1152 * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID.
1152 */ 1153 */
1153 if (hw->phy.media_type == ixgbe_media_type_copper) { 1154 if (hw->phy.media_type == ixgbe_media_type_copper) {
1154 uint16_t id1, id2; 1155 uint16_t id1, id2;
1155 int oui, model, rev; 1156 int oui, model, rev;
1156 const char *descr; 1157 const char *descr;
1157 1158
1158 id1 = hw->phy.id >> 16; 1159 id1 = hw->phy.id >> 16;
1159 id2 = hw->phy.id & 0xffff; 1160 id2 = hw->phy.id & 0xffff;
1160 oui = MII_OUI(id1, id2); 1161 oui = MII_OUI(id1, id2);
1161 model = MII_MODEL(id2); 1162 model = MII_MODEL(id2);
1162 rev = MII_REV(id2); 1163 rev = MII_REV(id2);
1163 if ((descr = mii_get_descr(oui, model)) != NULL) 1164 if ((descr = mii_get_descr(oui, model)) != NULL)
1164 aprint_normal_dev(dev, 1165 aprint_normal_dev(dev,
1165 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n", 1166 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1166 descr, oui, model, rev); 1167 descr, oui, model, rev);
1167 else 1168 else
1168 aprint_normal_dev(dev, 1169 aprint_normal_dev(dev,
1169 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n", 1170 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1170 oui, model, rev); 1171 oui, model, rev);
1171 } 1172 }
1172 1173
1173 /* Enable the optics for 82599 SFP+ fiber */ 1174 /* Enable the optics for 82599 SFP+ fiber */
1174 ixgbe_enable_tx_laser(hw); 1175 ixgbe_enable_tx_laser(hw);
1175 1176
1176 /* Enable EEE power saving */ 1177 /* Enable EEE power saving */
1177 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 1178 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1178 hw->mac.ops.setup_eee(hw, 1179 hw->mac.ops.setup_eee(hw,
1179 adapter->feat_en & IXGBE_FEATURE_EEE); 1180 adapter->feat_en & IXGBE_FEATURE_EEE);
1180 1181
1181 /* Enable power to the phy. */ 1182 /* Enable power to the phy. */
1182 ixgbe_set_phy_power(hw, TRUE); 1183 ixgbe_set_phy_power(hw, TRUE);
1183 1184
1184 /* Initialize statistics */ 1185 /* Initialize statistics */
1185 ixgbe_update_stats_counters(adapter); 1186 ixgbe_update_stats_counters(adapter);
1186 1187
1187 /* Check PCIE slot type/speed/width */ 1188 /* Check PCIE slot type/speed/width */
1188 ixgbe_get_slot_info(adapter); 1189 ixgbe_get_slot_info(adapter);
1189 1190
1190 /* 1191 /*
1191 * Do time init and sysctl init here, but 1192 * Do time init and sysctl init here, but
1192 * only on the first port of a bypass adapter. 1193 * only on the first port of a bypass adapter.
1193 */ 1194 */
1194 ixgbe_bypass_init(adapter); 1195 ixgbe_bypass_init(adapter);
1195 1196
1196 /* Set an initial dmac value */ 1197 /* Set an initial dmac value */
1197 adapter->dmac = 0; 1198 adapter->dmac = 0;
1198 /* Set initial advertised speeds (if applicable) */ 1199 /* Set initial advertised speeds (if applicable) */
1199 adapter->advertise = ixgbe_get_advertise(adapter); 1200 adapter->advertise = ixgbe_get_advertise(adapter);
1200 1201
1201 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 1202 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1202 ixgbe_define_iov_schemas(dev, &error); 1203 ixgbe_define_iov_schemas(dev, &error);
1203 1204
1204 /* Add sysctls */ 1205 /* Add sysctls */
1205 ixgbe_add_device_sysctls(adapter); 1206 ixgbe_add_device_sysctls(adapter);
1206 ixgbe_add_hw_stats(adapter); 1207 ixgbe_add_hw_stats(adapter);
1207 1208
1208 /* For Netmap */ 1209 /* For Netmap */
1209 adapter->init_locked = ixgbe_init_locked; 1210 adapter->init_locked = ixgbe_init_locked;
1210 adapter->stop_locked = ixgbe_stop; 1211 adapter->stop_locked = ixgbe_stop;
1211 1212
1212 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 1213 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1213 ixgbe_netmap_attach(adapter); 1214 ixgbe_netmap_attach(adapter);
1214 1215
1215 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); 1216 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1216 aprint_verbose_dev(dev, "feature cap %s\n", buf); 1217 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1217 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); 1218 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1218 aprint_verbose_dev(dev, "feature ena %s\n", buf); 1219 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1219 1220
1220 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume)) 1221 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
@@ -1309,2139 +1310,2172 @@ ixgbe_setup_interface(device_t dev, stru @@ -1309,2139 +1310,2172 @@ ixgbe_setup_interface(device_t dev, stru
1309 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1310 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1310#ifdef IXGBE_MPSAFE 1311#ifdef IXGBE_MPSAFE
1311 ifp->if_extflags = IFEF_MPSAFE; 1312 ifp->if_extflags = IFEF_MPSAFE;
1312#endif 1313#endif
1313 ifp->if_ioctl = ixgbe_ioctl; 1314 ifp->if_ioctl = ixgbe_ioctl;
1314#if __FreeBSD_version >= 1100045 1315#if __FreeBSD_version >= 1100045
1315 /* TSO parameters */ 1316 /* TSO parameters */
1316 ifp->if_hw_tsomax = 65518; 1317 ifp->if_hw_tsomax = 65518;
1317 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; 1318 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1318 ifp->if_hw_tsomaxsegsize = 2048; 1319 ifp->if_hw_tsomaxsegsize = 2048;
1319#endif 1320#endif
1320 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { 1321 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1321#if 0 1322#if 0
1322 ixgbe_start_locked = ixgbe_legacy_start_locked; 1323 ixgbe_start_locked = ixgbe_legacy_start_locked;
1323#endif 1324#endif
1324 } else { 1325 } else {
1325 ifp->if_transmit = ixgbe_mq_start; 1326 ifp->if_transmit = ixgbe_mq_start;
1326#if 0 1327#if 0
1327 ixgbe_start_locked = ixgbe_mq_start_locked; 1328 ixgbe_start_locked = ixgbe_mq_start_locked;
1328#endif 1329#endif
1329 } 1330 }
1330 ifp->if_start = ixgbe_legacy_start; 1331 ifp->if_start = ixgbe_legacy_start;
1331 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 1332 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1332 IFQ_SET_READY(&ifp->if_snd); 1333 IFQ_SET_READY(&ifp->if_snd);
1333 1334
1334 rv = if_initialize(ifp); 1335 rv = if_initialize(ifp);
1335 if (rv != 0) { 1336 if (rv != 0) {
1336 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); 1337 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1337 return rv; 1338 return rv;
1338 } 1339 }
1339 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); 1340 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1340 ether_ifattach(ifp, adapter->hw.mac.addr); 1341 ether_ifattach(ifp, adapter->hw.mac.addr);
1341 /* 1342 /*
1342 * We use per TX queue softint, so if_deferred_start_init() isn't 1343 * We use per TX queue softint, so if_deferred_start_init() isn't
1343 * used. 1344 * used.
1344 */ 1345 */
1345 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb); 1346 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1346 1347
1347 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1348 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1348 1349
1349 /* 1350 /*
1350 * Tell the upper layer(s) we support long frames. 1351 * Tell the upper layer(s) we support long frames.
1351 */ 1352 */
1352 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1353 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1353 1354
1354 /* Set capability flags */ 1355 /* Set capability flags */
1355 ifp->if_capabilities |= IFCAP_RXCSUM 1356 ifp->if_capabilities |= IFCAP_RXCSUM
1356 | IFCAP_TXCSUM 1357 | IFCAP_TXCSUM
1357 | IFCAP_TSOv4 1358 | IFCAP_TSOv4
1358 | IFCAP_TSOv6; 1359 | IFCAP_TSOv6;
1359 ifp->if_capenable = 0; 1360 ifp->if_capenable = 0;
1360 1361
1361 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING 1362 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1362 | ETHERCAP_VLAN_HWCSUM 1363 | ETHERCAP_VLAN_HWCSUM
1363 | ETHERCAP_JUMBO_MTU 1364 | ETHERCAP_JUMBO_MTU
1364 | ETHERCAP_VLAN_MTU; 1365 | ETHERCAP_VLAN_MTU;
1365 1366
1366 /* Enable the above capabilities by default */ 1367 /* Enable the above capabilities by default */
1367 ec->ec_capenable = ec->ec_capabilities; 1368 ec->ec_capenable = ec->ec_capabilities;
1368 1369
1369 /* 1370 /*
1370 * Don't turn this on by default, if vlans are 1371 * Don't turn this on by default, if vlans are
1371 * created on another pseudo device (eg. lagg) 1372 * created on another pseudo device (eg. lagg)
1372 * then vlan events are not passed thru, breaking 1373 * then vlan events are not passed thru, breaking
1373 * operation, but with HW FILTER off it works. If 1374 * operation, but with HW FILTER off it works. If
1374 * using vlans directly on the ixgbe driver you can 1375 * using vlans directly on the ixgbe driver you can
1375 * enable this and get full hardware tag filtering. 1376 * enable this and get full hardware tag filtering.
1376 */ 1377 */
1377 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1378 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1378 1379
1379 /* 1380 /*
1380 * Specify the media types supported by this adapter and register 1381 * Specify the media types supported by this adapter and register
1381 * callbacks to update media and link information 1382 * callbacks to update media and link information
1382 */ 1383 */
1383 ec->ec_ifmedia = &adapter->media; 1384 ec->ec_ifmedia = &adapter->media;
1384 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, 1385 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1385 ixgbe_media_status); 1386 ixgbe_media_status);
1386 1387
1387 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); 1388 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1388 ixgbe_add_media_types(adapter); 1389 ixgbe_add_media_types(adapter);
1389 1390
1390 /* Set autoselect media by default */ 1391 /* Set autoselect media by default */
1391 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1392 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1392 1393
1393 if_register(ifp); 1394 if_register(ifp);
1394 1395
1395 return (0); 1396 return (0);
1396} /* ixgbe_setup_interface */ 1397} /* ixgbe_setup_interface */
1397 1398
1398/************************************************************************ 1399/************************************************************************
1399 * ixgbe_add_media_types 1400 * ixgbe_add_media_types
1400 ************************************************************************/ 1401 ************************************************************************/
1401static void 1402static void
1402ixgbe_add_media_types(struct adapter *adapter) 1403ixgbe_add_media_types(struct adapter *adapter)
1403{ 1404{
1404 struct ixgbe_hw *hw = &adapter->hw; 1405 struct ixgbe_hw *hw = &adapter->hw;
1405 device_t dev = adapter->dev; 1406 device_t dev = adapter->dev;
1406 u64 layer; 1407 u64 layer;
1407 1408
1408 layer = adapter->phy_layer; 1409 layer = adapter->phy_layer;
1409 1410
1410#define ADD(mm, dd) \ 1411#define ADD(mm, dd) \
1411 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL); 1412 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1412 1413
1413 ADD(IFM_NONE, 0); 1414 ADD(IFM_NONE, 0);
1414 1415
1415 /* Media types with matching NetBSD media defines */ 1416 /* Media types with matching NetBSD media defines */
1416 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { 1417 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1417 ADD(IFM_10G_T | IFM_FDX, 0); 1418 ADD(IFM_10G_T | IFM_FDX, 0);
1418 } 1419 }
1419 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { 1420 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1420 ADD(IFM_1000_T | IFM_FDX, 0); 1421 ADD(IFM_1000_T | IFM_FDX, 0);
1421 } 1422 }
1422 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) { 1423 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1423 ADD(IFM_100_TX | IFM_FDX, 0); 1424 ADD(IFM_100_TX | IFM_FDX, 0);
1424 } 1425 }
1425 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) { 1426 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1426 ADD(IFM_10_T | IFM_FDX, 0); 1427 ADD(IFM_10_T | IFM_FDX, 0);
1427 } 1428 }
1428 1429
1429 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1430 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1430 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) { 1431 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1431 ADD(IFM_10G_TWINAX | IFM_FDX, 0); 1432 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1432 } 1433 }
1433 1434
1434 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1435 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1435 ADD(IFM_10G_LR | IFM_FDX, 0); 1436 ADD(IFM_10G_LR | IFM_FDX, 0);
1436 if (hw->phy.multispeed_fiber) { 1437 if (hw->phy.multispeed_fiber) {
1437 ADD(IFM_1000_LX | IFM_FDX, 0); 1438 ADD(IFM_1000_LX | IFM_FDX, 0);
1438 } 1439 }
1439 } 1440 }
1440 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1441 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1441 ADD(IFM_10G_SR | IFM_FDX, 0); 1442 ADD(IFM_10G_SR | IFM_FDX, 0);
1442 if (hw->phy.multispeed_fiber) { 1443 if (hw->phy.multispeed_fiber) {
1443 ADD(IFM_1000_SX | IFM_FDX, 0); 1444 ADD(IFM_1000_SX | IFM_FDX, 0);
1444 } 1445 }
1445 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { 1446 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1446 ADD(IFM_1000_SX | IFM_FDX, 0); 1447 ADD(IFM_1000_SX | IFM_FDX, 0);
1447 } 1448 }
1448 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) { 1449 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1449 ADD(IFM_10G_CX4 | IFM_FDX, 0); 1450 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1450 } 1451 }
1451 1452
1452 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 1453 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1453 ADD(IFM_10G_KR | IFM_FDX, 0); 1454 ADD(IFM_10G_KR | IFM_FDX, 0);
1454 } 1455 }
1455 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 1456 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1456 ADD(IFM_10G_KX4 | IFM_FDX, 0); 1457 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1457 } 1458 }
1458 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 1459 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1459 ADD(IFM_1000_KX | IFM_FDX, 0); 1460 ADD(IFM_1000_KX | IFM_FDX, 0);
1460 } 1461 }
1461 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { 1462 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1462 ADD(IFM_2500_KX | IFM_FDX, 0); 1463 ADD(IFM_2500_KX | IFM_FDX, 0);
1463 } 1464 }
1464 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) { 1465 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1465 ADD(IFM_2500_T | IFM_FDX, 0); 1466 ADD(IFM_2500_T | IFM_FDX, 0);
1466 } 1467 }
1467 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) { 1468 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1468 ADD(IFM_5000_T | IFM_FDX, 0); 1469 ADD(IFM_5000_T | IFM_FDX, 0);
1469 } 1470 }
1470 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1471 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1471 device_printf(dev, "Media supported: 1000baseBX\n"); 1472 device_printf(dev, "Media supported: 1000baseBX\n");
1472 /* XXX no ifmedia_set? */ 1473 /* XXX no ifmedia_set? */
1473 1474
1474 ADD(IFM_AUTO, 0); 1475 ADD(IFM_AUTO, 0);
1475 1476
1476#undef ADD 1477#undef ADD
1477} /* ixgbe_add_media_types */ 1478} /* ixgbe_add_media_types */
1478 1479
1479/************************************************************************ 1480/************************************************************************
1480 * ixgbe_is_sfp 1481 * ixgbe_is_sfp
1481 ************************************************************************/ 1482 ************************************************************************/
1482static inline bool 1483static inline bool
1483ixgbe_is_sfp(struct ixgbe_hw *hw) 1484ixgbe_is_sfp(struct ixgbe_hw *hw)
1484{ 1485{
1485 switch (hw->mac.type) { 1486 switch (hw->mac.type) {
1486 case ixgbe_mac_82598EB: 1487 case ixgbe_mac_82598EB:
1487 if (hw->phy.type == ixgbe_phy_nl) 1488 if (hw->phy.type == ixgbe_phy_nl)
1488 return (TRUE); 1489 return (TRUE);
1489 return (FALSE); 1490 return (FALSE);
1490 case ixgbe_mac_82599EB: 1491 case ixgbe_mac_82599EB:
1491 switch (hw->mac.ops.get_media_type(hw)) { 1492 switch (hw->mac.ops.get_media_type(hw)) {
1492 case ixgbe_media_type_fiber: 1493 case ixgbe_media_type_fiber:
1493 case ixgbe_media_type_fiber_qsfp: 1494 case ixgbe_media_type_fiber_qsfp:
1494 return (TRUE); 1495 return (TRUE);
1495 default: 1496 default:
1496 return (FALSE); 1497 return (FALSE);
1497 } 1498 }
1498 case ixgbe_mac_X550EM_x: 1499 case ixgbe_mac_X550EM_x:
1499 case ixgbe_mac_X550EM_a: 1500 case ixgbe_mac_X550EM_a:
1500 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) 1501 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
1501 return (TRUE); 1502 return (TRUE);
1502 return (FALSE); 1503 return (FALSE);
1503 default: 1504 default:
1504 return (FALSE); 1505 return (FALSE);
1505 } 1506 }
1506} /* ixgbe_is_sfp */ 1507} /* ixgbe_is_sfp */
1507 1508
1508/************************************************************************ 1509/************************************************************************
1509 * ixgbe_config_link 1510 * ixgbe_config_link
1510 ************************************************************************/ 1511 ************************************************************************/
1511static void 1512static void
1512ixgbe_config_link(struct adapter *adapter) 1513ixgbe_config_link(struct adapter *adapter)
1513{ 1514{
1514 struct ixgbe_hw *hw = &adapter->hw; 1515 struct ixgbe_hw *hw = &adapter->hw;
1515 u32 autoneg, err = 0; 1516 u32 autoneg, err = 0;
1516 bool sfp, negotiate = false; 1517 bool sfp, negotiate = false;
1517 1518
1518 sfp = ixgbe_is_sfp(hw); 1519 sfp = ixgbe_is_sfp(hw);
1519 1520
1520 if (sfp) { 1521 if (sfp) {
1521 if (hw->phy.multispeed_fiber) { 1522 if (hw->phy.multispeed_fiber) {
1522 ixgbe_enable_tx_laser(hw); 1523 ixgbe_enable_tx_laser(hw);
1523 kpreempt_disable(); 1524 kpreempt_disable();
1524 softint_schedule(adapter->msf_si); 1525 softint_schedule(adapter->msf_si);
1525 kpreempt_enable(); 1526 kpreempt_enable();
1526 } 1527 }
1527 kpreempt_disable(); 1528 kpreempt_disable();
1528 softint_schedule(adapter->mod_si); 1529 softint_schedule(adapter->mod_si);
1529 kpreempt_enable(); 1530 kpreempt_enable();
1530 } else { 1531 } else {
1531 struct ifmedia *ifm = &adapter->media; 1532 struct ifmedia *ifm = &adapter->media;
1532 1533
1533 if (hw->mac.ops.check_link) 1534 if (hw->mac.ops.check_link)
1534 err = ixgbe_check_link(hw, &adapter->link_speed, 1535 err = ixgbe_check_link(hw, &adapter->link_speed,
1535 &adapter->link_up, FALSE); 1536 &adapter->link_up, FALSE);
1536 if (err) 1537 if (err)
1537 return; 1538 return;
1538 1539
1539 /* 1540 /*
1540 * Check if it's the first call. If it's the first call, 1541 * Check if it's the first call. If it's the first call,
1541 * get value for auto negotiation. 1542 * get value for auto negotiation.
1542 */ 1543 */
1543 autoneg = hw->phy.autoneg_advertised; 1544 autoneg = hw->phy.autoneg_advertised;
1544 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE) 1545 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1545 && ((!autoneg) && (hw->mac.ops.get_link_capabilities))) 1546 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1546 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1547 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1547 &negotiate); 1548 &negotiate);
1548 if (err) 1549 if (err)
1549 return; 1550 return;
1550 if (hw->mac.ops.setup_link) 1551 if (hw->mac.ops.setup_link)
1551 err = hw->mac.ops.setup_link(hw, autoneg, 1552 err = hw->mac.ops.setup_link(hw, autoneg,
1552 adapter->link_up); 1553 adapter->link_up);
1553 } 1554 }
1554 1555
1555} /* ixgbe_config_link */ 1556} /* ixgbe_config_link */
1556 1557
1557/************************************************************************ 1558/************************************************************************
1558 * ixgbe_update_stats_counters - Update board statistics counters. 1559 * ixgbe_update_stats_counters - Update board statistics counters.
1559 ************************************************************************/ 1560 ************************************************************************/
1560static void 1561static void
1561ixgbe_update_stats_counters(struct adapter *adapter) 1562ixgbe_update_stats_counters(struct adapter *adapter)
1562{ 1563{
1563 struct ifnet *ifp = adapter->ifp; 1564 struct ifnet *ifp = adapter->ifp;
1564 struct ixgbe_hw *hw = &adapter->hw; 1565 struct ixgbe_hw *hw = &adapter->hw;
1565 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1566 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1566 u32 missed_rx = 0, bprc, lxon, lxoff, total; 1567 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1567 u64 total_missed_rx = 0; 1568 u64 total_missed_rx = 0;
1568 uint64_t crcerrs, rlec; 1569 uint64_t crcerrs, rlec;
1569 unsigned int queue_counters; 1570 unsigned int queue_counters;
1570 int i; 1571 int i;
1571 1572
1572 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS); 1573 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1573 stats->crcerrs.ev_count += crcerrs; 1574 stats->crcerrs.ev_count += crcerrs;
1574 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 1575 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1575 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC); 1576 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1576 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC); 1577 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1577 if (hw->mac.type == ixgbe_mac_X550) 1578 if (hw->mac.type == ixgbe_mac_X550)
1578 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC); 1579 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1579 1580
1580 /* 16 registers exist */ 1581 /* 16 registers exist */
1581 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues); 1582 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1582 for (i = 0; i < queue_counters; i++) { 1583 for (i = 0; i < queue_counters; i++) {
1583 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 1584 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1584 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 1585 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1585 if (hw->mac.type >= ixgbe_mac_82599EB) { 1586 if (hw->mac.type >= ixgbe_mac_82599EB) {
1586 stats->qprdc[i].ev_count 1587 stats->qprdc[i].ev_count
1587 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1588 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1588 } 1589 }
1589 } 1590 }
1590 1591
1591 /* 8 registers exist */ 1592 /* 8 registers exist */
1592 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1593 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1593 uint32_t mp; 1594 uint32_t mp;
1594 1595
1595 /* MPC */ 1596 /* MPC */
1596 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 1597 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1597 /* global total per queue */ 1598 /* global total per queue */
1598 stats->mpc[i].ev_count += mp; 1599 stats->mpc[i].ev_count += mp;
1599 /* running comprehensive total for stats display */ 1600 /* running comprehensive total for stats display */
1600 total_missed_rx += mp; 1601 total_missed_rx += mp;
1601 1602
1602 if (hw->mac.type == ixgbe_mac_82598EB) 1603 if (hw->mac.type == ixgbe_mac_82598EB)
1603 stats->rnbc[i].ev_count 1604 stats->rnbc[i].ev_count
1604 += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 1605 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1605 1606
1606 stats->pxontxc[i].ev_count 1607 stats->pxontxc[i].ev_count
1607 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 1608 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1608 stats->pxofftxc[i].ev_count 1609 stats->pxofftxc[i].ev_count
1609 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 1610 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1610 if (hw->mac.type >= ixgbe_mac_82599EB) { 1611 if (hw->mac.type >= ixgbe_mac_82599EB) {
1611 stats->pxonrxc[i].ev_count 1612 stats->pxonrxc[i].ev_count
1612 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 1613 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1613 stats->pxoffrxc[i].ev_count 1614 stats->pxoffrxc[i].ev_count
1614 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 1615 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1615 stats->pxon2offc[i].ev_count 1616 stats->pxon2offc[i].ev_count
1616 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 1617 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1617 } else { 1618 } else {
1618 stats->pxonrxc[i].ev_count 1619 stats->pxonrxc[i].ev_count
1619 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 1620 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1620 stats->pxoffrxc[i].ev_count 1621 stats->pxoffrxc[i].ev_count
1621 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 1622 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1622 } 1623 }
1623 } 1624 }
1624 stats->mpctotal.ev_count += total_missed_rx; 1625 stats->mpctotal.ev_count += total_missed_rx;
1625 1626
1626 /* Document says M[LR]FC are valid when link is up and 10Gbps */ 1627 /* Document says M[LR]FC are valid when link is up and 10Gbps */
1627 if ((adapter->link_active == LINK_STATE_UP) 1628 if ((adapter->link_active == LINK_STATE_UP)
1628 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) { 1629 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1629 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC); 1630 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1630 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC); 1631 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1631 } 1632 }
1632 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC); 1633 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1633 stats->rlec.ev_count += rlec; 1634 stats->rlec.ev_count += rlec;
1634 1635
1635 /* Hardware workaround, gprc counts missed packets */ 1636 /* Hardware workaround, gprc counts missed packets */
1636 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx; 1637 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1637 1638
1638 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 1639 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1639 stats->lxontxc.ev_count += lxon; 1640 stats->lxontxc.ev_count += lxon;
1640 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 1641 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1641 stats->lxofftxc.ev_count += lxoff; 1642 stats->lxofftxc.ev_count += lxoff;
1642 total = lxon + lxoff; 1643 total = lxon + lxoff;
1643 1644
1644 if (hw->mac.type != ixgbe_mac_82598EB) { 1645 if (hw->mac.type != ixgbe_mac_82598EB) {
1645 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) + 1646 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1646 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 1647 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1647 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1648 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1648 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN; 1649 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1649 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) + 1650 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1650 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 1651 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1651 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 1652 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1652 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 1653 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1653 } else { 1654 } else {
1654 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 1655 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1655 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 1656 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1656 /* 82598 only has a counter in the high register */ 1657 /* 82598 only has a counter in the high register */
1657 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH); 1658 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1658 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN; 1659 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1659 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH); 1660 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1660 } 1661 }
1661 1662
1662 /* 1663 /*
1663 * Workaround: mprc hardware is incorrectly counting 1664 * Workaround: mprc hardware is incorrectly counting
1664 * broadcasts, so for now we subtract those. 1665 * broadcasts, so for now we subtract those.
1665 */ 1666 */
1666 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 1667 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1667 stats->bprc.ev_count += bprc; 1668 stats->bprc.ev_count += bprc;
1668 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) 1669 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1669 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0); 1670 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1670 1671
1671 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64); 1672 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1672 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127); 1673 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1673 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255); 1674 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1674 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511); 1675 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1675 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023); 1676 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1676 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522); 1677 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1677 1678
1678 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total; 1679 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1679 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total; 1680 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1680 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total; 1681 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1681 1682
1682 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC); 1683 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1683 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC); 1684 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1684 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC); 1685 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1685 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC); 1686 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1686 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 1687 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1687 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 1688 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1688 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 1689 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1689 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR); 1690 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1690 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT); 1691 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1691 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127); 1692 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1692 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255); 1693 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1693 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511); 1694 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1694 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023); 1695 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1695 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522); 1696 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1696 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC); 1697 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1697 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC); 1698 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1698 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC); 1699 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1699 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST); 1700 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1700 /* Only read FCOE on 82599 */ 1701 /* Only read FCOE on 82599 */
1701 if (hw->mac.type != ixgbe_mac_82598EB) { 1702 if (hw->mac.type != ixgbe_mac_82598EB) {
1702 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 1703 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1703 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 1704 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1704 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 1705 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1705 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 1706 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1706 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 1707 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1707 } 1708 }
1708 1709
1709 /* Fill out the OS statistics structure */ 1710 /* Fill out the OS statistics structure */
1710 /* 1711 /*
1711 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with 1712 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
1712 * adapter->stats counters. It's required to make ifconfig -z 1713 * adapter->stats counters. It's required to make ifconfig -z
1713 * (SOICZIFDATA) work. 1714 * (SOICZIFDATA) work.
1714 */ 1715 */
1715 ifp->if_collisions = 0; 1716 ifp->if_collisions = 0;
1716 1717
1717 /* Rx Errors */ 1718 /* Rx Errors */
1718 ifp->if_iqdrops += total_missed_rx; 1719 ifp->if_iqdrops += total_missed_rx;
1719 ifp->if_ierrors += crcerrs + rlec; 1720 ifp->if_ierrors += crcerrs + rlec;
1720} /* ixgbe_update_stats_counters */ 1721} /* ixgbe_update_stats_counters */
1721 1722
1722/************************************************************************ 1723/************************************************************************
1723 * ixgbe_add_hw_stats 1724 * ixgbe_add_hw_stats
1724 * 1725 *
1725 * Add sysctl variables, one per statistic, to the system. 1726 * Add sysctl variables, one per statistic, to the system.
1726 ************************************************************************/ 1727 ************************************************************************/
1727static void 1728static void
1728ixgbe_add_hw_stats(struct adapter *adapter) 1729ixgbe_add_hw_stats(struct adapter *adapter)
1729{ 1730{
1730 device_t dev = adapter->dev; 1731 device_t dev = adapter->dev;
1731 const struct sysctlnode *rnode, *cnode; 1732 const struct sysctlnode *rnode, *cnode;
1732 struct sysctllog **log = &adapter->sysctllog; 1733 struct sysctllog **log = &adapter->sysctllog;
1733 struct tx_ring *txr = adapter->tx_rings; 1734 struct tx_ring *txr = adapter->tx_rings;
1734 struct rx_ring *rxr = adapter->rx_rings; 1735 struct rx_ring *rxr = adapter->rx_rings;
1735 struct ixgbe_hw *hw = &adapter->hw; 1736 struct ixgbe_hw *hw = &adapter->hw;
1736 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1737 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1737 const char *xname = device_xname(dev); 1738 const char *xname = device_xname(dev);
1738 int i; 1739 int i;
1739 1740
1740 /* Driver Statistics */ 1741 /* Driver Statistics */
1741 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 1742 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1742 NULL, xname, "Driver tx dma soft fail EFBIG"); 1743 NULL, xname, "Driver tx dma soft fail EFBIG");
1743 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, 1744 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1744 NULL, xname, "m_defrag() failed"); 1745 NULL, xname, "m_defrag() failed");
1745 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 1746 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1746 NULL, xname, "Driver tx dma hard fail EFBIG"); 1747 NULL, xname, "Driver tx dma hard fail EFBIG");
1747 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, 1748 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1748 NULL, xname, "Driver tx dma hard fail EINVAL"); 1749 NULL, xname, "Driver tx dma hard fail EINVAL");
1749 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, 1750 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1750 NULL, xname, "Driver tx dma hard fail other"); 1751 NULL, xname, "Driver tx dma hard fail other");
1751 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 1752 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1752 NULL, xname, "Driver tx dma soft fail EAGAIN"); 1753 NULL, xname, "Driver tx dma soft fail EAGAIN");
1753 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 1754 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1754 NULL, xname, "Driver tx dma soft fail ENOMEM"); 1755 NULL, xname, "Driver tx dma soft fail ENOMEM");
1755 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, 1756 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1756 NULL, xname, "Watchdog timeouts"); 1757 NULL, xname, "Watchdog timeouts");
1757 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, 1758 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1758 NULL, xname, "TSO errors"); 1759 NULL, xname, "TSO errors");
1759 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR, 1760 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1760 NULL, xname, "Link MSI-X IRQ Handled"); 1761 NULL, xname, "Link MSI-X IRQ Handled");
1761 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR, 1762 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1762 NULL, xname, "Link softint"); 1763 NULL, xname, "Link softint");
1763 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR, 1764 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1764 NULL, xname, "module softint"); 1765 NULL, xname, "module softint");
1765 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR, 1766 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1766 NULL, xname, "multimode softint"); 1767 NULL, xname, "multimode softint");
1767 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR, 1768 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1768 NULL, xname, "external PHY softint"); 1769 NULL, xname, "external PHY softint");
1769 1770
1770 /* Max number of traffic class is 8 */ 1771 /* Max number of traffic class is 8 */
1771 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8); 1772 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1772 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1773 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1773 snprintf(adapter->tcs[i].evnamebuf, 1774 snprintf(adapter->tcs[i].evnamebuf,
1774 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", 1775 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1775 xname, i); 1776 xname, i);
1776 if (i < __arraycount(stats->mpc)) { 1777 if (i < __arraycount(stats->mpc)) {
1777 evcnt_attach_dynamic(&stats->mpc[i], 1778 evcnt_attach_dynamic(&stats->mpc[i],
1778 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1779 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1779 "RX Missed Packet Count"); 1780 "RX Missed Packet Count");
1780 if (hw->mac.type == ixgbe_mac_82598EB) 1781 if (hw->mac.type == ixgbe_mac_82598EB)
1781 evcnt_attach_dynamic(&stats->rnbc[i], 1782 evcnt_attach_dynamic(&stats->rnbc[i],
1782 EVCNT_TYPE_MISC, NULL, 1783 EVCNT_TYPE_MISC, NULL,
1783 adapter->tcs[i].evnamebuf, 1784 adapter->tcs[i].evnamebuf,
1784 "Receive No Buffers"); 1785 "Receive No Buffers");
1785 } 1786 }
1786 if (i < __arraycount(stats->pxontxc)) { 1787 if (i < __arraycount(stats->pxontxc)) {
1787 evcnt_attach_dynamic(&stats->pxontxc[i], 1788 evcnt_attach_dynamic(&stats->pxontxc[i],
1788 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1789 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1789 "pxontxc"); 1790 "pxontxc");
1790 evcnt_attach_dynamic(&stats->pxonrxc[i], 1791 evcnt_attach_dynamic(&stats->pxonrxc[i],
1791 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1792 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1792 "pxonrxc"); 1793 "pxonrxc");
1793 evcnt_attach_dynamic(&stats->pxofftxc[i], 1794 evcnt_attach_dynamic(&stats->pxofftxc[i],
1794 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1795 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1795 "pxofftxc"); 1796 "pxofftxc");
1796 evcnt_attach_dynamic(&stats->pxoffrxc[i], 1797 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1797 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1798 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1798 "pxoffrxc"); 1799 "pxoffrxc");
1799 if (hw->mac.type >= ixgbe_mac_82599EB) 1800 if (hw->mac.type >= ixgbe_mac_82599EB)
1800 evcnt_attach_dynamic(&stats->pxon2offc[i], 1801 evcnt_attach_dynamic(&stats->pxon2offc[i],
1801 EVCNT_TYPE_MISC, NULL, 1802 EVCNT_TYPE_MISC, NULL,
1802 adapter->tcs[i].evnamebuf, 1803 adapter->tcs[i].evnamebuf,
1803 "pxon2offc"); 1804 "pxon2offc");
1804 } 1805 }
1805 } 1806 }
1806 1807
1807 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 1808 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1808#ifdef LRO 1809#ifdef LRO
1809 struct lro_ctrl *lro = &rxr->lro; 1810 struct lro_ctrl *lro = &rxr->lro;
1810#endif /* LRO */ 1811#endif /* LRO */
1811 1812
1812 snprintf(adapter->queues[i].evnamebuf, 1813 snprintf(adapter->queues[i].evnamebuf,
1813 sizeof(adapter->queues[i].evnamebuf), "%s q%d", 1814 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1814 xname, i); 1815 xname, i);
1815 snprintf(adapter->queues[i].namebuf, 1816 snprintf(adapter->queues[i].namebuf,
1816 sizeof(adapter->queues[i].namebuf), "q%d", i); 1817 sizeof(adapter->queues[i].namebuf), "q%d", i);
1817 1818
1818 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 1819 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1819 aprint_error_dev(dev, "could not create sysctl root\n"); 1820 aprint_error_dev(dev, "could not create sysctl root\n");
1820 break; 1821 break;
1821 } 1822 }
1822 1823
1823 if (sysctl_createv(log, 0, &rnode, &rnode, 1824 if (sysctl_createv(log, 0, &rnode, &rnode,
1824 0, CTLTYPE_NODE, 1825 0, CTLTYPE_NODE,
1825 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 1826 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1826 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 1827 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1827 break; 1828 break;
1828 1829
1829 if (sysctl_createv(log, 0, &rnode, &cnode, 1830 if (sysctl_createv(log, 0, &rnode, &cnode,
1830 CTLFLAG_READWRITE, CTLTYPE_INT, 1831 CTLFLAG_READWRITE, CTLTYPE_INT,
1831 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 1832 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1832 ixgbe_sysctl_interrupt_rate_handler, 0, 1833 ixgbe_sysctl_interrupt_rate_handler, 0,
1833 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 1834 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1834 break; 1835 break;
1835 1836
1836 if (sysctl_createv(log, 0, &rnode, &cnode, 1837 if (sysctl_createv(log, 0, &rnode, &cnode,
1837 CTLFLAG_READONLY, CTLTYPE_INT, 1838 CTLFLAG_READONLY, CTLTYPE_INT,
1838 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 1839 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1839 ixgbe_sysctl_tdh_handler, 0, (void *)txr, 1840 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1840 0, CTL_CREATE, CTL_EOL) != 0) 1841 0, CTL_CREATE, CTL_EOL) != 0)
1841 break; 1842 break;
1842 1843
1843 if (sysctl_createv(log, 0, &rnode, &cnode, 1844 if (sysctl_createv(log, 0, &rnode, &cnode,
1844 CTLFLAG_READONLY, CTLTYPE_INT, 1845 CTLFLAG_READONLY, CTLTYPE_INT,
1845 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 1846 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1846 ixgbe_sysctl_tdt_handler, 0, (void *)txr, 1847 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1847 0, CTL_CREATE, CTL_EOL) != 0) 1848 0, CTL_CREATE, CTL_EOL) != 0)
1848 break; 1849 break;
1849 1850
1850 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, 1851 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1851 NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); 1852 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1852 evcnt_attach_dynamic(&adapter->queues[i].handleq, 1853 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1853 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1854 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1854 "Handled queue in softint"); 1855 "Handled queue in softint");
1855 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, 1856 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1856 NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); 1857 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1857 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 1858 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1858 NULL, adapter->queues[i].evnamebuf, "TSO"); 1859 NULL, adapter->queues[i].evnamebuf, "TSO");
1859 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 1860 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1860 NULL, adapter->queues[i].evnamebuf, 1861 NULL, adapter->queues[i].evnamebuf,
1861 "Queue No Descriptor Available"); 1862 "Queue No Descriptor Available");
1862 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 1863 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1863 NULL, adapter->queues[i].evnamebuf, 1864 NULL, adapter->queues[i].evnamebuf,
1864 "Queue Packets Transmitted"); 1865 "Queue Packets Transmitted");
1865#ifndef IXGBE_LEGACY_TX 1866#ifndef IXGBE_LEGACY_TX
1866 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 1867 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1867 NULL, adapter->queues[i].evnamebuf, 1868 NULL, adapter->queues[i].evnamebuf,
1868 "Packets dropped in pcq"); 1869 "Packets dropped in pcq");
1869#endif 1870#endif
1870 1871
1871 if (sysctl_createv(log, 0, &rnode, &cnode, 1872 if (sysctl_createv(log, 0, &rnode, &cnode,
1872 CTLFLAG_READONLY, 1873 CTLFLAG_READONLY,
1873 CTLTYPE_INT, 1874 CTLTYPE_INT,
1874 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"), 1875 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1875 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 1876 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1876 CTL_CREATE, CTL_EOL) != 0) 1877 CTL_CREATE, CTL_EOL) != 0)
1877 break; 1878 break;
1878 1879
1879 if (sysctl_createv(log, 0, &rnode, &cnode, 1880 if (sysctl_createv(log, 0, &rnode, &cnode,
1880 CTLFLAG_READONLY, 1881 CTLFLAG_READONLY,
1881 CTLTYPE_INT, 1882 CTLTYPE_INT,
1882 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"), 1883 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1883 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0, 1884 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1884 CTL_CREATE, CTL_EOL) != 0) 1885 CTL_CREATE, CTL_EOL) != 0)
1885 break; 1886 break;
1886 1887
1887 if (sysctl_createv(log, 0, &rnode, &cnode, 1888 if (sysctl_createv(log, 0, &rnode, &cnode,
1888 CTLFLAG_READONLY, 1889 CTLFLAG_READONLY,
1889 CTLTYPE_INT, 1890 CTLTYPE_INT,
1890 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"), 1891 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1891 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0, 1892 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1892 CTL_CREATE, CTL_EOL) != 0) 1893 CTL_CREATE, CTL_EOL) != 0)
1893 break; 1894 break;
1894 1895
1895 if (i < __arraycount(stats->qprc)) { 1896 if (i < __arraycount(stats->qprc)) {
1896 evcnt_attach_dynamic(&stats->qprc[i], 1897 evcnt_attach_dynamic(&stats->qprc[i],
1897 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1898 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1898 "qprc"); 1899 "qprc");
1899 evcnt_attach_dynamic(&stats->qptc[i], 1900 evcnt_attach_dynamic(&stats->qptc[i],
1900 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1901 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1901 "qptc"); 1902 "qptc");
1902 evcnt_attach_dynamic(&stats->qbrc[i], 1903 evcnt_attach_dynamic(&stats->qbrc[i],
1903 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1904 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1904 "qbrc"); 1905 "qbrc");
1905 evcnt_attach_dynamic(&stats->qbtc[i], 1906 evcnt_attach_dynamic(&stats->qbtc[i],
1906 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1907 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1907 "qbtc"); 1908 "qbtc");
1908 if (hw->mac.type >= ixgbe_mac_82599EB) 1909 if (hw->mac.type >= ixgbe_mac_82599EB)
1909 evcnt_attach_dynamic(&stats->qprdc[i], 1910 evcnt_attach_dynamic(&stats->qprdc[i],
1910 EVCNT_TYPE_MISC, NULL, 1911 EVCNT_TYPE_MISC, NULL,
1911 adapter->queues[i].evnamebuf, "qprdc"); 1912 adapter->queues[i].evnamebuf, "qprdc");
1912 } 1913 }
1913 1914
1914 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 1915 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1915 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received"); 1916 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1916 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 1917 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1917 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received"); 1918 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1918 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 1919 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1919 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); 1920 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1920 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC, 1921 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1921 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf"); 1922 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1922 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 1923 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1923 NULL, adapter->queues[i].evnamebuf, "Rx discarded"); 1924 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1924#ifdef LRO 1925#ifdef LRO
1925 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 1926 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1926 CTLFLAG_RD, &lro->lro_queued, 0, 1927 CTLFLAG_RD, &lro->lro_queued, 0,
1927 "LRO Queued"); 1928 "LRO Queued");
1928 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 1929 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1929 CTLFLAG_RD, &lro->lro_flushed, 0, 1930 CTLFLAG_RD, &lro->lro_flushed, 0,
1930 "LRO Flushed"); 1931 "LRO Flushed");
1931#endif /* LRO */ 1932#endif /* LRO */
1932 } 1933 }
1933 1934
1934 /* MAC stats get their own sub node */ 1935 /* MAC stats get their own sub node */
1935 1936
1936 snprintf(stats->namebuf, 1937 snprintf(stats->namebuf,
1937 sizeof(stats->namebuf), "%s MAC Statistics", xname); 1938 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1938 1939
1939 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 1940 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1940 stats->namebuf, "rx csum offload - IP"); 1941 stats->namebuf, "rx csum offload - IP");
1941 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 1942 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1942 stats->namebuf, "rx csum offload - L4"); 1943 stats->namebuf, "rx csum offload - L4");
1943 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 1944 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "rx csum offload - IP bad"); 1945 stats->namebuf, "rx csum offload - IP bad");
1945 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 1946 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "rx csum offload - L4 bad"); 1947 stats->namebuf, "rx csum offload - L4 bad");
1947 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL, 1948 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1948 stats->namebuf, "Interrupt conditions zero"); 1949 stats->namebuf, "Interrupt conditions zero");
1949 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL, 1950 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1950 stats->namebuf, "Legacy interrupts"); 1951 stats->namebuf, "Legacy interrupts");
1951 1952
1952 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL, 1953 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "CRC Errors"); 1954 stats->namebuf, "CRC Errors");
1954 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL, 1955 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "Illegal Byte Errors"); 1956 stats->namebuf, "Illegal Byte Errors");
1956 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL, 1957 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1957 stats->namebuf, "Byte Errors"); 1958 stats->namebuf, "Byte Errors");
1958 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL, 1959 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1959 stats->namebuf, "MAC Short Packets Discarded"); 1960 stats->namebuf, "MAC Short Packets Discarded");
1960 if (hw->mac.type >= ixgbe_mac_X550) 1961 if (hw->mac.type >= ixgbe_mac_X550)
1961 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL, 1962 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "Bad SFD"); 1963 stats->namebuf, "Bad SFD");
1963 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL, 1964 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "Total Packets Missed"); 1965 stats->namebuf, "Total Packets Missed");
1965 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL, 1966 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "MAC Local Faults"); 1967 stats->namebuf, "MAC Local Faults");
1967 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL, 1968 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "MAC Remote Faults"); 1969 stats->namebuf, "MAC Remote Faults");
1969 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL, 1970 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "Receive Length Errors"); 1971 stats->namebuf, "Receive Length Errors");
1971 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL, 1972 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "Link XON Transmitted"); 1973 stats->namebuf, "Link XON Transmitted");
1973 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL, 1974 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "Link XON Received"); 1975 stats->namebuf, "Link XON Received");
1975 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL, 1976 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1976 stats->namebuf, "Link XOFF Transmitted"); 1977 stats->namebuf, "Link XOFF Transmitted");
1977 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL, 1978 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "Link XOFF Received"); 1979 stats->namebuf, "Link XOFF Received");
1979 1980
1980 /* Packet Reception Stats */ 1981 /* Packet Reception Stats */
1981 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL, 1982 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Total Octets Received"); 1983 stats->namebuf, "Total Octets Received");
1983 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL, 1984 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "Good Octets Received"); 1985 stats->namebuf, "Good Octets Received");
1985 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL, 1986 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Total Packets Received"); 1987 stats->namebuf, "Total Packets Received");
1987 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL, 1988 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Good Packets Received"); 1989 stats->namebuf, "Good Packets Received");
1989 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL, 1990 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1990 stats->namebuf, "Multicast Packets Received"); 1991 stats->namebuf, "Multicast Packets Received");
1991 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL, 1992 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "Broadcast Packets Received"); 1993 stats->namebuf, "Broadcast Packets Received");
1993 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL, 1994 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "64 byte frames received "); 1995 stats->namebuf, "64 byte frames received ");
1995 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL, 1996 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "65-127 byte frames received"); 1997 stats->namebuf, "65-127 byte frames received");
1997 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL, 1998 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "128-255 byte frames received"); 1999 stats->namebuf, "128-255 byte frames received");
1999 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL, 2000 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
2000 stats->namebuf, "256-511 byte frames received"); 2001 stats->namebuf, "256-511 byte frames received");
2001 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL, 2002 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "512-1023 byte frames received"); 2003 stats->namebuf, "512-1023 byte frames received");
2003 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL, 2004 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
2004 stats->namebuf, "1023-1522 byte frames received"); 2005 stats->namebuf, "1023-1522 byte frames received");
2005 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL, 2006 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "Receive Undersized"); 2007 stats->namebuf, "Receive Undersized");
2007 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL, 2008 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "Fragmented Packets Received "); 2009 stats->namebuf, "Fragmented Packets Received ");
2009 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL, 2010 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "Oversized Packets Received"); 2011 stats->namebuf, "Oversized Packets Received");
2011 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL, 2012 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "Received Jabber"); 2013 stats->namebuf, "Received Jabber");
2013 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL, 2014 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Management Packets Received"); 2015 stats->namebuf, "Management Packets Received");
2015 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL, 2016 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2016 stats->namebuf, "Management Packets Dropped"); 2017 stats->namebuf, "Management Packets Dropped");
2017 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL, 2018 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Checksum Errors"); 2019 stats->namebuf, "Checksum Errors");
2019 2020
2020 /* Packet Transmission Stats */ 2021 /* Packet Transmission Stats */
2021 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL, 2022 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Good Octets Transmitted"); 2023 stats->namebuf, "Good Octets Transmitted");
2023 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL, 2024 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2024 stats->namebuf, "Total Packets Transmitted"); 2025 stats->namebuf, "Total Packets Transmitted");
2025 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL, 2026 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Good Packets Transmitted"); 2027 stats->namebuf, "Good Packets Transmitted");
2027 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL, 2028 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Broadcast Packets Transmitted"); 2029 stats->namebuf, "Broadcast Packets Transmitted");
2029 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL, 2030 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2030 stats->namebuf, "Multicast Packets Transmitted"); 2031 stats->namebuf, "Multicast Packets Transmitted");
2031 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL, 2032 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "Management Packets Transmitted"); 2033 stats->namebuf, "Management Packets Transmitted");
2033 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL, 2034 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "64 byte frames transmitted "); 2035 stats->namebuf, "64 byte frames transmitted ");
2035 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL, 2036 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "65-127 byte frames transmitted"); 2037 stats->namebuf, "65-127 byte frames transmitted");
2037 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL, 2038 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "128-255 byte frames transmitted"); 2039 stats->namebuf, "128-255 byte frames transmitted");
2039 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL, 2040 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "256-511 byte frames transmitted"); 2041 stats->namebuf, "256-511 byte frames transmitted");
2041 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL, 2042 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2042 stats->namebuf, "512-1023 byte frames transmitted"); 2043 stats->namebuf, "512-1023 byte frames transmitted");
2043 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL, 2044 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2044 stats->namebuf, "1024-1522 byte frames transmitted"); 2045 stats->namebuf, "1024-1522 byte frames transmitted");
2045} /* ixgbe_add_hw_stats */ 2046} /* ixgbe_add_hw_stats */
2046 2047
2047static void 2048static void
2048ixgbe_clear_evcnt(struct adapter *adapter) 2049ixgbe_clear_evcnt(struct adapter *adapter)
2049{ 2050{
2050 struct tx_ring *txr = adapter->tx_rings; 2051 struct tx_ring *txr = adapter->tx_rings;
2051 struct rx_ring *rxr = adapter->rx_rings; 2052 struct rx_ring *rxr = adapter->rx_rings;
2052 struct ixgbe_hw *hw = &adapter->hw; 2053 struct ixgbe_hw *hw = &adapter->hw;
2053 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 2054 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2054 int i; 2055 int i;
2055 2056
2056 adapter->efbig_tx_dma_setup.ev_count = 0; 2057 adapter->efbig_tx_dma_setup.ev_count = 0;
2057 adapter->mbuf_defrag_failed.ev_count = 0; 2058 adapter->mbuf_defrag_failed.ev_count = 0;
2058 adapter->efbig2_tx_dma_setup.ev_count = 0; 2059 adapter->efbig2_tx_dma_setup.ev_count = 0;
2059 adapter->einval_tx_dma_setup.ev_count = 0; 2060 adapter->einval_tx_dma_setup.ev_count = 0;
2060 adapter->other_tx_dma_setup.ev_count = 0; 2061 adapter->other_tx_dma_setup.ev_count = 0;
2061 adapter->eagain_tx_dma_setup.ev_count = 0; 2062 adapter->eagain_tx_dma_setup.ev_count = 0;
2062 adapter->enomem_tx_dma_setup.ev_count = 0; 2063 adapter->enomem_tx_dma_setup.ev_count = 0;
2063 adapter->tso_err.ev_count = 0; 2064 adapter->tso_err.ev_count = 0;
2064 adapter->watchdog_events.ev_count = 0; 2065 adapter->watchdog_events.ev_count = 0;
2065 adapter->link_irq.ev_count = 0; 2066 adapter->link_irq.ev_count = 0;
2066 adapter->link_sicount.ev_count = 0; 2067 adapter->link_sicount.ev_count = 0;
2067 adapter->mod_sicount.ev_count = 0; 2068 adapter->mod_sicount.ev_count = 0;
2068 adapter->msf_sicount.ev_count = 0; 2069 adapter->msf_sicount.ev_count = 0;
2069 adapter->phy_sicount.ev_count = 0; 2070 adapter->phy_sicount.ev_count = 0;
2070 2071
2071 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 2072 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2072 if (i < __arraycount(stats->mpc)) { 2073 if (i < __arraycount(stats->mpc)) {
2073 stats->mpc[i].ev_count = 0; 2074 stats->mpc[i].ev_count = 0;
2074 if (hw->mac.type == ixgbe_mac_82598EB) 2075 if (hw->mac.type == ixgbe_mac_82598EB)
2075 stats->rnbc[i].ev_count = 0; 2076 stats->rnbc[i].ev_count = 0;
2076 } 2077 }
2077 if (i < __arraycount(stats->pxontxc)) { 2078 if (i < __arraycount(stats->pxontxc)) {
2078 stats->pxontxc[i].ev_count = 0; 2079 stats->pxontxc[i].ev_count = 0;
2079 stats->pxonrxc[i].ev_count = 0; 2080 stats->pxonrxc[i].ev_count = 0;
2080 stats->pxofftxc[i].ev_count = 0; 2081 stats->pxofftxc[i].ev_count = 0;
2081 stats->pxoffrxc[i].ev_count = 0; 2082 stats->pxoffrxc[i].ev_count = 0;
2082 if (hw->mac.type >= ixgbe_mac_82599EB) 2083 if (hw->mac.type >= ixgbe_mac_82599EB)
2083 stats->pxon2offc[i].ev_count = 0; 2084 stats->pxon2offc[i].ev_count = 0;
2084 } 2085 }
2085 } 2086 }
2086 2087
2087 txr = adapter->tx_rings; 2088 txr = adapter->tx_rings;
2088 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2089 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2089 adapter->queues[i].irqs.ev_count = 0; 2090 adapter->queues[i].irqs.ev_count = 0;
2090 adapter->queues[i].handleq.ev_count = 0; 2091 adapter->queues[i].handleq.ev_count = 0;
2091 adapter->queues[i].req.ev_count = 0; 2092 adapter->queues[i].req.ev_count = 0;
2092 txr->no_desc_avail.ev_count = 0; 2093 txr->no_desc_avail.ev_count = 0;
2093 txr->total_packets.ev_count = 0; 2094 txr->total_packets.ev_count = 0;
2094 txr->tso_tx.ev_count = 0; 2095 txr->tso_tx.ev_count = 0;
2095#ifndef IXGBE_LEGACY_TX 2096#ifndef IXGBE_LEGACY_TX
2096 txr->pcq_drops.ev_count = 0; 2097 txr->pcq_drops.ev_count = 0;
2097#endif 2098#endif
2098 txr->q_efbig_tx_dma_setup = 0; 2099 txr->q_efbig_tx_dma_setup = 0;
2099 txr->q_mbuf_defrag_failed = 0; 2100 txr->q_mbuf_defrag_failed = 0;
2100 txr->q_efbig2_tx_dma_setup = 0; 2101 txr->q_efbig2_tx_dma_setup = 0;
2101 txr->q_einval_tx_dma_setup = 0; 2102 txr->q_einval_tx_dma_setup = 0;
2102 txr->q_other_tx_dma_setup = 0; 2103 txr->q_other_tx_dma_setup = 0;
2103 txr->q_eagain_tx_dma_setup = 0; 2104 txr->q_eagain_tx_dma_setup = 0;
2104 txr->q_enomem_tx_dma_setup = 0; 2105 txr->q_enomem_tx_dma_setup = 0;
2105 txr->q_tso_err = 0; 2106 txr->q_tso_err = 0;
2106 2107
2107 if (i < __arraycount(stats->qprc)) { 2108 if (i < __arraycount(stats->qprc)) {
2108 stats->qprc[i].ev_count = 0; 2109 stats->qprc[i].ev_count = 0;
2109 stats->qptc[i].ev_count = 0; 2110 stats->qptc[i].ev_count = 0;
2110 stats->qbrc[i].ev_count = 0; 2111 stats->qbrc[i].ev_count = 0;
2111 stats->qbtc[i].ev_count = 0; 2112 stats->qbtc[i].ev_count = 0;
2112 if (hw->mac.type >= ixgbe_mac_82599EB) 2113 if (hw->mac.type >= ixgbe_mac_82599EB)
2113 stats->qprdc[i].ev_count = 0; 2114 stats->qprdc[i].ev_count = 0;
2114 } 2115 }
2115 2116
2116 rxr->rx_packets.ev_count = 0; 2117 rxr->rx_packets.ev_count = 0;
2117 rxr->rx_bytes.ev_count = 0; 2118 rxr->rx_bytes.ev_count = 0;
2118 rxr->rx_copies.ev_count = 0; 2119 rxr->rx_copies.ev_count = 0;
2119 rxr->no_jmbuf.ev_count = 0; 2120 rxr->no_jmbuf.ev_count = 0;
2120 rxr->rx_discarded.ev_count = 0; 2121 rxr->rx_discarded.ev_count = 0;
2121 } 2122 }
2122 stats->ipcs.ev_count = 0; 2123 stats->ipcs.ev_count = 0;
2123 stats->l4cs.ev_count = 0; 2124 stats->l4cs.ev_count = 0;
2124 stats->ipcs_bad.ev_count = 0; 2125 stats->ipcs_bad.ev_count = 0;
2125 stats->l4cs_bad.ev_count = 0; 2126 stats->l4cs_bad.ev_count = 0;
2126 stats->intzero.ev_count = 0; 2127 stats->intzero.ev_count = 0;
2127 stats->legint.ev_count = 0; 2128 stats->legint.ev_count = 0;
2128 stats->crcerrs.ev_count = 0; 2129 stats->crcerrs.ev_count = 0;
2129 stats->illerrc.ev_count = 0; 2130 stats->illerrc.ev_count = 0;
2130 stats->errbc.ev_count = 0; 2131 stats->errbc.ev_count = 0;
2131 stats->mspdc.ev_count = 0; 2132 stats->mspdc.ev_count = 0;
2132 stats->mbsdc.ev_count = 0; 2133 stats->mbsdc.ev_count = 0;
2133 stats->mpctotal.ev_count = 0; 2134 stats->mpctotal.ev_count = 0;
2134 stats->mlfc.ev_count = 0; 2135 stats->mlfc.ev_count = 0;
2135 stats->mrfc.ev_count = 0; 2136 stats->mrfc.ev_count = 0;
2136 stats->rlec.ev_count = 0; 2137 stats->rlec.ev_count = 0;
2137 stats->lxontxc.ev_count = 0; 2138 stats->lxontxc.ev_count = 0;
2138 stats->lxonrxc.ev_count = 0; 2139 stats->lxonrxc.ev_count = 0;
2139 stats->lxofftxc.ev_count = 0; 2140 stats->lxofftxc.ev_count = 0;
2140 stats->lxoffrxc.ev_count = 0; 2141 stats->lxoffrxc.ev_count = 0;
2141 2142
2142 /* Packet Reception Stats */ 2143 /* Packet Reception Stats */
2143 stats->tor.ev_count = 0; 2144 stats->tor.ev_count = 0;
2144 stats->gorc.ev_count = 0; 2145 stats->gorc.ev_count = 0;
2145 stats->tpr.ev_count = 0; 2146 stats->tpr.ev_count = 0;
2146 stats->gprc.ev_count = 0; 2147 stats->gprc.ev_count = 0;
2147 stats->mprc.ev_count = 0; 2148 stats->mprc.ev_count = 0;
2148 stats->bprc.ev_count = 0; 2149 stats->bprc.ev_count = 0;
2149 stats->prc64.ev_count = 0; 2150 stats->prc64.ev_count = 0;
2150 stats->prc127.ev_count = 0; 2151 stats->prc127.ev_count = 0;
2151 stats->prc255.ev_count = 0; 2152 stats->prc255.ev_count = 0;
2152 stats->prc511.ev_count = 0; 2153 stats->prc511.ev_count = 0;
2153 stats->prc1023.ev_count = 0; 2154 stats->prc1023.ev_count = 0;
2154 stats->prc1522.ev_count = 0; 2155 stats->prc1522.ev_count = 0;
2155 stats->ruc.ev_count = 0; 2156 stats->ruc.ev_count = 0;
2156 stats->rfc.ev_count = 0; 2157 stats->rfc.ev_count = 0;
2157 stats->roc.ev_count = 0; 2158 stats->roc.ev_count = 0;
2158 stats->rjc.ev_count = 0; 2159 stats->rjc.ev_count = 0;
2159 stats->mngprc.ev_count = 0; 2160 stats->mngprc.ev_count = 0;
2160 stats->mngpdc.ev_count = 0; 2161 stats->mngpdc.ev_count = 0;
2161 stats->xec.ev_count = 0; 2162 stats->xec.ev_count = 0;
2162 2163
2163 /* Packet Transmission Stats */ 2164 /* Packet Transmission Stats */
2164 stats->gotc.ev_count = 0; 2165 stats->gotc.ev_count = 0;
2165 stats->tpt.ev_count = 0; 2166 stats->tpt.ev_count = 0;
2166 stats->gptc.ev_count = 0; 2167 stats->gptc.ev_count = 0;
2167 stats->bptc.ev_count = 0; 2168 stats->bptc.ev_count = 0;
2168 stats->mptc.ev_count = 0; 2169 stats->mptc.ev_count = 0;
2169 stats->mngptc.ev_count = 0; 2170 stats->mngptc.ev_count = 0;
2170 stats->ptc64.ev_count = 0; 2171 stats->ptc64.ev_count = 0;
2171 stats->ptc127.ev_count = 0; 2172 stats->ptc127.ev_count = 0;
2172 stats->ptc255.ev_count = 0; 2173 stats->ptc255.ev_count = 0;
2173 stats->ptc511.ev_count = 0; 2174 stats->ptc511.ev_count = 0;
2174 stats->ptc1023.ev_count = 0; 2175 stats->ptc1023.ev_count = 0;
2175 stats->ptc1522.ev_count = 0; 2176 stats->ptc1522.ev_count = 0;
2176} 2177}
2177 2178
2178/************************************************************************ 2179/************************************************************************
2179 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function 2180 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
2180 * 2181 *
2181 * Retrieves the TDH value from the hardware 2182 * Retrieves the TDH value from the hardware
2182 ************************************************************************/ 2183 ************************************************************************/
2183static int 2184static int
2184ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS) 2185ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
2185{ 2186{
2186 struct sysctlnode node = *rnode; 2187 struct sysctlnode node = *rnode;
2187 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 2188 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2188 struct adapter *adapter; 2189 struct adapter *adapter;
2189 uint32_t val; 2190 uint32_t val;
2190 2191
2191 if (!txr) 2192 if (!txr)
2192 return (0); 2193 return (0);
2193 2194
2194 adapter = txr->adapter; 2195 adapter = txr->adapter;
2195 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2196 if (ixgbe_fw_recovery_mode_swflag(adapter))
2196 return (EPERM); 2197 return (EPERM);
2197 2198
2198 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)); 2199 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
2199 node.sysctl_data = &val; 2200 node.sysctl_data = &val;
2200 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2201 return sysctl_lookup(SYSCTLFN_CALL(&node));
2201} /* ixgbe_sysctl_tdh_handler */ 2202} /* ixgbe_sysctl_tdh_handler */
2202 2203
2203/************************************************************************ 2204/************************************************************************
2204 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 2205 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
2205 * 2206 *
2206 * Retrieves the TDT value from the hardware 2207 * Retrieves the TDT value from the hardware
2207 ************************************************************************/ 2208 ************************************************************************/
2208static int 2209static int
2209ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS) 2210ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
2210{ 2211{
2211 struct sysctlnode node = *rnode; 2212 struct sysctlnode node = *rnode;
2212 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 2213 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
2213 struct adapter *adapter; 2214 struct adapter *adapter;
2214 uint32_t val; 2215 uint32_t val;
2215 2216
2216 if (!txr) 2217 if (!txr)
2217 return (0); 2218 return (0);
2218 2219
2219 adapter = txr->adapter; 2220 adapter = txr->adapter;
2220 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2221 if (ixgbe_fw_recovery_mode_swflag(adapter))
2221 return (EPERM); 2222 return (EPERM);
2222 2223
2223 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)); 2224 val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
2224 node.sysctl_data = &val; 2225 node.sysctl_data = &val;
2225 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2226 return sysctl_lookup(SYSCTLFN_CALL(&node));
2226} /* ixgbe_sysctl_tdt_handler */ 2227} /* ixgbe_sysctl_tdt_handler */
2227 2228
2228/************************************************************************ 2229/************************************************************************
2229 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check 2230 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
2230 * handler function 2231 * handler function
2231 * 2232 *
2232 * Retrieves the next_to_check value 2233 * Retrieves the next_to_check value
2233 ************************************************************************/ 2234 ************************************************************************/
2234static int 2235static int
2235ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS) 2236ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
2236{ 2237{
2237 struct sysctlnode node = *rnode; 2238 struct sysctlnode node = *rnode;
2238 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2239 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2239 struct adapter *adapter; 2240 struct adapter *adapter;
2240 uint32_t val; 2241 uint32_t val;
2241 2242
2242 if (!rxr) 2243 if (!rxr)
2243 return (0); 2244 return (0);
2244 2245
2245 adapter = rxr->adapter; 2246 adapter = rxr->adapter;
2246 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2247 if (ixgbe_fw_recovery_mode_swflag(adapter))
2247 return (EPERM); 2248 return (EPERM);
2248 2249
2249 val = rxr->next_to_check; 2250 val = rxr->next_to_check;
2250 node.sysctl_data = &val; 2251 node.sysctl_data = &val;
2251 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2252 return sysctl_lookup(SYSCTLFN_CALL(&node));
2252} /* ixgbe_sysctl_next_to_check_handler */ 2253} /* ixgbe_sysctl_next_to_check_handler */
2253 2254
2254/************************************************************************ 2255/************************************************************************
2255 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function 2256 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
2256 * 2257 *
2257 * Retrieves the RDH value from the hardware 2258 * Retrieves the RDH value from the hardware
2258 ************************************************************************/ 2259 ************************************************************************/
2259static int 2260static int
2260ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS) 2261ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
2261{ 2262{
2262 struct sysctlnode node = *rnode; 2263 struct sysctlnode node = *rnode;
2263 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2264 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2264 struct adapter *adapter; 2265 struct adapter *adapter;
2265 uint32_t val; 2266 uint32_t val;
2266 2267
2267 if (!rxr) 2268 if (!rxr)
2268 return (0); 2269 return (0);
2269 2270
2270 adapter = rxr->adapter; 2271 adapter = rxr->adapter;
2271 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2272 if (ixgbe_fw_recovery_mode_swflag(adapter))
2272 return (EPERM); 2273 return (EPERM);
2273 2274
2274 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me)); 2275 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
2275 node.sysctl_data = &val; 2276 node.sysctl_data = &val;
2276 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2277 return sysctl_lookup(SYSCTLFN_CALL(&node));
2277} /* ixgbe_sysctl_rdh_handler */ 2278} /* ixgbe_sysctl_rdh_handler */
2278 2279
2279/************************************************************************ 2280/************************************************************************
2280 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function 2281 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
2281 * 2282 *
2282 * Retrieves the RDT value from the hardware 2283 * Retrieves the RDT value from the hardware
2283 ************************************************************************/ 2284 ************************************************************************/
2284static int 2285static int
2285ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS) 2286ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
2286{ 2287{
2287 struct sysctlnode node = *rnode; 2288 struct sysctlnode node = *rnode;
2288 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2289 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
2289 struct adapter *adapter; 2290 struct adapter *adapter;
2290 uint32_t val; 2291 uint32_t val;
2291 2292
2292 if (!rxr) 2293 if (!rxr)
2293 return (0); 2294 return (0);
2294 2295
2295 adapter = rxr->adapter; 2296 adapter = rxr->adapter;
2296 if (ixgbe_fw_recovery_mode_swflag(adapter)) 2297 if (ixgbe_fw_recovery_mode_swflag(adapter))
2297 return (EPERM); 2298 return (EPERM);
2298 2299
2299 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me)); 2300 val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
2300 node.sysctl_data = &val; 2301 node.sysctl_data = &val;
2301 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2302 return sysctl_lookup(SYSCTLFN_CALL(&node));
2302} /* ixgbe_sysctl_rdt_handler */ 2303} /* ixgbe_sysctl_rdt_handler */
2303 2304
2304static int 2305static int
2305ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 2306ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2306{ 2307{
2307 struct ifnet *ifp = &ec->ec_if; 2308 struct ifnet *ifp = &ec->ec_if;
 2309 struct adapter *adapter = ifp->if_softc;
2308 int rv; 2310 int rv;
2309 2311
2310 if (set) 2312 if (set)
2311 rv = ixgbe_register_vlan(ifp->if_softc, ifp, vid); 2313 rv = ixgbe_register_vlan(ifp->if_softc, ifp, vid);
2312 else 2314 else
2313 rv = ixgbe_unregister_vlan(ifp->if_softc, ifp, vid); 2315 rv = ixgbe_unregister_vlan(ifp->if_softc, ifp, vid);
2314 2316
 2317 if (rv != 0)
 2318 return rv;
 2319
 2320 /*
 2321 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
 2322 * or 0 to 1.
 2323 */
 2324 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
 2325 ixgbe_setup_vlan_hw_tagging(adapter);
 2326
2315 return rv; 2327 return rv;
2316} 2328}
2317 2329
2318/************************************************************************ 2330/************************************************************************
2319 * ixgbe_register_vlan 2331 * ixgbe_register_vlan
2320 * 2332 *
2321 * Run via vlan config EVENT, it enables us to use the 2333 * Run via vlan config EVENT, it enables us to use the
2322 * HW Filter table since we can get the vlan id. This 2334 * HW Filter table since we can get the vlan id. This
2323 * just creates the entry in the soft version of the 2335 * just creates the entry in the soft version of the
2324 * VFTA, init will repopulate the real table. 2336 * VFTA, init will repopulate the real table.
2325 ************************************************************************/ 2337 ************************************************************************/
2326static int 2338static int
2327ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2339ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2328{ 2340{
2329 struct adapter *adapter = ifp->if_softc; 2341 struct adapter *adapter = ifp->if_softc;
2330 u16 index, bit; 2342 u16 index, bit;
2331 int error; 2343 int error;
2332 2344
2333 if (ifp->if_softc != arg) /* Not our event */ 2345 if (ifp->if_softc != arg) /* Not our event */
2334 return EINVAL; 2346 return EINVAL;
2335 2347
2336 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2348 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2337 return EINVAL; 2349 return EINVAL;
2338 2350
2339 IXGBE_CORE_LOCK(adapter); 2351 IXGBE_CORE_LOCK(adapter);
2340 index = (vtag >> 5) & 0x7F; 2352 index = (vtag >> 5) & 0x7F;
2341 bit = vtag & 0x1F; 2353 bit = vtag & 0x1F;
2342 adapter->shadow_vfta[index] |= ((u32)1 << bit); 2354 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2343 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true, 2355 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
2344 true); 2356 true);
2345 IXGBE_CORE_UNLOCK(adapter); 2357 IXGBE_CORE_UNLOCK(adapter);
2346 if (error != 0) 2358 if (error != 0)
2347 error = EACCES; 2359 error = EACCES;
2348 2360
2349 return error; 2361 return error;
2350} /* ixgbe_register_vlan */ 2362} /* ixgbe_register_vlan */
2351 2363
2352/************************************************************************ 2364/************************************************************************
2353 * ixgbe_unregister_vlan 2365 * ixgbe_unregister_vlan
2354 * 2366 *
2355 * Run via vlan unconfig EVENT, remove our entry in the soft vfta. 2367 * Run via vlan unconfig EVENT, remove our entry in the soft vfta.
2356 ************************************************************************/ 2368 ************************************************************************/
2357static int 2369static int
2358ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2370ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2359{ 2371{
2360 struct adapter *adapter = ifp->if_softc; 2372 struct adapter *adapter = ifp->if_softc;
2361 u16 index, bit; 2373 u16 index, bit;
2362 int error; 2374 int error;
2363 2375
2364 if (ifp->if_softc != arg) 2376 if (ifp->if_softc != arg)
2365 return EINVAL; 2377 return EINVAL;
2366 2378
2367 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2379 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2368 return EINVAL; 2380 return EINVAL;
2369 2381
2370 IXGBE_CORE_LOCK(adapter); 2382 IXGBE_CORE_LOCK(adapter);
2371 index = (vtag >> 5) & 0x7F; 2383 index = (vtag >> 5) & 0x7F;
2372 bit = vtag & 0x1F; 2384 bit = vtag & 0x1F;
2373 adapter->shadow_vfta[index] &= ~((u32)1 << bit); 2385 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2374 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false, 2386 error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
2375 true); 2387 true);
2376 IXGBE_CORE_UNLOCK(adapter); 2388 IXGBE_CORE_UNLOCK(adapter);
2377 if (error != 0) 2389 if (error != 0)
2378 error = EACCES; 2390 error = EACCES;
2379 2391
2380 return error; 2392 return error;
2381} /* ixgbe_unregister_vlan */ 2393} /* ixgbe_unregister_vlan */
2382 2394
2383static void 2395static void
2384ixgbe_setup_vlan_hw_support(struct adapter *adapter) 2396ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
2385{ 2397{
2386 struct ethercom *ec = &adapter->osdep.ec; 2398 struct ethercom *ec = &adapter->osdep.ec;
2387 struct ixgbe_hw *hw = &adapter->hw; 2399 struct ixgbe_hw *hw = &adapter->hw;
2388 struct rx_ring *rxr; 2400 struct rx_ring *rxr;
2389 int i; 
2390 u32 ctrl; 2401 u32 ctrl;
2391 struct vlanid_list *vlanidp; 2402 int i;
2392 bool hwtagging; 2403 bool hwtagging;
2393 2404
2394 /* 
2395 * This function is called from both if_init and ifflags_cb() 
2396 * on NetBSD. 
2397 */ 
2398 
2399 /* Enable HW tagging only if any vlan is attached */ 2405 /* Enable HW tagging only if any vlan is attached */
2400 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) 2406 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2401 && VLAN_ATTACHED(ec); 2407 && VLAN_ATTACHED(ec);
2402 2408
2403 /* Setup the queues for vlans */ 2409 /* Setup the queues for vlans */
2404 for (i = 0; i < adapter->num_queues; i++) { 2410 for (i = 0; i < adapter->num_queues; i++) {
2405 rxr = &adapter->rx_rings[i]; 2411 rxr = &adapter->rx_rings[i];
2406 /* 2412 /*
2407 * On 82599 and later, the VLAN enable is per/queue in RXDCTL. 2413 * On 82599 and later, the VLAN enable is per/queue in RXDCTL.
2408 */ 2414 */
2409 if (hw->mac.type != ixgbe_mac_82598EB) { 2415 if (hw->mac.type != ixgbe_mac_82598EB) {
2410 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); 2416 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
2411 if (hwtagging) 2417 if (hwtagging)
2412 ctrl |= IXGBE_RXDCTL_VME; 2418 ctrl |= IXGBE_RXDCTL_VME;
2413 else 2419 else
2414 ctrl &= ~IXGBE_RXDCTL_VME; 2420 ctrl &= ~IXGBE_RXDCTL_VME;
2415 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); 2421 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
2416 } 2422 }
2417 rxr->vtag_strip = hwtagging ? TRUE : FALSE; 2423 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2418 } 2424 }
2419 2425
 2426 /* VLAN hw tagging for 82598 */
 2427 if (hw->mac.type == ixgbe_mac_82598EB) {
 2428 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 2429 if (hwtagging)
 2430 ctrl |= IXGBE_VLNCTRL_VME;
 2431 else
 2432 ctrl &= ~IXGBE_VLNCTRL_VME;
 2433 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
 2434 }
 2435} /* ixgbe_setup_vlan_hw_tagging */
 2436
 2437static void
 2438ixgbe_setup_vlan_hw_support(struct adapter *adapter)
 2439{
 2440 struct ethercom *ec = &adapter->osdep.ec;
 2441 struct ixgbe_hw *hw = &adapter->hw;
 2442 int i;
 2443 u32 ctrl;
 2444 struct vlanid_list *vlanidp;
 2445
 2446 /*
 2447 * This function is called from both if_init and ifflags_cb()
 2448 * on NetBSD.
 2449 */
 2450
 2451 /*
 2452 * Part 1:
 2453 * Setup VLAN HW tagging
 2454 */
 2455 ixgbe_setup_vlan_hw_tagging(adapter);
 2456
 2457 /*
 2458 * Part 2:
 2459 * Setup VLAN HW filter
 2460 */
2420 /* Cleanup shadow_vfta */ 2461 /* Cleanup shadow_vfta */
2421 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 2462 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2422 adapter->shadow_vfta[i] = 0; 2463 adapter->shadow_vfta[i] = 0;
2423 /* Generate shadow_vfta from ec_vids */ 2464 /* Generate shadow_vfta from ec_vids */
2424 mutex_enter(ec->ec_lock); 2465 ETHER_LOCK(ec);
2425 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 2466 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2426 uint32_t idx; 2467 uint32_t idx;
2427 2468
2428 idx = vlanidp->vid / 32; 2469 idx = vlanidp->vid / 32;
2429 KASSERT(idx < IXGBE_VFTA_SIZE); 2470 KASSERT(idx < IXGBE_VFTA_SIZE);
2430 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); 2471 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2431 } 2472 }
2432 mutex_exit(ec->ec_lock); 2473 ETHER_UNLOCK(ec);
2433 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 2474 for (i = 0; i < IXGBE_VFTA_SIZE; i++)
2434 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]); 2475 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);
2435 2476
2436 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2477 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2437 /* Enable the Filter Table if enabled */ 2478 /* Enable the Filter Table if enabled */
2438 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) 2479 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
2439 ctrl |= IXGBE_VLNCTRL_VFE; 2480 ctrl |= IXGBE_VLNCTRL_VFE;
2440 else 2481 else
2441 ctrl &= ~IXGBE_VLNCTRL_VFE; 2482 ctrl &= ~IXGBE_VLNCTRL_VFE;
2442 /* VLAN hw tagging for 82598 */ 
2443 if (hw->mac.type == ixgbe_mac_82598EB) { 
2444 if (hwtagging) 
2445 ctrl |= IXGBE_VLNCTRL_VME; 
2446 else 
2447 ctrl &= ~IXGBE_VLNCTRL_VME; 
2448 } 
2449 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2483 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
2450} /* ixgbe_setup_vlan_hw_support */ 2484} /* ixgbe_setup_vlan_hw_support */
2451 2485
2452/************************************************************************ 2486/************************************************************************
2453 * ixgbe_get_slot_info 2487 * ixgbe_get_slot_info
2454 * 2488 *
2455 * Get the width and transaction speed of 2489 * Get the width and transaction speed of
2456 * the slot this adapter is plugged into. 2490 * the slot this adapter is plugged into.
2457 ************************************************************************/ 2491 ************************************************************************/
2458static void 2492static void
2459ixgbe_get_slot_info(struct adapter *adapter) 2493ixgbe_get_slot_info(struct adapter *adapter)
2460{ 2494{
2461 device_t dev = adapter->dev; 2495 device_t dev = adapter->dev;
2462 struct ixgbe_hw *hw = &adapter->hw; 2496 struct ixgbe_hw *hw = &adapter->hw;
2463 u32 offset; 2497 u32 offset;
2464 u16 link; 2498 u16 link;
2465 int bus_info_valid = TRUE; 2499 int bus_info_valid = TRUE;
2466 2500
2467 /* Some devices are behind an internal bridge */ 2501 /* Some devices are behind an internal bridge */
2468 switch (hw->device_id) { 2502 switch (hw->device_id) {
2469 case IXGBE_DEV_ID_82599_SFP_SF_QP: 2503 case IXGBE_DEV_ID_82599_SFP_SF_QP:
2470 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 2504 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
2471 goto get_parent_info; 2505 goto get_parent_info;
2472 default: 2506 default:
2473 break; 2507 break;
2474 } 2508 }
2475 2509
2476 ixgbe_get_bus_info(hw); 2510 ixgbe_get_bus_info(hw);
2477 2511
2478 /* 2512 /*
2479 * Some devices don't use PCI-E, but there is no need 2513 * Some devices don't use PCI-E, but there is no need
2480 * to display "Unknown" for bus speed and width. 2514 * to display "Unknown" for bus speed and width.
2481 */ 2515 */
2482 switch (hw->mac.type) { 2516 switch (hw->mac.type) {
2483 case ixgbe_mac_X550EM_x: 2517 case ixgbe_mac_X550EM_x:
2484 case ixgbe_mac_X550EM_a: 2518 case ixgbe_mac_X550EM_a:
2485 return; 2519 return;
2486 default: 2520 default:
2487 goto display; 2521 goto display;
2488 } 2522 }
2489 2523
2490get_parent_info: 2524get_parent_info:
2491 /* 2525 /*
2492 * For the Quad port adapter we need to parse back 2526 * For the Quad port adapter we need to parse back
2493 * up the PCI tree to find the speed of the expansion 2527 * up the PCI tree to find the speed of the expansion
2494 * slot into which this adapter is plugged. A bit more work. 2528 * slot into which this adapter is plugged. A bit more work.
2495 */ 2529 */
2496 dev = device_parent(device_parent(dev)); 2530 dev = device_parent(device_parent(dev));
2497#if 0 2531#if 0
2498#ifdef IXGBE_DEBUG 2532#ifdef IXGBE_DEBUG
2499 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), 2533 device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
2500 pci_get_slot(dev), pci_get_function(dev)); 2534 pci_get_slot(dev), pci_get_function(dev));
2501#endif 2535#endif
2502 dev = device_parent(device_parent(dev)); 2536 dev = device_parent(device_parent(dev));
2503#ifdef IXGBE_DEBUG 2537#ifdef IXGBE_DEBUG
2504 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), 2538 device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
2505 pci_get_slot(dev), pci_get_function(dev)); 2539 pci_get_slot(dev), pci_get_function(dev));
2506#endif 2540#endif
2507#endif 2541#endif
2508 /* Now get the PCI Express Capabilities offset */ 2542 /* Now get the PCI Express Capabilities offset */
2509 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag, 2543 if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
2510 PCI_CAP_PCIEXPRESS, &offset, NULL)) { 2544 PCI_CAP_PCIEXPRESS, &offset, NULL)) {
2511 /* 2545 /*
2512 * Hmm...can't get PCI-Express capabilities. 2546 * Hmm...can't get PCI-Express capabilities.
2513 * Falling back to default method. 2547 * Falling back to default method.
2514 */ 2548 */
2515 bus_info_valid = FALSE; 2549 bus_info_valid = FALSE;
2516 ixgbe_get_bus_info(hw); 2550 ixgbe_get_bus_info(hw);
2517 goto display; 2551 goto display;
2518 } 2552 }
2519 /* ...and read the Link Status Register */ 2553 /* ...and read the Link Status Register */
2520 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag, 2554 link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
2521 offset + PCIE_LCSR) >> 16; 2555 offset + PCIE_LCSR) >> 16;
2522 ixgbe_set_pci_config_data_generic(hw, link); 2556 ixgbe_set_pci_config_data_generic(hw, link);
2523 2557
2524display: 2558display:
2525 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n", 2559 device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
2526 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : 2560 ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
2527 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : 2561 (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
2528 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : 2562 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
2529 "Unknown"), 2563 "Unknown"),
2530 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" : 2564 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
2531 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" : 2565 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
2532 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" : 2566 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
2533 "Unknown")); 2567 "Unknown"));
2534 2568
2535 if (bus_info_valid) { 2569 if (bus_info_valid) {
2536 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && 2570 if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2537 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && 2571 ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
2538 (hw->bus.speed == ixgbe_bus_speed_2500))) { 2572 (hw->bus.speed == ixgbe_bus_speed_2500))) {
2539 device_printf(dev, "PCI-Express bandwidth available" 2573 device_printf(dev, "PCI-Express bandwidth available"
2540 " for this card\n is not sufficient for" 2574 " for this card\n is not sufficient for"
2541 " optimal performance.\n"); 2575 " optimal performance.\n");
2542 device_printf(dev, "For optimal performance a x8 " 2576 device_printf(dev, "For optimal performance a x8 "
2543 "PCIE, or x4 PCIE Gen2 slot is required.\n"); 2577 "PCIE, or x4 PCIE Gen2 slot is required.\n");
2544 } 2578 }
2545 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && 2579 if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
2546 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && 2580 ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
2547 (hw->bus.speed < ixgbe_bus_speed_8000))) { 2581 (hw->bus.speed < ixgbe_bus_speed_8000))) {
2548 device_printf(dev, "PCI-Express bandwidth available" 2582 device_printf(dev, "PCI-Express bandwidth available"
2549 " for this card\n is not sufficient for" 2583 " for this card\n is not sufficient for"
2550 " optimal performance.\n"); 2584 " optimal performance.\n");
2551 device_printf(dev, "For optimal performance a x8 " 2585 device_printf(dev, "For optimal performance a x8 "
2552 "PCIE Gen3 slot is required.\n"); 2586 "PCIE Gen3 slot is required.\n");
2553 } 2587 }
2554 } else 2588 } else
2555 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); 2589 device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
2556 2590
2557 return; 2591 return;
2558} /* ixgbe_get_slot_info */ 2592} /* ixgbe_get_slot_info */
2559 2593
2560/************************************************************************ 2594/************************************************************************
2561 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets 2595 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
2562 ************************************************************************/ 2596 ************************************************************************/
2563static inline void 2597static inline void
2564ixgbe_enable_queue(struct adapter *adapter, u32 vector) 2598ixgbe_enable_queue(struct adapter *adapter, u32 vector)
2565{ 2599{
2566 struct ixgbe_hw *hw = &adapter->hw; 2600 struct ixgbe_hw *hw = &adapter->hw;
2567 struct ix_queue *que = &adapter->queues[vector]; 2601 struct ix_queue *que = &adapter->queues[vector];
2568 u64 queue = 1ULL << vector; 2602 u64 queue = 1ULL << vector;
2569 u32 mask; 2603 u32 mask;
2570 2604
2571 mutex_enter(&que->dc_mtx); 2605 mutex_enter(&que->dc_mtx);
2572 if (que->disabled_count > 0 && --que->disabled_count > 0) 2606 if (que->disabled_count > 0 && --que->disabled_count > 0)
2573 goto out; 2607 goto out;
2574 2608
2575 if (hw->mac.type == ixgbe_mac_82598EB) { 2609 if (hw->mac.type == ixgbe_mac_82598EB) {
2576 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 2610 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2577 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 2611 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2578 } else { 2612 } else {
2579 mask = (queue & 0xFFFFFFFF); 2613 mask = (queue & 0xFFFFFFFF);
2580 if (mask) 2614 if (mask)
2581 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 2615 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
2582 mask = (queue >> 32); 2616 mask = (queue >> 32);
2583 if (mask) 2617 if (mask)
2584 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 2618 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
2585 } 2619 }
2586out: 2620out:
2587 mutex_exit(&que->dc_mtx); 2621 mutex_exit(&que->dc_mtx);
2588} /* ixgbe_enable_queue */ 2622} /* ixgbe_enable_queue */
2589 2623
2590/************************************************************************ 2624/************************************************************************
2591 * ixgbe_disable_queue_internal 2625 * ixgbe_disable_queue_internal
2592 ************************************************************************/ 2626 ************************************************************************/
2593static inline void 2627static inline void
2594ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok) 2628ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
2595{ 2629{
2596 struct ixgbe_hw *hw = &adapter->hw; 2630 struct ixgbe_hw *hw = &adapter->hw;
2597 struct ix_queue *que = &adapter->queues[vector]; 2631 struct ix_queue *que = &adapter->queues[vector];
2598 u64 queue = 1ULL << vector; 2632 u64 queue = 1ULL << vector;
2599 u32 mask; 2633 u32 mask;
2600 2634
2601 mutex_enter(&que->dc_mtx); 2635 mutex_enter(&que->dc_mtx);
2602 2636
2603 if (que->disabled_count > 0) { 2637 if (que->disabled_count > 0) {
2604 if (nestok) 2638 if (nestok)
2605 que->disabled_count++; 2639 que->disabled_count++;
2606 goto out; 2640 goto out;
2607 } 2641 }
2608 que->disabled_count++; 2642 que->disabled_count++;
2609 2643
2610 if (hw->mac.type == ixgbe_mac_82598EB) { 2644 if (hw->mac.type == ixgbe_mac_82598EB) {
2611 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 2645 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
2612 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); 2646 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
2613 } else { 2647 } else {
2614 mask = (queue & 0xFFFFFFFF); 2648 mask = (queue & 0xFFFFFFFF);
2615 if (mask) 2649 if (mask)
2616 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); 2650 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
2617 mask = (queue >> 32); 2651 mask = (queue >> 32);
2618 if (mask) 2652 if (mask)
2619 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); 2653 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
2620 } 2654 }
2621out: 2655out:
2622 mutex_exit(&que->dc_mtx); 2656 mutex_exit(&que->dc_mtx);
2623} /* ixgbe_disable_queue_internal */ 2657} /* ixgbe_disable_queue_internal */
2624 2658
2625/************************************************************************ 2659/************************************************************************
2626 * ixgbe_disable_queue 2660 * ixgbe_disable_queue
2627 ************************************************************************/ 2661 ************************************************************************/
2628static inline void 2662static inline void
2629ixgbe_disable_queue(struct adapter *adapter, u32 vector) 2663ixgbe_disable_queue(struct adapter *adapter, u32 vector)
2630{ 2664{
2631 2665
2632 ixgbe_disable_queue_internal(adapter, vector, true); 2666 ixgbe_disable_queue_internal(adapter, vector, true);
2633} /* ixgbe_disable_queue */ 2667} /* ixgbe_disable_queue */
2634 2668
2635/************************************************************************ 2669/************************************************************************
2636 * ixgbe_sched_handle_que - schedule deferred packet processing 2670 * ixgbe_sched_handle_que - schedule deferred packet processing
2637 ************************************************************************/ 2671 ************************************************************************/
2638static inline void 2672static inline void
2639ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que) 2673ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
2640{ 2674{
2641 2675
2642 if (que->txrx_use_workqueue) { 2676 if (que->txrx_use_workqueue) {
2643 /* 2677 /*
2644 * adapter->que_wq is bound to each CPU instead of 2678 * adapter->que_wq is bound to each CPU instead of
2645 * each NIC queue to reduce workqueue kthread. As we 2679 * each NIC queue to reduce workqueue kthread. As we
2646 * should consider about interrupt affinity in this 2680 * should consider about interrupt affinity in this
2647 * function, the workqueue kthread must be WQ_PERCPU. 2681 * function, the workqueue kthread must be WQ_PERCPU.
2648 * If create WQ_PERCPU workqueue kthread for each NIC 2682 * If create WQ_PERCPU workqueue kthread for each NIC
2649 * queue, that number of created workqueue kthread is 2683 * queue, that number of created workqueue kthread is
2650 * (number of used NIC queue) * (number of CPUs) = 2684 * (number of used NIC queue) * (number of CPUs) =
2651 * (number of CPUs) ^ 2 most often. 2685 * (number of CPUs) ^ 2 most often.
2652 * 2686 *
2653 * The same NIC queue's interrupts are avoided by 2687 * The same NIC queue's interrupts are avoided by
2654 * masking the queue's interrupt. And different 2688 * masking the queue's interrupt. And different
2655 * NIC queue's interrupts use different struct work 2689 * NIC queue's interrupts use different struct work
2656 * (que->wq_cookie). So, "enqueued flag" to avoid 2690 * (que->wq_cookie). So, "enqueued flag" to avoid
2657 * twice workqueue_enqueue() is not required . 2691 * twice workqueue_enqueue() is not required .
2658 */ 2692 */
2659 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu()); 2693 workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
2660 } else { 2694 } else {
2661 softint_schedule(que->que_si); 2695 softint_schedule(que->que_si);
2662 } 2696 }
2663} 2697}
2664 2698
2665/************************************************************************ 2699/************************************************************************
2666 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine 2700 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
2667 ************************************************************************/ 2701 ************************************************************************/
2668static int 2702static int
2669ixgbe_msix_que(void *arg) 2703ixgbe_msix_que(void *arg)
2670{ 2704{
2671 struct ix_queue *que = arg; 2705 struct ix_queue *que = arg;
2672 struct adapter *adapter = que->adapter; 2706 struct adapter *adapter = que->adapter;
2673 struct ifnet *ifp = adapter->ifp; 2707 struct ifnet *ifp = adapter->ifp;
2674 struct tx_ring *txr = que->txr; 2708 struct tx_ring *txr = que->txr;
2675 struct rx_ring *rxr = que->rxr; 2709 struct rx_ring *rxr = que->rxr;
2676 bool more; 2710 bool more;
2677 u32 newitr = 0; 2711 u32 newitr = 0;
2678 2712
2679 /* Protect against spurious interrupts */ 2713 /* Protect against spurious interrupts */
2680 if ((ifp->if_flags & IFF_RUNNING) == 0) 2714 if ((ifp->if_flags & IFF_RUNNING) == 0)
2681 return 0; 2715 return 0;
2682 2716
2683 ixgbe_disable_queue(adapter, que->msix); 2717 ixgbe_disable_queue(adapter, que->msix);
2684 ++que->irqs.ev_count; 2718 ++que->irqs.ev_count;
2685 2719
2686 /* 2720 /*
2687 * Don't change "que->txrx_use_workqueue" from this point to avoid 2721 * Don't change "que->txrx_use_workqueue" from this point to avoid
2688 * flip-flopping softint/workqueue mode in one deferred processing. 2722 * flip-flopping softint/workqueue mode in one deferred processing.
2689 */ 2723 */
2690 que->txrx_use_workqueue = adapter->txrx_use_workqueue; 2724 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
2691 2725
2692#ifdef __NetBSD__ 2726#ifdef __NetBSD__
2693 /* Don't run ixgbe_rxeof in interrupt context */ 2727 /* Don't run ixgbe_rxeof in interrupt context */
2694 more = true; 2728 more = true;
2695#else 2729#else
2696 more = ixgbe_rxeof(que); 2730 more = ixgbe_rxeof(que);
2697#endif 2731#endif
2698 2732
2699 IXGBE_TX_LOCK(txr); 2733 IXGBE_TX_LOCK(txr);
2700 ixgbe_txeof(txr); 2734 ixgbe_txeof(txr);
2701 IXGBE_TX_UNLOCK(txr); 2735 IXGBE_TX_UNLOCK(txr);
2702 2736
2703 /* Do AIM now? */ 2737 /* Do AIM now? */
2704 2738
2705 if (adapter->enable_aim == false) 2739 if (adapter->enable_aim == false)
2706 goto no_calc; 2740 goto no_calc;
2707 /* 2741 /*
2708 * Do Adaptive Interrupt Moderation: 2742 * Do Adaptive Interrupt Moderation:
2709 * - Write out last calculated setting 2743 * - Write out last calculated setting
2710 * - Calculate based on average size over 2744 * - Calculate based on average size over
2711 * the last interval. 2745 * the last interval.
2712 */ 2746 */
2713 if (que->eitr_setting) 2747 if (que->eitr_setting)
2714 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting); 2748 ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);
2715 2749
2716 que->eitr_setting = 0; 2750 que->eitr_setting = 0;
2717 2751
2718 /* Idle, do nothing */ 2752 /* Idle, do nothing */
2719 if ((txr->bytes == 0) && (rxr->bytes == 0)) 2753 if ((txr->bytes == 0) && (rxr->bytes == 0))
2720 goto no_calc; 2754 goto no_calc;
2721 2755
2722 if ((txr->bytes) && (txr->packets)) 2756 if ((txr->bytes) && (txr->packets))
2723 newitr = txr->bytes/txr->packets; 2757 newitr = txr->bytes/txr->packets;
2724 if ((rxr->bytes) && (rxr->packets)) 2758 if ((rxr->bytes) && (rxr->packets))
2725 newitr = uimax(newitr, (rxr->bytes / rxr->packets)); 2759 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
2726 newitr += 24; /* account for hardware frame, crc */ 2760 newitr += 24; /* account for hardware frame, crc */
2727 2761
2728 /* set an upper boundary */ 2762 /* set an upper boundary */
2729 newitr = uimin(newitr, 3000); 2763 newitr = uimin(newitr, 3000);
2730 2764
2731 /* Be nice to the mid range */ 2765 /* Be nice to the mid range */
2732 if ((newitr > 300) && (newitr < 1200)) 2766 if ((newitr > 300) && (newitr < 1200))
2733 newitr = (newitr / 3); 2767 newitr = (newitr / 3);
2734 else 2768 else
2735 newitr = (newitr / 2); 2769 newitr = (newitr / 2);
2736 2770
2737 /* 2771 /*
2738 * When RSC is used, ITR interval must be larger than RSC_DELAY. 2772 * When RSC is used, ITR interval must be larger than RSC_DELAY.
2739 * Currently, we use 2us for RSC_DELAY. The minimum value is always 2773 * Currently, we use 2us for RSC_DELAY. The minimum value is always
2740 * greater than 2us on 100M (and 10M?(not documented)), but it's not 2774 * greater than 2us on 100M (and 10M?(not documented)), but it's not
2741 * on 1G and higher. 2775 * on 1G and higher.
2742 */ 2776 */
2743 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 2777 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2744 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 2778 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2745 if (newitr < IXGBE_MIN_RSC_EITR_10G1G) 2779 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
2746 newitr = IXGBE_MIN_RSC_EITR_10G1G; 2780 newitr = IXGBE_MIN_RSC_EITR_10G1G;
2747 } 2781 }
2748 2782
2749 /* save for next interrupt */ 2783 /* save for next interrupt */
2750 que->eitr_setting = newitr; 2784 que->eitr_setting = newitr;
2751 2785
2752 /* Reset state */ 2786 /* Reset state */
2753 txr->bytes = 0; 2787 txr->bytes = 0;
2754 txr->packets = 0; 2788 txr->packets = 0;
2755 rxr->bytes = 0; 2789 rxr->bytes = 0;
2756 rxr->packets = 0; 2790 rxr->packets = 0;
2757 2791
2758no_calc: 2792no_calc:
2759 if (more) 2793 if (more)
2760 ixgbe_sched_handle_que(adapter, que); 2794 ixgbe_sched_handle_que(adapter, que);
2761 else 2795 else
2762 ixgbe_enable_queue(adapter, que->msix); 2796 ixgbe_enable_queue(adapter, que->msix);
2763 2797
2764 return 1; 2798 return 1;
2765} /* ixgbe_msix_que */ 2799} /* ixgbe_msix_que */
2766 2800
2767/************************************************************************ 2801/************************************************************************
2768 * ixgbe_media_status - Media Ioctl callback 2802 * ixgbe_media_status - Media Ioctl callback
2769 * 2803 *
2770 * Called whenever the user queries the status of 2804 * Called whenever the user queries the status of
2771 * the interface using ifconfig. 2805 * the interface using ifconfig.
2772 ************************************************************************/ 2806 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh link state before reporting it back to ifconfig. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report IFM_NONE and bail out early. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Map the PHY's supported physical layer(s) plus the current
	 * negotiated link speed onto the matching ifmedia subtype.
	 * Copper (BASE-T / 100BASE-TX) layers first.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper. */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach optics (LR/LRM); 1G fallback reports LX. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach optics (SR/SX). */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2914 2948
2915/************************************************************************ 2949/************************************************************************
2916 * ixgbe_media_change - Media Ioctl callback 2950 * ixgbe_media_change - Media Ioctl callback
2917 * 2951 *
2918 * Called when the user changes speed/duplex using 2952 * Called when the user changes speed/duplex using
2919 * media/mediopt option with ifconfig. 2953 * media/mediopt option with ifconfig.
2920 ************************************************************************/ 2954 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Backplane media cannot be changed from software. */
	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autoselect: advertise everything the PHY can do. */
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	/*
	 * Record the selected speeds in adapter->advertise using the same
	 * bit encoding as the "advertise_speed" sysctl:
	 * 0x1=100M, 0x2=1G, 0x4=10G, 0x8=10M, 0x10=2.5G, 0x20=5G.
	 * 0 means "autoselect".
	 */
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
3020 3054
3021/************************************************************************ 3055/************************************************************************
3022 * ixgbe_set_promisc 3056 * ixgbe_set_promisc
3023 ************************************************************************/ 3057 ************************************************************************/
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	int mcnt = 0;
	u32 rctl;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &adapter->osdep.ec;

	/* Caller must hold the core lock; we touch the FCTRL register. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	/* Start from "unicast promiscuous off"; re-add bits below as needed. */
	rctl &= (~IXGBE_FCTRL_UPE);
	ETHER_LOCK(ec);
	/*
	 * Count multicast memberships; if ALLMULTI is set, or the list
	 * overflows the filter, treat it as "too many to filter".
	 */
	if (ec->ec_flags & ETHER_F_ALLMULTI)
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
			ETHER_NEXT_MULTI(step, enm);
		}
	}
	/* The filter can hold the whole list: no multicast promiscuous. */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		/* Full promiscuous: accept all unicast and multicast. */
		rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	} else if (ec->ec_flags & ETHER_F_ALLMULTI) {
		/* Multicast promiscuous only. */
		rctl |= IXGBE_FCTRL_MPE;
		rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl);
	}
	ETHER_UNLOCK(ec);
} /* ixgbe_set_promisc */
3063 3097
3064/************************************************************************ 3098/************************************************************************
3065 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 3099 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3066 ************************************************************************/ 3100 ************************************************************************/
static int
ixgbe_msix_link(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr, eicr_mask;
	s32 retval;

	++adapter->link_irq.ev_count;

	/* Pause other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);

	/* First get the cause */
	/*
	 * The specifications of 82598, 82599, X540 and X550 say EICS register
	 * is write only. However, Linux says it is a workaround for silicon
	 * errata to read EICS instead of EICR to get interrupt cause. It seems
	 * there is a problem about read clear mechanism for EICR register.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Be sure the queue bits are not cleared */
	eicr &= ~IXGBE_EICR_RTX_QUEUE;
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	/* Link status change: mask LSC and defer to the link softint. */
	if (eicr & IXGBE_EICR_LSC) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		softint_schedule(adapter->link_si);
	}

	/* Causes below only exist on 82599 and newer MACs. */
	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
		    (eicr & IXGBE_EICR_FLOW_DIR)) {
			/* This is probably overkill :) */
			if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
				return 1;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
			softint_schedule(adapter->fdir_si);
		}

		if (eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev,
			    "CRITICAL: ECC ERROR!! Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		/* Check for over temp condition */
		if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_X550EM_a:
				/* X550EM_a signals over-temp via SDP0. */
				if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
					break;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				IXGBE_WRITE_REG(hw, IXGBE_EICR,
				    IXGBE_EICR_GPI_SDP0_X550EM_a);
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				break;
			default:
				/* Other MACs use the thermal sensor bit. */
				if (!(eicr & IXGBE_EICR_TS))
					break;
				retval = hw->phy.ops.check_overtemp(hw);
				if (retval != IXGBE_ERR_OVERTEMP)
					break;
				device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
				device_printf(adapter->dev, "System shutdown required!\n");
				IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
				break;
			}
		}

		/* Check for VF message */
		if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
		    (eicr & IXGBE_EICR_MAILBOX))
			softint_schedule(adapter->mbx_si);
	}

	if (ixgbe_is_sfp(hw)) {
		/* Pluggable optics-related interrupt */
		if (hw->mac.type >= ixgbe_mac_X540)
			eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
		else
			eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);

		/* Module insertion/removal: defer to the mod softint. */
		if (eicr & eicr_mask) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
			softint_schedule(adapter->mod_si);
		}

		/* 82599 multi-speed fiber: defer to the msf softint. */
		if ((hw->mac.type == ixgbe_mac_82599EB) &&
		    (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
			IXGBE_WRITE_REG(hw, IXGBE_EICR,
			    IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
			softint_schedule(adapter->msf_si);
		}
	}

	/* Check for fan failure */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
		ixgbe_check_fan_failure(adapter, eicr, TRUE);
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
	}

	/* External PHY interrupt */
	if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
		softint_schedule(adapter->phy_si);
	}

	/* Re-enable other interrupts */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return 1;
} /* ixgbe_msix_link */
3188 3222
3189static void 3223static void
3190ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) 3224ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3191{ 3225{
3192 3226
3193 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 3227 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3194 itr |= itr << 16; 3228 itr |= itr << 16;
3195 else 3229 else
3196 itr |= IXGBE_EITR_CNT_WDIS; 3230 itr |= IXGBE_EITR_CNT_WDIS;
3197 3231
3198 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr); 3232 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3199} 3233}
3200 3234
3201 3235
3202/************************************************************************ 3236/************************************************************************
3203 * ixgbe_sysctl_interrupt_rate_handler 3237 * ixgbe_sysctl_interrupt_rate_handler
3204 ************************************************************************/ 3238 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No register access while firmware recovery is in progress. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	/*
	 * Report the current rate: EITR holds the interval in units of
	 * 2us in bits 11:3, so rate (irq/s) = 500000 / usec.
	 */
	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	usec = ((reg & 0x0FF8) >> 3);
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	/* Read-only access (or error): nothing more to do. */
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff;	/* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;
		/* Convert rate back to an interval in the EITR field. */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3255 3289
3256const struct sysctlnode * 3290const struct sysctlnode *
3257ixgbe_sysctl_instance(struct adapter *adapter) 3291ixgbe_sysctl_instance(struct adapter *adapter)
3258{ 3292{
3259 const char *dvname; 3293 const char *dvname;
3260 struct sysctllog **log; 3294 struct sysctllog **log;
3261 int rc; 3295 int rc;
3262 const struct sysctlnode *rnode; 3296 const struct sysctlnode *rnode;
3263 3297
3264 if (adapter->sysctltop != NULL) 3298 if (adapter->sysctltop != NULL)
3265 return adapter->sysctltop; 3299 return adapter->sysctltop;
3266 3300
3267 log = &adapter->sysctllog; 3301 log = &adapter->sysctllog;
3268 dvname = device_xname(adapter->dev); 3302 dvname = device_xname(adapter->dev);
3269 3303
3270 if ((rc = sysctl_createv(log, 0, NULL, &rnode, 3304 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3271 0, CTLTYPE_NODE, dvname, 3305 0, CTLTYPE_NODE, dvname,
3272 SYSCTL_DESCR("ixgbe information and settings"), 3306 SYSCTL_DESCR("ixgbe information and settings"),
3273 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 3307 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3274 goto err; 3308 goto err;
3275 3309
3276 return rnode; 3310 return rnode;
3277err: 3311err:
3278 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc); 3312 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3279 return NULL; 3313 return NULL;
3280} 3314}
3281 3315
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *	Populate the per-device sysctl tree: common tunables first, then
 *	feature-conditional nodes (X550 DMA coalescing, Wake-on-LAN,
 *	external PHY temperature on X552/X557-AT, EEE).  Creation
 *	failures are logged but not fatal.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	/* Root node: hw.<devicename> */
	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the per-device flag from the global loader tunable. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" were changed in a sysctl
	 * handler, it would cause flip-flopping between softint and
	 * workqueue mode within one deferred-processing run.  That would
	 * require preempt_disable()/preempt_enable() in
	 * ixgbe_sched_handle_que() to avoid the KASSERT in
	 * softint_schedule().  Changing "que->txrx_use_workqueue" in the
	 * interrupt handler instead is lighter than doing
	 * preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* Group the external-PHY sensors under a "phy" subnode. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3438 3472
3439/************************************************************************ 3473/************************************************************************
3440 * ixgbe_allocate_pci_resources 3474 * ixgbe_allocate_pci_resources
3441 ************************************************************************/ 3475 ************************************************************************/
3442static int 3476static int
3443ixgbe_allocate_pci_resources(struct adapter *adapter, 3477ixgbe_allocate_pci_resources(struct adapter *adapter,
3444 const struct pci_attach_args *pa) 3478 const struct pci_attach_args *pa)
3445{ 3479{
3446 pcireg_t memtype, csr; 3480 pcireg_t memtype, csr;
3447 device_t dev = adapter->dev; 3481 device_t dev = adapter->dev;

cvs diff -r1.125 -r1.125.2.1 src/sys/dev/pci/ixgbe/ixv.c (switch to unified diff)

--- src/sys/dev/pci/ixgbe/ixv.c 2019/07/30 08:38:03 1.125
+++ src/sys/dev/pci/ixgbe/ixv.c 2019/09/01 11:07:05 1.125.2.1
@@ -1,3044 +1,3073 @@ @@ -1,3044 +1,3073 @@
1/*$NetBSD: ixv.c,v 1.125 2019/07/30 08:38:03 msaitoh Exp $*/ 1/*$NetBSD: ixv.c,v 1.125.2.1 2019/09/01 11:07:05 martin Exp $*/
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37#ifdef _KERNEL_OPT 37#ifdef _KERNEL_OPT
38#include "opt_inet.h" 38#include "opt_inet.h"
39#include "opt_inet6.h" 39#include "opt_inet6.h"
40#include "opt_net_mpsafe.h" 40#include "opt_net_mpsafe.h"
41#endif 41#endif
42 42
43#include "ixgbe.h" 43#include "ixgbe.h"
44#include "vlan.h" 44#include "vlan.h"
45 45
/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";
/* XXX NetBSD: + 1.5.17 */

/************************************************************************
 * PCI Device ID Table
 *
 * Used by probe to select devices to load on
 * Last field stores an index into ixv_strings
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/* Virtual-function IDs of the supported ixgbe families. */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/************************************************************************
 * Table of branding strings
 ************************************************************************/
static const char *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
78 78
/*********************************************************************
 * Function prototypes
 *********************************************************************/
/* Autoconf entry points */
static int	ixv_probe(device_t, cfdata_t, void *);
static void	ixv_attach(device_t, device_t, void *);
static int	ixv_detach(device_t, int);
#if 0
static int	ixv_shutdown(device_t);
#endif
/* ifnet/ethercom entry points */
static int	ixv_ifflags_cb(struct ethercom *);
static int	ixv_ioctl(struct ifnet *, u_long, void *);
static int	ixv_init(struct ifnet *);
static void	ixv_init_locked(struct adapter *);
static void	ixv_ifstop(struct ifnet *, int);
static void	ixv_stop(void *);
static void	ixv_init_device_features(struct adapter *);
static void	ixv_media_status(struct ifnet *, struct ifmediareq *);
static int	ixv_media_change(struct ifnet *);
static int	ixv_allocate_pci_resources(struct adapter *,
    const struct pci_attach_args *);
static int	ixv_allocate_msix(struct adapter *,
    const struct pci_attach_args *);
static int	ixv_configure_interrupts(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void	ixv_local_timer(void *);
static void	ixv_local_timer_locked(void *);
static int	ixv_setup_interface(device_t, struct adapter *);
static int	ixv_negotiate_api(struct adapter *);

static void	ixv_initialize_transmit_units(struct adapter *);
static void	ixv_initialize_receive_units(struct adapter *);
static void	ixv_initialize_rss_mapping(struct adapter *);
static s32	ixv_check_link(struct adapter *);

static void	ixv_enable_intr(struct adapter *);
static void	ixv_disable_intr(struct adapter *);
static void	ixv_set_multi(struct adapter *);
static void	ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTLFN_PROTO);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
static void	ixv_eitr_write(struct adapter *, uint32_t, uint32_t);

/* VLAN support (HW tagging and filter registration) */
static void	ixv_setup_vlan_tagging(struct adapter *);
static int	ixv_setup_vlan_support(struct adapter *);
static int	ixv_vlan_cb(struct ethercom *, uint16_t, bool);
static int	ixv_register_vlan(void *, struct ifnet *, u16);
static int	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_add_device_sysctls(struct adapter *);
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);

/* Sysctl handlers */
static void	ixv_set_sysctl_value(struct adapter *, const char *,
    const char *, int *, int);
static int	ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
static int	ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);

/* The MSI-X Interrupt handlers */
static int	ixv_msix_que(void *);
static int	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *);
static void	ixv_handle_link(void *);

/* Workqueue handler for deferred work */
static void	ixv_handle_que_work(struct work *, void *);

const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
/* NetBSD autoconf attachment glue: probe/attach/detach for "ixv". */
CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
    ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

#if 0
/* FreeBSD newbus registration, kept for reference only (not compiled). */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#endif

/*
 * TUNEABLE PARAMETERS:
 */

/* Number of Queues - do not exceed MSI-X vectors - 1 */
static int ixv_num_queues = 0;
/* FreeBSD loader-tunable hook; an empty no-op stub on NetBSD. */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixv_enable_aim = false;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);

static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
201/* How many packets txeof tries to clean at a time */ 202/* How many packets txeof tries to clean at a time */
202static int ixv_tx_process_limit = 256; 203static int ixv_tx_process_limit = 256;
203TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); 204TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
204 205
205/* Which packet processing uses workqueue or softint */ 206/* Which packet processing uses workqueue or softint */
206static bool ixv_txrx_workqueue = false; 207static bool ixv_txrx_workqueue = false;
207 208
208/* 209/*
209 * Number of TX descriptors per ring, 210 * Number of TX descriptors per ring,
210 * setting higher than RX as this seems 211 * setting higher than RX as this seems
211 * the better performing choice. 212 * the better performing choice.
212 */ 213 */
213static int ixv_txd = PERFORM_TXD; 214static int ixv_txd = PERFORM_TXD;
214TUNABLE_INT("hw.ixv.txd", &ixv_txd); 215TUNABLE_INT("hw.ixv.txd", &ixv_txd);
215 216
216/* Number of RX descriptors per ring */ 217/* Number of RX descriptors per ring */
217static int ixv_rxd = PERFORM_RXD; 218static int ixv_rxd = PERFORM_RXD;
218TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); 219TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
219 220
220/* Legacy Transmit (single queue) */ 221/* Legacy Transmit (single queue) */
221static int ixv_enable_legacy_tx = 0; 222static int ixv_enable_legacy_tx = 0;
222TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx); 223TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
223 224
224#ifdef NET_MPSAFE 225#ifdef NET_MPSAFE
225#define IXGBE_MPSAFE 1 226#define IXGBE_MPSAFE 1
226#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 227#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
227#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE 228#define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE
228#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 229#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
229#else 230#else
230#define IXGBE_CALLOUT_FLAGS 0 231#define IXGBE_CALLOUT_FLAGS 0
231#define IXGBE_SOFTINFT_FLAGS 0 232#define IXGBE_SOFTINFT_FLAGS 0
232#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 233#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
233#endif 234#endif
234#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 235#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
235 236
236#if 0 237#if 0
237static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *); 238static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
238static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *); 239static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
239#endif 240#endif
240 241
241/************************************************************************ 242/************************************************************************
242 * ixv_probe - Device identification routine 243 * ixv_probe - Device identification routine
243 * 244 *
244 * Determines if the driver should be loaded on 245 * Determines if the driver should be loaded on
245 * adapter based on its PCI vendor/device ID. 246 * adapter based on its PCI vendor/device ID.
246 * 247 *
247 * return BUS_PROBE_DEFAULT on success, positive on failure 248 * return BUS_PROBE_DEFAULT on success, positive on failure
248 ************************************************************************/ 249 ************************************************************************/
249static int 250static int
250ixv_probe(device_t dev, cfdata_t cf, void *aux) 251ixv_probe(device_t dev, cfdata_t cf, void *aux)
251{ 252{
252#ifdef __HAVE_PCI_MSI_MSIX 253#ifdef __HAVE_PCI_MSI_MSIX
253 const struct pci_attach_args *pa = aux; 254 const struct pci_attach_args *pa = aux;
254 255
255 return (ixv_lookup(pa) != NULL) ? 1 : 0; 256 return (ixv_lookup(pa) != NULL) ? 1 : 0;
256#else 257#else
257 return 0; 258 return 0;
258#endif 259#endif
259} /* ixv_probe */ 260} /* ixv_probe */
260 261
261static const ixgbe_vendor_info_t * 262static const ixgbe_vendor_info_t *
262ixv_lookup(const struct pci_attach_args *pa) 263ixv_lookup(const struct pci_attach_args *pa)
263{ 264{
264 const ixgbe_vendor_info_t *ent; 265 const ixgbe_vendor_info_t *ent;
265 pcireg_t subid; 266 pcireg_t subid;
266 267
267 INIT_DEBUGOUT("ixv_lookup: begin"); 268 INIT_DEBUGOUT("ixv_lookup: begin");
268 269
269 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) 270 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
270 return NULL; 271 return NULL;
271 272
272 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 273 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
273 274
274 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { 275 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
275 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && 276 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
276 (PCI_PRODUCT(pa->pa_id) == ent->device_id) && 277 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
277 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || 278 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
278 (ent->subvendor_id == 0)) && 279 (ent->subvendor_id == 0)) &&
279 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || 280 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
280 (ent->subdevice_id == 0))) { 281 (ent->subdevice_id == 0))) {
281 return ent; 282 return ent;
282 } 283 }
283 } 284 }
284 285
285 return NULL; 286 return NULL;
286} 287}
287 288
288/************************************************************************ 289/************************************************************************
289 * ixv_attach - Device initialization routine 290 * ixv_attach - Device initialization routine
290 * 291 *
291 * Called when the driver is being loaded. 292 * Called when the driver is being loaded.
292 * Identifies the type of hardware, allocates all resources 293 * Identifies the type of hardware, allocates all resources
293 * and initializes the hardware. 294 * and initializes the hardware.
294 * 295 *
295 * return 0 on success, positive on failure 296 * return 0 on success, positive on failure
296 ************************************************************************/ 297 ************************************************************************/
297static void 298static void
298ixv_attach(device_t parent, device_t dev, void *aux) 299ixv_attach(device_t parent, device_t dev, void *aux)
299{ 300{
300 struct adapter *adapter; 301 struct adapter *adapter;
301 struct ixgbe_hw *hw; 302 struct ixgbe_hw *hw;
302 int error = 0; 303 int error = 0;
303 pcireg_t id, subid; 304 pcireg_t id, subid;
304 const ixgbe_vendor_info_t *ent; 305 const ixgbe_vendor_info_t *ent;
305 const struct pci_attach_args *pa = aux; 306 const struct pci_attach_args *pa = aux;
306 const char *apivstr; 307 const char *apivstr;
307 const char *str; 308 const char *str;
308 char buf[256]; 309 char buf[256];
309 310
310 INIT_DEBUGOUT("ixv_attach: begin"); 311 INIT_DEBUGOUT("ixv_attach: begin");
311 312
312 /* 313 /*
313 * Make sure BUSMASTER is set, on a VM under 314 * Make sure BUSMASTER is set, on a VM under
314 * KVM it may not be and will break things. 315 * KVM it may not be and will break things.
315 */ 316 */
316 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 317 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
317 318
318 /* Allocate, clear, and link in our adapter structure */ 319 /* Allocate, clear, and link in our adapter structure */
319 adapter = device_private(dev); 320 adapter = device_private(dev);
320 adapter->dev = dev; 321 adapter->dev = dev;
321 adapter->hw.back = adapter; 322 adapter->hw.back = adapter;
322 hw = &adapter->hw; 323 hw = &adapter->hw;
323 324
324 adapter->init_locked = ixv_init_locked; 325 adapter->init_locked = ixv_init_locked;
325 adapter->stop_locked = ixv_stop; 326 adapter->stop_locked = ixv_stop;
326 327
327 adapter->osdep.pc = pa->pa_pc; 328 adapter->osdep.pc = pa->pa_pc;
328 adapter->osdep.tag = pa->pa_tag; 329 adapter->osdep.tag = pa->pa_tag;
329 if (pci_dma64_available(pa)) 330 if (pci_dma64_available(pa))
330 adapter->osdep.dmat = pa->pa_dmat64; 331 adapter->osdep.dmat = pa->pa_dmat64;
331 else 332 else
332 adapter->osdep.dmat = pa->pa_dmat; 333 adapter->osdep.dmat = pa->pa_dmat;
333 adapter->osdep.attached = false; 334 adapter->osdep.attached = false;
334 335
335 ent = ixv_lookup(pa); 336 ent = ixv_lookup(pa);
336 337
337 KASSERT(ent != NULL); 338 KASSERT(ent != NULL);
338 339
339 aprint_normal(": %s, Version - %s\n", 340 aprint_normal(": %s, Version - %s\n",
340 ixv_strings[ent->index], ixv_driver_version); 341 ixv_strings[ent->index], ixv_driver_version);
341 342
342 /* Core Lock Init*/ 343 /* Core Lock Init*/
343 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 344 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
344 345
345 /* Do base PCI setup - map BAR0 */ 346 /* Do base PCI setup - map BAR0 */
346 if (ixv_allocate_pci_resources(adapter, pa)) { 347 if (ixv_allocate_pci_resources(adapter, pa)) {
347 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n"); 348 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
348 error = ENXIO; 349 error = ENXIO;
349 goto err_out; 350 goto err_out;
350 } 351 }
351 352
352 /* SYSCTL APIs */ 353 /* SYSCTL APIs */
353 ixv_add_device_sysctls(adapter); 354 ixv_add_device_sysctls(adapter);
354 355
355 /* Set up the timer callout */ 356 /* Set up the timer callout */
356 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 357 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
357 358
358 /* Save off the information about this board */ 359 /* Save off the information about this board */
359 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 360 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
360 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 361 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
361 hw->vendor_id = PCI_VENDOR(id); 362 hw->vendor_id = PCI_VENDOR(id);
362 hw->device_id = PCI_PRODUCT(id); 363 hw->device_id = PCI_PRODUCT(id);
363 hw->revision_id = 364 hw->revision_id =
364 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 365 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
365 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 366 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
366 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 367 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
367 368
368 /* A subset of set_mac_type */ 369 /* A subset of set_mac_type */
369 switch (hw->device_id) { 370 switch (hw->device_id) {
370 case IXGBE_DEV_ID_82599_VF: 371 case IXGBE_DEV_ID_82599_VF:
371 hw->mac.type = ixgbe_mac_82599_vf; 372 hw->mac.type = ixgbe_mac_82599_vf;
372 str = "82599 VF"; 373 str = "82599 VF";
373 break; 374 break;
374 case IXGBE_DEV_ID_X540_VF: 375 case IXGBE_DEV_ID_X540_VF:
375 hw->mac.type = ixgbe_mac_X540_vf; 376 hw->mac.type = ixgbe_mac_X540_vf;
376 str = "X540 VF"; 377 str = "X540 VF";
377 break; 378 break;
378 case IXGBE_DEV_ID_X550_VF: 379 case IXGBE_DEV_ID_X550_VF:
379 hw->mac.type = ixgbe_mac_X550_vf; 380 hw->mac.type = ixgbe_mac_X550_vf;
380 str = "X550 VF"; 381 str = "X550 VF";
381 break; 382 break;
382 case IXGBE_DEV_ID_X550EM_X_VF: 383 case IXGBE_DEV_ID_X550EM_X_VF:
383 hw->mac.type = ixgbe_mac_X550EM_x_vf; 384 hw->mac.type = ixgbe_mac_X550EM_x_vf;
384 str = "X550EM X VF"; 385 str = "X550EM X VF";
385 break; 386 break;
386 case IXGBE_DEV_ID_X550EM_A_VF: 387 case IXGBE_DEV_ID_X550EM_A_VF:
387 hw->mac.type = ixgbe_mac_X550EM_a_vf; 388 hw->mac.type = ixgbe_mac_X550EM_a_vf;
388 str = "X550EM A VF"; 389 str = "X550EM A VF";
389 break; 390 break;
390 default: 391 default:
391 /* Shouldn't get here since probe succeeded */ 392 /* Shouldn't get here since probe succeeded */
392 aprint_error_dev(dev, "Unknown device ID!\n"); 393 aprint_error_dev(dev, "Unknown device ID!\n");
393 error = ENXIO; 394 error = ENXIO;
394 goto err_out; 395 goto err_out;
395 break; 396 break;
396 } 397 }
397 aprint_normal_dev(dev, "device %s\n", str); 398 aprint_normal_dev(dev, "device %s\n", str);
398 399
399 ixv_init_device_features(adapter); 400 ixv_init_device_features(adapter);
400 401
401 /* Initialize the shared code */ 402 /* Initialize the shared code */
402 error = ixgbe_init_ops_vf(hw); 403 error = ixgbe_init_ops_vf(hw);
403 if (error) { 404 if (error) {
404 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n"); 405 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
405 error = EIO; 406 error = EIO;
406 goto err_out; 407 goto err_out;
407 } 408 }
408 409
409 /* Setup the mailbox */ 410 /* Setup the mailbox */
410 ixgbe_init_mbx_params_vf(hw); 411 ixgbe_init_mbx_params_vf(hw);
411 412
412 /* Set the right number of segments */ 413 /* Set the right number of segments */
413 adapter->num_segs = IXGBE_82599_SCATTER; 414 adapter->num_segs = IXGBE_82599_SCATTER;
414 415
415 /* Reset mbox api to 1.0 */ 416 /* Reset mbox api to 1.0 */
416 error = hw->mac.ops.reset_hw(hw); 417 error = hw->mac.ops.reset_hw(hw);
417 if (error == IXGBE_ERR_RESET_FAILED) 418 if (error == IXGBE_ERR_RESET_FAILED)
418 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n"); 419 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
419 else if (error) 420 else if (error)
420 aprint_error_dev(dev, "...reset_hw() failed with error %d\n", 421 aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
421 error); 422 error);
422 if (error) { 423 if (error) {
423 error = EIO; 424 error = EIO;
424 goto err_out; 425 goto err_out;
425 } 426 }
426 427
427 error = hw->mac.ops.init_hw(hw); 428 error = hw->mac.ops.init_hw(hw);
428 if (error) { 429 if (error) {
429 aprint_error_dev(dev, "...init_hw() failed!\n"); 430 aprint_error_dev(dev, "...init_hw() failed!\n");
430 error = EIO; 431 error = EIO;
431 goto err_out; 432 goto err_out;
432 } 433 }
433 434
434 /* Negotiate mailbox API version */ 435 /* Negotiate mailbox API version */
435 error = ixv_negotiate_api(adapter); 436 error = ixv_negotiate_api(adapter);
436 if (error) 437 if (error)
437 aprint_normal_dev(dev, 438 aprint_normal_dev(dev,
438 "MBX API negotiation failed during attach!\n"); 439 "MBX API negotiation failed during attach!\n");
439 switch (hw->api_version) { 440 switch (hw->api_version) {
440 case ixgbe_mbox_api_10: 441 case ixgbe_mbox_api_10:
441 apivstr = "1.0"; 442 apivstr = "1.0";
442 break; 443 break;
443 case ixgbe_mbox_api_20: 444 case ixgbe_mbox_api_20:
444 apivstr = "2.0"; 445 apivstr = "2.0";
445 break; 446 break;
446 case ixgbe_mbox_api_11: 447 case ixgbe_mbox_api_11:
447 apivstr = "1.1"; 448 apivstr = "1.1";
448 break; 449 break;
449 case ixgbe_mbox_api_12: 450 case ixgbe_mbox_api_12:
450 apivstr = "1.2"; 451 apivstr = "1.2";
451 break; 452 break;
452 case ixgbe_mbox_api_13: 453 case ixgbe_mbox_api_13:
453 apivstr = "1.3"; 454 apivstr = "1.3";
454 break; 455 break;
455 default: 456 default:
456 apivstr = "unknown"; 457 apivstr = "unknown";
457 break; 458 break;
458 } 459 }
459 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr); 460 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
460 461
461 /* If no mac address was assigned, make a random one */ 462 /* If no mac address was assigned, make a random one */
462 if (!ixv_check_ether_addr(hw->mac.addr)) { 463 if (!ixv_check_ether_addr(hw->mac.addr)) {
463 u8 addr[ETHER_ADDR_LEN]; 464 u8 addr[ETHER_ADDR_LEN];
464 uint64_t rndval = cprng_strong64(); 465 uint64_t rndval = cprng_strong64();
465 466
466 memcpy(addr, &rndval, sizeof(addr)); 467 memcpy(addr, &rndval, sizeof(addr));
467 addr[0] &= 0xFE; 468 addr[0] &= 0xFE;
468 addr[0] |= 0x02; 469 addr[0] |= 0x02;
469 bcopy(addr, hw->mac.addr, sizeof(addr)); 470 bcopy(addr, hw->mac.addr, sizeof(addr));
470 } 471 }
471 472
472 /* Register for VLAN events */ 473 /* Register for VLAN events */
473 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb); 474 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);
474 475
475 /* Sysctls for limiting the amount of work done in the taskqueues */ 476 /* Sysctls for limiting the amount of work done in the taskqueues */
476 ixv_set_sysctl_value(adapter, "rx_processing_limit", 477 ixv_set_sysctl_value(adapter, "rx_processing_limit",
477 "max number of rx packets to process", 478 "max number of rx packets to process",
478 &adapter->rx_process_limit, ixv_rx_process_limit); 479 &adapter->rx_process_limit, ixv_rx_process_limit);
479 480
480 ixv_set_sysctl_value(adapter, "tx_processing_limit", 481 ixv_set_sysctl_value(adapter, "tx_processing_limit",
481 "max number of tx packets to process", 482 "max number of tx packets to process",
482 &adapter->tx_process_limit, ixv_tx_process_limit); 483 &adapter->tx_process_limit, ixv_tx_process_limit);
483 484
484 /* Do descriptor calc and sanity checks */ 485 /* Do descriptor calc and sanity checks */
485 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 486 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
486 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { 487 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
487 aprint_error_dev(dev, "TXD config issue, using default!\n"); 488 aprint_error_dev(dev, "TXD config issue, using default!\n");
488 adapter->num_tx_desc = DEFAULT_TXD; 489 adapter->num_tx_desc = DEFAULT_TXD;
489 } else 490 } else
490 adapter->num_tx_desc = ixv_txd; 491 adapter->num_tx_desc = ixv_txd;
491 492
492 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 493 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
493 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { 494 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
494 aprint_error_dev(dev, "RXD config issue, using default!\n"); 495 aprint_error_dev(dev, "RXD config issue, using default!\n");
495 adapter->num_rx_desc = DEFAULT_RXD; 496 adapter->num_rx_desc = DEFAULT_RXD;
496 } else 497 } else
497 adapter->num_rx_desc = ixv_rxd; 498 adapter->num_rx_desc = ixv_rxd;
498 499
499 /* Setup MSI-X */ 500 /* Setup MSI-X */
500 error = ixv_configure_interrupts(adapter); 501 error = ixv_configure_interrupts(adapter);
501 if (error) 502 if (error)
502 goto err_out; 503 goto err_out;
503 504
504 /* Allocate our TX/RX Queues */ 505 /* Allocate our TX/RX Queues */
505 if (ixgbe_allocate_queues(adapter)) { 506 if (ixgbe_allocate_queues(adapter)) {
506 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n"); 507 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
507 error = ENOMEM; 508 error = ENOMEM;
508 goto err_out; 509 goto err_out;
509 } 510 }
510 511
511 /* hw.ix defaults init */ 512 /* hw.ix defaults init */
512 adapter->enable_aim = ixv_enable_aim; 513 adapter->enable_aim = ixv_enable_aim;
513 514
514 adapter->txrx_use_workqueue = ixv_txrx_workqueue; 515 adapter->txrx_use_workqueue = ixv_txrx_workqueue;
515 516
516 error = ixv_allocate_msix(adapter, pa); 517 error = ixv_allocate_msix(adapter, pa);
517 if (error) { 518 if (error) {
518 device_printf(dev, "ixv_allocate_msix() failed!\n"); 519 device_printf(dev, "ixv_allocate_msix() failed!\n");
519 goto err_late; 520 goto err_late;
520 } 521 }
521 522
522 /* Setup OS specific network interface */ 523 /* Setup OS specific network interface */
523 error = ixv_setup_interface(dev, adapter); 524 error = ixv_setup_interface(dev, adapter);
524 if (error != 0) { 525 if (error != 0) {
525 aprint_error_dev(dev, "ixv_setup_interface() failed!\n"); 526 aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
526 goto err_late; 527 goto err_late;
527 } 528 }
528 529
529 /* Do the stats setup */ 530 /* Do the stats setup */
530 ixv_save_stats(adapter); 531 ixv_save_stats(adapter);
531 ixv_init_stats(adapter); 532 ixv_init_stats(adapter);
532 ixv_add_stats_sysctls(adapter); 533 ixv_add_stats_sysctls(adapter);
533 534
534 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 535 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
535 ixgbe_netmap_attach(adapter); 536 ixgbe_netmap_attach(adapter);
536 537
537 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); 538 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
538 aprint_verbose_dev(dev, "feature cap %s\n", buf); 539 aprint_verbose_dev(dev, "feature cap %s\n", buf);
539 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); 540 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
540 aprint_verbose_dev(dev, "feature ena %s\n", buf); 541 aprint_verbose_dev(dev, "feature ena %s\n", buf);
541 542
542 INIT_DEBUGOUT("ixv_attach: end"); 543 INIT_DEBUGOUT("ixv_attach: end");
543 adapter->osdep.attached = true; 544 adapter->osdep.attached = true;
544 545
545 return; 546 return;
546 547
547err_late: 548err_late:
548 ixgbe_free_transmit_structures(adapter); 549 ixgbe_free_transmit_structures(adapter);
549 ixgbe_free_receive_structures(adapter); 550 ixgbe_free_receive_structures(adapter);
550 free(adapter->queues, M_DEVBUF); 551 free(adapter->queues, M_DEVBUF);
551err_out: 552err_out:
552 ixv_free_pci_resources(adapter); 553 ixv_free_pci_resources(adapter);
553 IXGBE_CORE_LOCK_DESTROY(adapter); 554 IXGBE_CORE_LOCK_DESTROY(adapter);
554 555
555 return; 556 return;
556} /* ixv_attach */ 557} /* ixv_attach */
557 558
558/************************************************************************ 559/************************************************************************
559 * ixv_detach - Device removal routine 560 * ixv_detach - Device removal routine
560 * 561 *
561 * Called when the driver is being removed. 562 * Called when the driver is being removed.
562 * Stops the adapter and deallocates all the resources 563 * Stops the adapter and deallocates all the resources
563 * that were allocated for driver operation. 564 * that were allocated for driver operation.
564 * 565 *
565 * return 0 on success, positive on failure 566 * return 0 on success, positive on failure
566 ************************************************************************/ 567 ************************************************************************/
567static int 568static int
568ixv_detach(device_t dev, int flags) 569ixv_detach(device_t dev, int flags)
569{ 570{
570 struct adapter *adapter = device_private(dev); 571 struct adapter *adapter = device_private(dev);
571 struct ixgbe_hw *hw = &adapter->hw; 572 struct ixgbe_hw *hw = &adapter->hw;
572 struct ix_queue *que = adapter->queues; 573 struct ix_queue *que = adapter->queues;
573 struct tx_ring *txr = adapter->tx_rings; 574 struct tx_ring *txr = adapter->tx_rings;
574 struct rx_ring *rxr = adapter->rx_rings; 575 struct rx_ring *rxr = adapter->rx_rings;
575 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 576 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
576 577
577 INIT_DEBUGOUT("ixv_detach: begin"); 578 INIT_DEBUGOUT("ixv_detach: begin");
578 if (adapter->osdep.attached == false) 579 if (adapter->osdep.attached == false)
579 return 0; 580 return 0;
580 581
581 /* Stop the interface. Callouts are stopped in it. */ 582 /* Stop the interface. Callouts are stopped in it. */
582 ixv_ifstop(adapter->ifp, 1); 583 ixv_ifstop(adapter->ifp, 1);
583 584
584#if NVLAN > 0 585#if NVLAN > 0
585 /* Make sure VLANs are not using driver */ 586 /* Make sure VLANs are not using driver */
586 if (!VLAN_ATTACHED(&adapter->osdep.ec)) 587 if (!VLAN_ATTACHED(&adapter->osdep.ec))
587 ; /* nothing to do: no VLANs */ 588 ; /* nothing to do: no VLANs */
588 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) 589 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
589 vlan_ifdetach(adapter->ifp); 590 vlan_ifdetach(adapter->ifp);
590 else { 591 else {
591 aprint_error_dev(dev, "VLANs in use, detach first\n"); 592 aprint_error_dev(dev, "VLANs in use, detach first\n");
592 return EBUSY; 593 return EBUSY;
593 } 594 }
594#endif 595#endif
595 596
596 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { 597 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
597 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 598 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
598 softint_disestablish(txr->txr_si); 599 softint_disestablish(txr->txr_si);
599 softint_disestablish(que->que_si); 600 softint_disestablish(que->que_si);
600 } 601 }
601 if (adapter->txr_wq != NULL) 602 if (adapter->txr_wq != NULL)
602 workqueue_destroy(adapter->txr_wq); 603 workqueue_destroy(adapter->txr_wq);
603 if (adapter->txr_wq_enqueued != NULL) 604 if (adapter->txr_wq_enqueued != NULL)
604 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); 605 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
605 if (adapter->que_wq != NULL) 606 if (adapter->que_wq != NULL)
606 workqueue_destroy(adapter->que_wq); 607 workqueue_destroy(adapter->que_wq);
607 608
608 /* Drain the Mailbox(link) queue */ 609 /* Drain the Mailbox(link) queue */
609 softint_disestablish(adapter->link_si); 610 softint_disestablish(adapter->link_si);
610 611
611 ether_ifdetach(adapter->ifp); 612 ether_ifdetach(adapter->ifp);
612 callout_halt(&adapter->timer, NULL); 613 callout_halt(&adapter->timer, NULL);
613 614
614 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 615 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
615 netmap_detach(adapter->ifp); 616 netmap_detach(adapter->ifp);
616 617
617 ixv_free_pci_resources(adapter); 618 ixv_free_pci_resources(adapter);
618#if 0 /* XXX the NetBSD port is probably missing something here */ 619#if 0 /* XXX the NetBSD port is probably missing something here */
619 bus_generic_detach(dev); 620 bus_generic_detach(dev);
620#endif 621#endif
621 if_detach(adapter->ifp); 622 if_detach(adapter->ifp);
622 if_percpuq_destroy(adapter->ipq); 623 if_percpuq_destroy(adapter->ipq);
623 624
624 sysctl_teardown(&adapter->sysctllog); 625 sysctl_teardown(&adapter->sysctllog);
625 evcnt_detach(&adapter->efbig_tx_dma_setup); 626 evcnt_detach(&adapter->efbig_tx_dma_setup);
626 evcnt_detach(&adapter->mbuf_defrag_failed); 627 evcnt_detach(&adapter->mbuf_defrag_failed);
627 evcnt_detach(&adapter->efbig2_tx_dma_setup); 628 evcnt_detach(&adapter->efbig2_tx_dma_setup);
628 evcnt_detach(&adapter->einval_tx_dma_setup); 629 evcnt_detach(&adapter->einval_tx_dma_setup);
629 evcnt_detach(&adapter->other_tx_dma_setup); 630 evcnt_detach(&adapter->other_tx_dma_setup);
630 evcnt_detach(&adapter->eagain_tx_dma_setup); 631 evcnt_detach(&adapter->eagain_tx_dma_setup);
631 evcnt_detach(&adapter->enomem_tx_dma_setup); 632 evcnt_detach(&adapter->enomem_tx_dma_setup);
632 evcnt_detach(&adapter->watchdog_events); 633 evcnt_detach(&adapter->watchdog_events);
633 evcnt_detach(&adapter->tso_err); 634 evcnt_detach(&adapter->tso_err);
634 evcnt_detach(&adapter->link_irq); 635 evcnt_detach(&adapter->link_irq);
635 636
636 txr = adapter->tx_rings; 637 txr = adapter->tx_rings;
637 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 638 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
638 evcnt_detach(&adapter->queues[i].irqs); 639 evcnt_detach(&adapter->queues[i].irqs);
639 evcnt_detach(&adapter->queues[i].handleq); 640 evcnt_detach(&adapter->queues[i].handleq);
640 evcnt_detach(&adapter->queues[i].req); 641 evcnt_detach(&adapter->queues[i].req);
641 evcnt_detach(&txr->no_desc_avail); 642 evcnt_detach(&txr->no_desc_avail);
642 evcnt_detach(&txr->total_packets); 643 evcnt_detach(&txr->total_packets);
643 evcnt_detach(&txr->tso_tx); 644 evcnt_detach(&txr->tso_tx);
644#ifndef IXGBE_LEGACY_TX 645#ifndef IXGBE_LEGACY_TX
645 evcnt_detach(&txr->pcq_drops); 646 evcnt_detach(&txr->pcq_drops);
646#endif 647#endif
647 648
648 evcnt_detach(&rxr->rx_packets); 649 evcnt_detach(&rxr->rx_packets);
649 evcnt_detach(&rxr->rx_bytes); 650 evcnt_detach(&rxr->rx_bytes);
650 evcnt_detach(&rxr->rx_copies); 651 evcnt_detach(&rxr->rx_copies);
651 evcnt_detach(&rxr->no_jmbuf); 652 evcnt_detach(&rxr->no_jmbuf);
652 evcnt_detach(&rxr->rx_discarded); 653 evcnt_detach(&rxr->rx_discarded);
653 } 654 }
654 evcnt_detach(&stats->ipcs); 655 evcnt_detach(&stats->ipcs);
655 evcnt_detach(&stats->l4cs); 656 evcnt_detach(&stats->l4cs);
656 evcnt_detach(&stats->ipcs_bad); 657 evcnt_detach(&stats->ipcs_bad);
657 evcnt_detach(&stats->l4cs_bad); 658 evcnt_detach(&stats->l4cs_bad);
658 659
659 /* Packet Reception Stats */ 660 /* Packet Reception Stats */
660 evcnt_detach(&stats->vfgorc); 661 evcnt_detach(&stats->vfgorc);
661 evcnt_detach(&stats->vfgprc); 662 evcnt_detach(&stats->vfgprc);
662 evcnt_detach(&stats->vfmprc); 663 evcnt_detach(&stats->vfmprc);
663 664
664 /* Packet Transmission Stats */ 665 /* Packet Transmission Stats */
665 evcnt_detach(&stats->vfgotc); 666 evcnt_detach(&stats->vfgotc);
666 evcnt_detach(&stats->vfgptc); 667 evcnt_detach(&stats->vfgptc);
667 668
668 /* Mailbox Stats */ 669 /* Mailbox Stats */
669 evcnt_detach(&hw->mbx.stats.msgs_tx); 670 evcnt_detach(&hw->mbx.stats.msgs_tx);
670 evcnt_detach(&hw->mbx.stats.msgs_rx); 671 evcnt_detach(&hw->mbx.stats.msgs_rx);
671 evcnt_detach(&hw->mbx.stats.acks); 672 evcnt_detach(&hw->mbx.stats.acks);
672 evcnt_detach(&hw->mbx.stats.reqs); 673 evcnt_detach(&hw->mbx.stats.reqs);
673 evcnt_detach(&hw->mbx.stats.rsts); 674 evcnt_detach(&hw->mbx.stats.rsts);
674 675
675 ixgbe_free_transmit_structures(adapter); 676 ixgbe_free_transmit_structures(adapter);
676 ixgbe_free_receive_structures(adapter); 677 ixgbe_free_receive_structures(adapter);
677 for (int i = 0; i < adapter->num_queues; i++) { 678 for (int i = 0; i < adapter->num_queues; i++) {
678 struct ix_queue *lque = &adapter->queues[i]; 679 struct ix_queue *lque = &adapter->queues[i];
679 mutex_destroy(&lque->dc_mtx); 680 mutex_destroy(&lque->dc_mtx);
680 } 681 }
681 free(adapter->queues, M_DEVBUF); 682 free(adapter->queues, M_DEVBUF);
682 683
683 IXGBE_CORE_LOCK_DESTROY(adapter); 684 IXGBE_CORE_LOCK_DESTROY(adapter);
684 685
685 return (0); 686 return (0);
686} /* ixv_detach */ 687} /* ixv_detach */
687 688
688/************************************************************************ 689/************************************************************************
689 * ixv_init_locked - Init entry point 690 * ixv_init_locked - Init entry point
690 * 691 *
691 * Used in two ways: It is used by the stack as an init entry 692 * Used in two ways: It is used by the stack as an init entry
692 * point in network interface structure. It is also used 693 * point in network interface structure. It is also used
693 * by the driver as a hw/sw initialization routine to get 694 * by the driver as a hw/sw initialization routine to get
694 * to a consistent state. 695 * to a consistent state.
695 * 696 *
696 * return 0 on success, positive on failure 697 * return 0 on success, positive on failure
697 ************************************************************************/ 698 ************************************************************************/
698static void 699static void
699ixv_init_locked(struct adapter *adapter) 700ixv_init_locked(struct adapter *adapter)
700{ 701{
701 struct ifnet *ifp = adapter->ifp; 702 struct ifnet *ifp = adapter->ifp;
702 device_t dev = adapter->dev; 703 device_t dev = adapter->dev;
703 struct ixgbe_hw *hw = &adapter->hw; 704 struct ixgbe_hw *hw = &adapter->hw;
704 struct ix_queue *que; 705 struct ix_queue *que;
705 int error = 0; 706 int error = 0;
706 uint32_t mask; 707 uint32_t mask;
707 int i; 708 int i;
708 709
709 INIT_DEBUGOUT("ixv_init_locked: begin"); 710 INIT_DEBUGOUT("ixv_init_locked: begin");
710 KASSERT(mutex_owned(&adapter->core_mtx)); 711 KASSERT(mutex_owned(&adapter->core_mtx));
711 hw->adapter_stopped = FALSE; 712 hw->adapter_stopped = FALSE;
712 hw->mac.ops.stop_adapter(hw); 713 hw->mac.ops.stop_adapter(hw);
713 callout_stop(&adapter->timer); 714 callout_stop(&adapter->timer);
714 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 715 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
715 que->disabled_count = 0; 716 que->disabled_count = 0;
716 717
717 /* reprogram the RAR[0] in case user changed it. */ 718 /* reprogram the RAR[0] in case user changed it. */
718 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 719 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
719 720
720 /* Get the latest mac address, User can use a LAA */ 721 /* Get the latest mac address, User can use a LAA */
721 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), 722 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
722 IXGBE_ETH_LENGTH_OF_ADDRESS); 723 IXGBE_ETH_LENGTH_OF_ADDRESS);
723 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); 724 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
724 725
725 /* Prepare transmit descriptors and buffers */ 726 /* Prepare transmit descriptors and buffers */
726 if (ixgbe_setup_transmit_structures(adapter)) { 727 if (ixgbe_setup_transmit_structures(adapter)) {
727 aprint_error_dev(dev, "Could not setup transmit structures\n"); 728 aprint_error_dev(dev, "Could not setup transmit structures\n");
728 ixv_stop(adapter); 729 ixv_stop(adapter);
729 return; 730 return;
730 } 731 }
731 732
732 /* Reset VF and renegotiate mailbox API version */ 733 /* Reset VF and renegotiate mailbox API version */
733 hw->mac.ops.reset_hw(hw); 734 hw->mac.ops.reset_hw(hw);
734 hw->mac.ops.start_hw(hw); 735 hw->mac.ops.start_hw(hw);
735 error = ixv_negotiate_api(adapter); 736 error = ixv_negotiate_api(adapter);
736 if (error) 737 if (error)
737 device_printf(dev, 738 device_printf(dev,
738 "Mailbox API negotiation failed in init_locked!\n"); 739 "Mailbox API negotiation failed in init_locked!\n");
739 740
740 ixv_initialize_transmit_units(adapter); 741 ixv_initialize_transmit_units(adapter);
741 742
742 /* Setup Multicast table */ 743 /* Setup Multicast table */
743 ixv_set_multi(adapter); 744 ixv_set_multi(adapter);
744 745
745 /* 746 /*
746 * Determine the correct mbuf pool 747 * Determine the correct mbuf pool
747 * for doing jumbo/headersplit 748 * for doing jumbo/headersplit
748 */ 749 */
749 if (ifp->if_mtu > ETHERMTU) 750 if (ifp->if_mtu > ETHERMTU)
750 adapter->rx_mbuf_sz = MJUMPAGESIZE; 751 adapter->rx_mbuf_sz = MJUMPAGESIZE;
751 else 752 else
752 adapter->rx_mbuf_sz = MCLBYTES; 753 adapter->rx_mbuf_sz = MCLBYTES;
753 754
754 /* Prepare receive descriptors and buffers */ 755 /* Prepare receive descriptors and buffers */
755 if (ixgbe_setup_receive_structures(adapter)) { 756 if (ixgbe_setup_receive_structures(adapter)) {
756 device_printf(dev, "Could not setup receive structures\n"); 757 device_printf(dev, "Could not setup receive structures\n");
757 ixv_stop(adapter); 758 ixv_stop(adapter);
758 return; 759 return;
759 } 760 }
760 761
761 /* Configure RX settings */ 762 /* Configure RX settings */
762 ixv_initialize_receive_units(adapter); 763 ixv_initialize_receive_units(adapter);
763 764
764#if 0 /* XXX isn't it required? -- msaitoh */ 765#if 0 /* XXX isn't it required? -- msaitoh */
765 /* Set the various hardware offload abilities */ 766 /* Set the various hardware offload abilities */
766 ifp->if_hwassist = 0; 767 ifp->if_hwassist = 0;
767 if (ifp->if_capenable & IFCAP_TSO4) 768 if (ifp->if_capenable & IFCAP_TSO4)
768 ifp->if_hwassist |= CSUM_TSO; 769 ifp->if_hwassist |= CSUM_TSO;
769 if (ifp->if_capenable & IFCAP_TXCSUM) { 770 if (ifp->if_capenable & IFCAP_TXCSUM) {
770 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 771 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
771#if __FreeBSD_version >= 800000 772#if __FreeBSD_version >= 800000
772 ifp->if_hwassist |= CSUM_SCTP; 773 ifp->if_hwassist |= CSUM_SCTP;
773#endif 774#endif
774 } 775 }
775#endif 776#endif
776 777
777 /* Set up VLAN offload and filter */ 778 /* Set up VLAN offload and filter */
778 ixv_setup_vlan_support(adapter); 779 ixv_setup_vlan_support(adapter);
779 780
780 /* Set up MSI-X routing */ 781 /* Set up MSI-X routing */
781 ixv_configure_ivars(adapter); 782 ixv_configure_ivars(adapter);
782 783
783 /* Set up auto-mask */ 784 /* Set up auto-mask */
784 mask = (1 << adapter->vector); 785 mask = (1 << adapter->vector);
785 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 786 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
786 mask |= (1 << que->msix); 787 mask |= (1 << que->msix);
787 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask); 788 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
788 789
789 /* Set moderation on the Link interrupt */ 790 /* Set moderation on the Link interrupt */
790 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); 791 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
791 792
792 /* Stats init */ 793 /* Stats init */
793 ixv_init_stats(adapter); 794 ixv_init_stats(adapter);
794 795
795 /* Config/Enable Link */ 796 /* Config/Enable Link */
796 hw->mac.get_link_status = TRUE; 797 hw->mac.get_link_status = TRUE;
797 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, 798 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
798 FALSE); 799 FALSE);
799 800
800 /* Start watchdog */ 801 /* Start watchdog */
801 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); 802 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
802 803
803 /* And now turn on interrupts */ 804 /* And now turn on interrupts */
804 ixv_enable_intr(adapter); 805 ixv_enable_intr(adapter);
805 806
806 /* Update saved flags. See ixgbe_ifflags_cb() */ 807 /* Update saved flags. See ixgbe_ifflags_cb() */
807 adapter->if_flags = ifp->if_flags; 808 adapter->if_flags = ifp->if_flags;
808 adapter->ec_capenable = adapter->osdep.ec.ec_capenable; 809 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
809 810
810 /* Now inform the stack we're ready */ 811 /* Now inform the stack we're ready */
811 ifp->if_flags |= IFF_RUNNING; 812 ifp->if_flags |= IFF_RUNNING;
812 ifp->if_flags &= ~IFF_OACTIVE; 813 ifp->if_flags &= ~IFF_OACTIVE;
813 814
814 return; 815 return;
815} /* ixv_init_locked */ 816} /* ixv_init_locked */
816 817
817/************************************************************************ 818/************************************************************************
818 * ixv_enable_queue 819 * ixv_enable_queue
819 ************************************************************************/ 820 ************************************************************************/
820static inline void 821static inline void
821ixv_enable_queue(struct adapter *adapter, u32 vector) 822ixv_enable_queue(struct adapter *adapter, u32 vector)
822{ 823{
823 struct ixgbe_hw *hw = &adapter->hw; 824 struct ixgbe_hw *hw = &adapter->hw;
824 struct ix_queue *que = &adapter->queues[vector]; 825 struct ix_queue *que = &adapter->queues[vector];
825 u32 queue = 1UL << vector; 826 u32 queue = 1UL << vector;
826 u32 mask; 827 u32 mask;
827 828
828 mutex_enter(&que->dc_mtx); 829 mutex_enter(&que->dc_mtx);
829 if (que->disabled_count > 0 && --que->disabled_count > 0) 830 if (que->disabled_count > 0 && --que->disabled_count > 0)
830 goto out; 831 goto out;
831 832
832 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 833 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
833 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 834 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
834out: 835out:
835 mutex_exit(&que->dc_mtx); 836 mutex_exit(&que->dc_mtx);
836} /* ixv_enable_queue */ 837} /* ixv_enable_queue */
837 838
838/************************************************************************ 839/************************************************************************
839 * ixv_disable_queue 840 * ixv_disable_queue
840 ************************************************************************/ 841 ************************************************************************/
841static inline void 842static inline void
842ixv_disable_queue(struct adapter *adapter, u32 vector) 843ixv_disable_queue(struct adapter *adapter, u32 vector)
843{ 844{
844 struct ixgbe_hw *hw = &adapter->hw; 845 struct ixgbe_hw *hw = &adapter->hw;
845 struct ix_queue *que = &adapter->queues[vector]; 846 struct ix_queue *que = &adapter->queues[vector];
846 u32 queue = 1UL << vector; 847 u32 queue = 1UL << vector;
847 u32 mask; 848 u32 mask;
848 849
849 mutex_enter(&que->dc_mtx); 850 mutex_enter(&que->dc_mtx);
850 if (que->disabled_count++ > 0) 851 if (que->disabled_count++ > 0)
851 goto out; 852 goto out;
852 853
853 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 854 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
854 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); 855 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
855out: 856out:
856 mutex_exit(&que->dc_mtx); 857 mutex_exit(&que->dc_mtx);
857} /* ixv_disable_queue */ 858} /* ixv_disable_queue */
858 859
859#if 0 860#if 0
860static inline void 861static inline void
861ixv_rearm_queues(struct adapter *adapter, u64 queues) 862ixv_rearm_queues(struct adapter *adapter, u64 queues)
862{ 863{
863 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues); 864 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
864 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask); 865 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
865} /* ixv_rearm_queues */ 866} /* ixv_rearm_queues */
866#endif 867#endif
867 868
868 869
869/************************************************************************ 870/************************************************************************
870 * ixv_msix_que - MSI-X Queue Interrupt Service routine 871 * ixv_msix_que - MSI-X Queue Interrupt Service routine
871 ************************************************************************/ 872 ************************************************************************/
872static int 873static int
873ixv_msix_que(void *arg) 874ixv_msix_que(void *arg)
874{ 875{
875 struct ix_queue *que = arg; 876 struct ix_queue *que = arg;
876 struct adapter *adapter = que->adapter; 877 struct adapter *adapter = que->adapter;
877 struct tx_ring *txr = que->txr; 878 struct tx_ring *txr = que->txr;
878 struct rx_ring *rxr = que->rxr; 879 struct rx_ring *rxr = que->rxr;
879 bool more; 880 bool more;
880 u32 newitr = 0; 881 u32 newitr = 0;
881 882
882 ixv_disable_queue(adapter, que->msix); 883 ixv_disable_queue(adapter, que->msix);
883 ++que->irqs.ev_count; 884 ++que->irqs.ev_count;
884 885
885#ifdef __NetBSD__ 886#ifdef __NetBSD__
886 /* Don't run ixgbe_rxeof in interrupt context */ 887 /* Don't run ixgbe_rxeof in interrupt context */
887 more = true; 888 more = true;
888#else 889#else
889 more = ixgbe_rxeof(que); 890 more = ixgbe_rxeof(que);
890#endif 891#endif
891 892
892 IXGBE_TX_LOCK(txr); 893 IXGBE_TX_LOCK(txr);
893 ixgbe_txeof(txr); 894 ixgbe_txeof(txr);
894 IXGBE_TX_UNLOCK(txr); 895 IXGBE_TX_UNLOCK(txr);
895 896
896 /* Do AIM now? */ 897 /* Do AIM now? */
897 898
898 if (adapter->enable_aim == false) 899 if (adapter->enable_aim == false)
899 goto no_calc; 900 goto no_calc;
900 /* 901 /*
901 * Do Adaptive Interrupt Moderation: 902 * Do Adaptive Interrupt Moderation:
902 * - Write out last calculated setting 903 * - Write out last calculated setting
903 * - Calculate based on average size over 904 * - Calculate based on average size over
904 * the last interval. 905 * the last interval.
905 */ 906 */
906 if (que->eitr_setting) 907 if (que->eitr_setting)
907 ixv_eitr_write(adapter, que->msix, que->eitr_setting); 908 ixv_eitr_write(adapter, que->msix, que->eitr_setting);
908 909
909 que->eitr_setting = 0; 910 que->eitr_setting = 0;
910 911
911 /* Idle, do nothing */ 912 /* Idle, do nothing */
912 if ((txr->bytes == 0) && (rxr->bytes == 0)) 913 if ((txr->bytes == 0) && (rxr->bytes == 0))
913 goto no_calc; 914 goto no_calc;
914 915
915 if ((txr->bytes) && (txr->packets)) 916 if ((txr->bytes) && (txr->packets))
916 newitr = txr->bytes/txr->packets; 917 newitr = txr->bytes/txr->packets;
917 if ((rxr->bytes) && (rxr->packets)) 918 if ((rxr->bytes) && (rxr->packets))
918 newitr = uimax(newitr, (rxr->bytes / rxr->packets)); 919 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
919 newitr += 24; /* account for hardware frame, crc */ 920 newitr += 24; /* account for hardware frame, crc */
920 921
921 /* set an upper boundary */ 922 /* set an upper boundary */
922 newitr = uimin(newitr, 3000); 923 newitr = uimin(newitr, 3000);
923 924
924 /* Be nice to the mid range */ 925 /* Be nice to the mid range */
925 if ((newitr > 300) && (newitr < 1200)) 926 if ((newitr > 300) && (newitr < 1200))
926 newitr = (newitr / 3); 927 newitr = (newitr / 3);
927 else 928 else
928 newitr = (newitr / 2); 929 newitr = (newitr / 2);
929 930
930 /* 931 /*
931 * When RSC is used, ITR interval must be larger than RSC_DELAY. 932 * When RSC is used, ITR interval must be larger than RSC_DELAY.
932 * Currently, we use 2us for RSC_DELAY. The minimum value is always 933 * Currently, we use 2us for RSC_DELAY. The minimum value is always
933 * greater than 2us on 100M (and 10M?(not documented)), but it's not 934 * greater than 2us on 100M (and 10M?(not documented)), but it's not
934 * on 1G and higher. 935 * on 1G and higher.
935 */ 936 */
936 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 937 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
937 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 938 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
938 if (newitr < IXGBE_MIN_RSC_EITR_10G1G) 939 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
939 newitr = IXGBE_MIN_RSC_EITR_10G1G; 940 newitr = IXGBE_MIN_RSC_EITR_10G1G;
940 } 941 }
941 942
942 /* save for next interrupt */ 943 /* save for next interrupt */
943 que->eitr_setting = newitr; 944 que->eitr_setting = newitr;
944 945
945 /* Reset state */ 946 /* Reset state */
946 txr->bytes = 0; 947 txr->bytes = 0;
947 txr->packets = 0; 948 txr->packets = 0;
948 rxr->bytes = 0; 949 rxr->bytes = 0;
949 rxr->packets = 0; 950 rxr->packets = 0;
950 951
951no_calc: 952no_calc:
952 if (more) 953 if (more)
953 softint_schedule(que->que_si); 954 softint_schedule(que->que_si);
954 else /* Re-enable this interrupt */ 955 else /* Re-enable this interrupt */
955 ixv_enable_queue(adapter, que->msix); 956 ixv_enable_queue(adapter, que->msix);
956 957
957 return 1; 958 return 1;
958} /* ixv_msix_que */ 959} /* ixv_msix_que */
959 960
960/************************************************************************ 961/************************************************************************
961 * ixv_msix_mbx 962 * ixv_msix_mbx
962 ************************************************************************/ 963 ************************************************************************/
963static int 964static int
964ixv_msix_mbx(void *arg) 965ixv_msix_mbx(void *arg)
965{ 966{
966 struct adapter *adapter = arg; 967 struct adapter *adapter = arg;
967 struct ixgbe_hw *hw = &adapter->hw; 968 struct ixgbe_hw *hw = &adapter->hw;
968 969
969 ++adapter->link_irq.ev_count; 970 ++adapter->link_irq.ev_count;
970 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */ 971 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
971 972
972 /* Link status change */ 973 /* Link status change */
973 hw->mac.get_link_status = TRUE; 974 hw->mac.get_link_status = TRUE;
974 softint_schedule(adapter->link_si); 975 softint_schedule(adapter->link_si);
975 976
976 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); 977 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
977 978
978 return 1; 979 return 1;
979} /* ixv_msix_mbx */ 980} /* ixv_msix_mbx */
980 981
981static void 982static void
982ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) 983ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
983{ 984{
984 985
985 /* 986 /*
986 * Newer devices than 82598 have VF function, so this function is 987 * Newer devices than 82598 have VF function, so this function is
987 * simple. 988 * simple.
988 */ 989 */
989 itr |= IXGBE_EITR_CNT_WDIS; 990 itr |= IXGBE_EITR_CNT_WDIS;
990 991
991 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr); 992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
992} 993}
993 994
994 995
995/************************************************************************ 996/************************************************************************
996 * ixv_media_status - Media Ioctl callback 997 * ixv_media_status - Media Ioctl callback
997 * 998 *
998 * Called whenever the user queries the status of 999 * Called whenever the user queries the status of
999 * the interface using ifconfig. 1000 * the interface using ifconfig.
1000 ************************************************************************/ 1001 ************************************************************************/
1001static void 1002static void
1002ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1003ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1003{ 1004{
1004 struct adapter *adapter = ifp->if_softc; 1005 struct adapter *adapter = ifp->if_softc;
1005 1006
1006 INIT_DEBUGOUT("ixv_media_status: begin"); 1007 INIT_DEBUGOUT("ixv_media_status: begin");
1007 IXGBE_CORE_LOCK(adapter); 1008 IXGBE_CORE_LOCK(adapter);
1008 ixv_update_link_status(adapter); 1009 ixv_update_link_status(adapter);
1009 1010
1010 ifmr->ifm_status = IFM_AVALID; 1011 ifmr->ifm_status = IFM_AVALID;
1011 ifmr->ifm_active = IFM_ETHER; 1012 ifmr->ifm_active = IFM_ETHER;
1012 1013
1013 if (adapter->link_active != LINK_STATE_UP) { 1014 if (adapter->link_active != LINK_STATE_UP) {
1014 ifmr->ifm_active |= IFM_NONE; 1015 ifmr->ifm_active |= IFM_NONE;
1015 IXGBE_CORE_UNLOCK(adapter); 1016 IXGBE_CORE_UNLOCK(adapter);
1016 return; 1017 return;
1017 } 1018 }
1018 1019
1019 ifmr->ifm_status |= IFM_ACTIVE; 1020 ifmr->ifm_status |= IFM_ACTIVE;
1020 1021
1021 switch (adapter->link_speed) { 1022 switch (adapter->link_speed) {
1022 case IXGBE_LINK_SPEED_10GB_FULL: 1023 case IXGBE_LINK_SPEED_10GB_FULL:
1023 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 1024 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1024 break; 1025 break;
1025 case IXGBE_LINK_SPEED_5GB_FULL: 1026 case IXGBE_LINK_SPEED_5GB_FULL:
1026 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 1027 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1027 break; 1028 break;
1028 case IXGBE_LINK_SPEED_2_5GB_FULL: 1029 case IXGBE_LINK_SPEED_2_5GB_FULL:
1029 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 1030 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1030 break; 1031 break;
1031 case IXGBE_LINK_SPEED_1GB_FULL: 1032 case IXGBE_LINK_SPEED_1GB_FULL:
1032 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 1033 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1033 break; 1034 break;
1034 case IXGBE_LINK_SPEED_100_FULL: 1035 case IXGBE_LINK_SPEED_100_FULL:
1035 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 1036 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1036 break; 1037 break;
1037 case IXGBE_LINK_SPEED_10_FULL: 1038 case IXGBE_LINK_SPEED_10_FULL:
1038 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 1039 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1039 break; 1040 break;
1040 } 1041 }
1041 1042
1042 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); 1043 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1043 1044
1044 IXGBE_CORE_UNLOCK(adapter); 1045 IXGBE_CORE_UNLOCK(adapter);
1045} /* ixv_media_status */ 1046} /* ixv_media_status */
1046 1047
1047/************************************************************************ 1048/************************************************************************
1048 * ixv_media_change - Media Ioctl callback 1049 * ixv_media_change - Media Ioctl callback
1049 * 1050 *
1050 * Called when the user changes speed/duplex using 1051 * Called when the user changes speed/duplex using
1051 * media/mediopt option with ifconfig. 1052 * media/mediopt option with ifconfig.
1052 ************************************************************************/ 1053 ************************************************************************/
1053static int 1054static int
1054ixv_media_change(struct ifnet *ifp) 1055ixv_media_change(struct ifnet *ifp)
1055{ 1056{
1056 struct adapter *adapter = ifp->if_softc; 1057 struct adapter *adapter = ifp->if_softc;
1057 struct ifmedia *ifm = &adapter->media; 1058 struct ifmedia *ifm = &adapter->media;
1058 1059
1059 INIT_DEBUGOUT("ixv_media_change: begin"); 1060 INIT_DEBUGOUT("ixv_media_change: begin");
1060 1061
1061 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1062 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1062 return (EINVAL); 1063 return (EINVAL);
1063 1064
1064 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1065 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1065 case IFM_AUTO: 1066 case IFM_AUTO:
1066 break; 1067 break;
1067 default: 1068 default:
1068 device_printf(adapter->dev, "Only auto media type\n"); 1069 device_printf(adapter->dev, "Only auto media type\n");
1069 return (EINVAL); 1070 return (EINVAL);
1070 } 1071 }
1071 1072
1072 return (0); 1073 return (0);
1073} /* ixv_media_change */ 1074} /* ixv_media_change */
1074 1075
1075 1076
1076/************************************************************************ 1077/************************************************************************
1077 * ixv_negotiate_api 1078 * ixv_negotiate_api
1078 * 1079 *
1079 * Negotiate the Mailbox API with the PF; 1080 * Negotiate the Mailbox API with the PF;
1080 * start with the most featured API first. 1081 * start with the most featured API first.
1081 ************************************************************************/ 1082 ************************************************************************/
1082static int 1083static int
1083ixv_negotiate_api(struct adapter *adapter) 1084ixv_negotiate_api(struct adapter *adapter)
1084{ 1085{
1085 struct ixgbe_hw *hw = &adapter->hw; 1086 struct ixgbe_hw *hw = &adapter->hw;
1086 int mbx_api[] = { ixgbe_mbox_api_11, 1087 int mbx_api[] = { ixgbe_mbox_api_11,
1087 ixgbe_mbox_api_10, 1088 ixgbe_mbox_api_10,
1088 ixgbe_mbox_api_unknown }; 1089 ixgbe_mbox_api_unknown };
1089 int i = 0; 1090 int i = 0;
1090 1091
1091 while (mbx_api[i] != ixgbe_mbox_api_unknown) { 1092 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1092 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) 1093 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1093 return (0); 1094 return (0);
1094 i++; 1095 i++;
1095 } 1096 }
1096 1097
1097 return (EINVAL); 1098 return (EINVAL);
1098} /* ixv_negotiate_api */ 1099} /* ixv_negotiate_api */
1099 1100
1100 1101
1101/************************************************************************ 1102/************************************************************************
1102 * ixv_set_multi - Multicast Update 1103 * ixv_set_multi - Multicast Update
1103 * 1104 *
1104 * Called whenever multicast address list is updated. 1105 * Called whenever multicast address list is updated.
1105 ************************************************************************/ 1106 ************************************************************************/
1106static void 1107static void
1107ixv_set_multi(struct adapter *adapter) 1108ixv_set_multi(struct adapter *adapter)
1108{ 1109{
1109 struct ether_multi *enm; 1110 struct ether_multi *enm;
1110 struct ether_multistep step; 1111 struct ether_multistep step;
1111 struct ethercom *ec = &adapter->osdep.ec; 1112 struct ethercom *ec = &adapter->osdep.ec;
1112 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; 1113 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1113 u8 *update_ptr; 1114 u8 *update_ptr;
1114 int mcnt = 0; 1115 int mcnt = 0;
1115 1116
1116 KASSERT(mutex_owned(&adapter->core_mtx)); 1117 KASSERT(mutex_owned(&adapter->core_mtx));
1117 IOCTL_DEBUGOUT("ixv_set_multi: begin"); 1118 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1118 1119
1119 ETHER_LOCK(ec); 1120 ETHER_LOCK(ec);
1120 ETHER_FIRST_MULTI(step, ec, enm); 1121 ETHER_FIRST_MULTI(step, ec, enm);
1121 while (enm != NULL) { 1122 while (enm != NULL) {
1122 bcopy(enm->enm_addrlo, 1123 bcopy(enm->enm_addrlo,
1123 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], 1124 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1124 IXGBE_ETH_LENGTH_OF_ADDRESS); 1125 IXGBE_ETH_LENGTH_OF_ADDRESS);
1125 mcnt++; 1126 mcnt++;
1126 /* XXX This might be required --msaitoh */ 1127 /* XXX This might be required --msaitoh */
1127 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) 1128 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)
1128 break; 1129 break;
1129 ETHER_NEXT_MULTI(step, enm); 1130 ETHER_NEXT_MULTI(step, enm);
1130 } 1131 }
1131 ETHER_UNLOCK(ec); 1132 ETHER_UNLOCK(ec);
1132 1133
1133 update_ptr = mta; 1134 update_ptr = mta;
1134 1135
1135 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 1136 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
1136 ixv_mc_array_itr, TRUE); 1137 ixv_mc_array_itr, TRUE);
1137} /* ixv_set_multi */ 1138} /* ixv_set_multi */
1138 1139
1139/************************************************************************ 1140/************************************************************************
1140 * ixv_mc_array_itr 1141 * ixv_mc_array_itr
1141 * 1142 *
1142 * An iterator function needed by the multicast shared code. 1143 * An iterator function needed by the multicast shared code.
1143 * It feeds the shared code routine the addresses in the 1144 * It feeds the shared code routine the addresses in the
1144 * array of ixv_set_multi() one by one. 1145 * array of ixv_set_multi() one by one.
1145 ************************************************************************/ 1146 ************************************************************************/
1146static u8 * 1147static u8 *
1147ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 1148ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1148{ 1149{
1149 u8 *addr = *update_ptr; 1150 u8 *addr = *update_ptr;
1150 u8 *newptr; 1151 u8 *newptr;
1151 1152
1152 *vmdq = 0; 1153 *vmdq = 0;
1153 1154
1154 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 1155 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1155 *update_ptr = newptr; 1156 *update_ptr = newptr;
1156 1157
1157 return addr; 1158 return addr;
1158} /* ixv_mc_array_itr */ 1159} /* ixv_mc_array_itr */
1159 1160
1160/************************************************************************ 1161/************************************************************************
1161 * ixv_local_timer - Timer routine 1162 * ixv_local_timer - Timer routine
1162 * 1163 *
1163 * Checks for link status, updates statistics, 1164 * Checks for link status, updates statistics,
1164 * and runs the watchdog check. 1165 * and runs the watchdog check.
1165 ************************************************************************/ 1166 ************************************************************************/
1166static void 1167static void
1167ixv_local_timer(void *arg) 1168ixv_local_timer(void *arg)
1168{ 1169{
1169 struct adapter *adapter = arg; 1170 struct adapter *adapter = arg;
1170 1171
1171 IXGBE_CORE_LOCK(adapter); 1172 IXGBE_CORE_LOCK(adapter);
1172 ixv_local_timer_locked(adapter); 1173 ixv_local_timer_locked(adapter);
1173 IXGBE_CORE_UNLOCK(adapter); 1174 IXGBE_CORE_UNLOCK(adapter);
1174} 1175}
1175 1176
1176static void 1177static void
1177ixv_local_timer_locked(void *arg) 1178ixv_local_timer_locked(void *arg)
1178{ 1179{
1179 struct adapter *adapter = arg; 1180 struct adapter *adapter = arg;
1180 device_t dev = adapter->dev; 1181 device_t dev = adapter->dev;
1181 struct ix_queue *que = adapter->queues; 1182 struct ix_queue *que = adapter->queues;
1182 u64 queues = 0; 1183 u64 queues = 0;
1183 u64 v0, v1, v2, v3, v4, v5, v6, v7; 1184 u64 v0, v1, v2, v3, v4, v5, v6, v7;
1184 int hung = 0; 1185 int hung = 0;
1185 int i; 1186 int i;
1186 1187
1187 KASSERT(mutex_owned(&adapter->core_mtx)); 1188 KASSERT(mutex_owned(&adapter->core_mtx));
1188 1189
1189 if (ixv_check_link(adapter)) { 1190 if (ixv_check_link(adapter)) {
1190 ixv_init_locked(adapter); 1191 ixv_init_locked(adapter);
1191 return; 1192 return;
1192 } 1193 }
1193 1194
1194 /* Stats Update */ 1195 /* Stats Update */
1195 ixv_update_stats(adapter); 1196 ixv_update_stats(adapter);
1196 1197
1197 /* Update some event counters */ 1198 /* Update some event counters */
1198 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0; 1199 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
1199 que = adapter->queues; 1200 que = adapter->queues;
1200 for (i = 0; i < adapter->num_queues; i++, que++) { 1201 for (i = 0; i < adapter->num_queues; i++, que++) {
1201 struct tx_ring *txr = que->txr; 1202 struct tx_ring *txr = que->txr;
1202 1203
1203 v0 += txr->q_efbig_tx_dma_setup; 1204 v0 += txr->q_efbig_tx_dma_setup;
1204 v1 += txr->q_mbuf_defrag_failed; 1205 v1 += txr->q_mbuf_defrag_failed;
1205 v2 += txr->q_efbig2_tx_dma_setup; 1206 v2 += txr->q_efbig2_tx_dma_setup;
1206 v3 += txr->q_einval_tx_dma_setup; 1207 v3 += txr->q_einval_tx_dma_setup;
1207 v4 += txr->q_other_tx_dma_setup; 1208 v4 += txr->q_other_tx_dma_setup;
1208 v5 += txr->q_eagain_tx_dma_setup; 1209 v5 += txr->q_eagain_tx_dma_setup;
1209 v6 += txr->q_enomem_tx_dma_setup; 1210 v6 += txr->q_enomem_tx_dma_setup;
1210 v7 += txr->q_tso_err; 1211 v7 += txr->q_tso_err;
1211 } 1212 }
1212 adapter->efbig_tx_dma_setup.ev_count = v0; 1213 adapter->efbig_tx_dma_setup.ev_count = v0;
1213 adapter->mbuf_defrag_failed.ev_count = v1; 1214 adapter->mbuf_defrag_failed.ev_count = v1;
1214 adapter->efbig2_tx_dma_setup.ev_count = v2; 1215 adapter->efbig2_tx_dma_setup.ev_count = v2;
1215 adapter->einval_tx_dma_setup.ev_count = v3; 1216 adapter->einval_tx_dma_setup.ev_count = v3;
1216 adapter->other_tx_dma_setup.ev_count = v4; 1217 adapter->other_tx_dma_setup.ev_count = v4;
1217 adapter->eagain_tx_dma_setup.ev_count = v5; 1218 adapter->eagain_tx_dma_setup.ev_count = v5;
1218 adapter->enomem_tx_dma_setup.ev_count = v6; 1219 adapter->enomem_tx_dma_setup.ev_count = v6;
1219 adapter->tso_err.ev_count = v7; 1220 adapter->tso_err.ev_count = v7;
1220 1221
1221 /* 1222 /*
1222 * Check the TX queues status 1223 * Check the TX queues status
1223 * - mark hung queues so we don't schedule on them 1224 * - mark hung queues so we don't schedule on them
1224 * - watchdog only if all queues show hung 1225 * - watchdog only if all queues show hung
1225 */ 1226 */
1226 que = adapter->queues; 1227 que = adapter->queues;
1227 for (i = 0; i < adapter->num_queues; i++, que++) { 1228 for (i = 0; i < adapter->num_queues; i++, que++) {
1228 /* Keep track of queues with work for soft irq */ 1229 /* Keep track of queues with work for soft irq */
1229 if (que->txr->busy) 1230 if (que->txr->busy)
1230 queues |= ((u64)1 << que->me); 1231 queues |= ((u64)1 << que->me);
1231 /* 1232 /*
1232 * Each time txeof runs without cleaning, but there 1233 * Each time txeof runs without cleaning, but there
1233 * are uncleaned descriptors it increments busy. If 1234 * are uncleaned descriptors it increments busy. If
1234 * we get to the MAX we declare it hung. 1235 * we get to the MAX we declare it hung.
1235 */ 1236 */
1236 if (que->busy == IXGBE_QUEUE_HUNG) { 1237 if (que->busy == IXGBE_QUEUE_HUNG) {
1237 ++hung; 1238 ++hung;
1238 /* Mark the queue as inactive */ 1239 /* Mark the queue as inactive */
1239 adapter->active_queues &= ~((u64)1 << que->me); 1240 adapter->active_queues &= ~((u64)1 << que->me);
1240 continue; 1241 continue;
1241 } else { 1242 } else {
1242 /* Check if we've come back from hung */ 1243 /* Check if we've come back from hung */
1243 if ((adapter->active_queues & ((u64)1 << que->me)) == 0) 1244 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1244 adapter->active_queues |= ((u64)1 << que->me); 1245 adapter->active_queues |= ((u64)1 << que->me);
1245 } 1246 }
1246 if (que->busy >= IXGBE_MAX_TX_BUSY) { 1247 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1247 device_printf(dev, 1248 device_printf(dev,
1248 "Warning queue %d appears to be hung!\n", i); 1249 "Warning queue %d appears to be hung!\n", i);
1249 que->txr->busy = IXGBE_QUEUE_HUNG; 1250 que->txr->busy = IXGBE_QUEUE_HUNG;
1250 ++hung; 1251 ++hung;
1251 } 1252 }
1252 } 1253 }
1253 1254
1254 /* Only truly watchdog if all queues show hung */ 1255 /* Only truly watchdog if all queues show hung */
1255 if (hung == adapter->num_queues) 1256 if (hung == adapter->num_queues)
1256 goto watchdog; 1257 goto watchdog;
1257#if 0 1258#if 0
1258 else if (queues != 0) { /* Force an IRQ on queues with work */ 1259 else if (queues != 0) { /* Force an IRQ on queues with work */
1259 ixv_rearm_queues(adapter, queues); 1260 ixv_rearm_queues(adapter, queues);
1260 } 1261 }
1261#endif 1262#endif
1262 1263
1263 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); 1264 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1264 1265
1265 return; 1266 return;
1266 1267
1267watchdog: 1268watchdog:
1268 1269
1269 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 1270 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1270 adapter->ifp->if_flags &= ~IFF_RUNNING; 1271 adapter->ifp->if_flags &= ~IFF_RUNNING;
1271 adapter->watchdog_events.ev_count++; 1272 adapter->watchdog_events.ev_count++;
1272 ixv_init_locked(adapter); 1273 ixv_init_locked(adapter);
1273} /* ixv_local_timer */ 1274} /* ixv_local_timer */
1274 1275
1275/************************************************************************ 1276/************************************************************************
1276 * ixv_update_link_status - Update OS on link state 1277 * ixv_update_link_status - Update OS on link state
1277 * 1278 *
1278 * Note: Only updates the OS on the cached link state. 1279 * Note: Only updates the OS on the cached link state.
1279 * The real check of the hardware only happens with 1280 * The real check of the hardware only happens with
1280 * a link interrupt. 1281 * a link interrupt.
1281 ************************************************************************/ 1282 ************************************************************************/
1282static void 1283static void
1283ixv_update_link_status(struct adapter *adapter) 1284ixv_update_link_status(struct adapter *adapter)
1284{ 1285{
1285 struct ifnet *ifp = adapter->ifp; 1286 struct ifnet *ifp = adapter->ifp;
1286 device_t dev = adapter->dev; 1287 device_t dev = adapter->dev;
1287 1288
1288 KASSERT(mutex_owned(&adapter->core_mtx)); 1289 KASSERT(mutex_owned(&adapter->core_mtx));
1289 1290
1290 if (adapter->link_up) { 1291 if (adapter->link_up) {
1291 if (adapter->link_active != LINK_STATE_UP) { 1292 if (adapter->link_active != LINK_STATE_UP) {
1292 if (bootverbose) { 1293 if (bootverbose) {
1293 const char *bpsmsg; 1294 const char *bpsmsg;
1294 1295
1295 switch (adapter->link_speed) { 1296 switch (adapter->link_speed) {
1296 case IXGBE_LINK_SPEED_10GB_FULL: 1297 case IXGBE_LINK_SPEED_10GB_FULL:
1297 bpsmsg = "10 Gbps"; 1298 bpsmsg = "10 Gbps";
1298 break; 1299 break;
1299 case IXGBE_LINK_SPEED_5GB_FULL: 1300 case IXGBE_LINK_SPEED_5GB_FULL:
1300 bpsmsg = "5 Gbps"; 1301 bpsmsg = "5 Gbps";
1301 break; 1302 break;
1302 case IXGBE_LINK_SPEED_2_5GB_FULL: 1303 case IXGBE_LINK_SPEED_2_5GB_FULL:
1303 bpsmsg = "2.5 Gbps"; 1304 bpsmsg = "2.5 Gbps";
1304 break; 1305 break;
1305 case IXGBE_LINK_SPEED_1GB_FULL: 1306 case IXGBE_LINK_SPEED_1GB_FULL:
1306 bpsmsg = "1 Gbps"; 1307 bpsmsg = "1 Gbps";
1307 break; 1308 break;
1308 case IXGBE_LINK_SPEED_100_FULL: 1309 case IXGBE_LINK_SPEED_100_FULL:
1309 bpsmsg = "100 Mbps"; 1310 bpsmsg = "100 Mbps";
1310 break; 1311 break;
1311 case IXGBE_LINK_SPEED_10_FULL: 1312 case IXGBE_LINK_SPEED_10_FULL:
1312 bpsmsg = "10 Mbps"; 1313 bpsmsg = "10 Mbps";
1313 break; 1314 break;
1314 default: 1315 default:
1315 bpsmsg = "unknown speed"; 1316 bpsmsg = "unknown speed";
1316 break; 1317 break;
1317 } 1318 }
1318 device_printf(dev, "Link is up %s %s \n", 1319 device_printf(dev, "Link is up %s %s \n",
1319 bpsmsg, "Full Duplex"); 1320 bpsmsg, "Full Duplex");
1320 } 1321 }
1321 adapter->link_active = LINK_STATE_UP; 1322 adapter->link_active = LINK_STATE_UP;
1322 if_link_state_change(ifp, LINK_STATE_UP); 1323 if_link_state_change(ifp, LINK_STATE_UP);
1323 } 1324 }
1324 } else { 1325 } else {
1325 /* 1326 /*
1326 * Do it when link active changes to DOWN. i.e. 1327 * Do it when link active changes to DOWN. i.e.
1327 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN 1328 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
1328 * b) LINK_STATE_UP -> LINK_STATE_DOWN 1329 * b) LINK_STATE_UP -> LINK_STATE_DOWN
1329 */ 1330 */
1330 if (adapter->link_active != LINK_STATE_DOWN) { 1331 if (adapter->link_active != LINK_STATE_DOWN) {
1331 if (bootverbose) 1332 if (bootverbose)
1332 device_printf(dev, "Link is Down\n"); 1333 device_printf(dev, "Link is Down\n");
1333 if_link_state_change(ifp, LINK_STATE_DOWN); 1334 if_link_state_change(ifp, LINK_STATE_DOWN);
1334 adapter->link_active = LINK_STATE_DOWN; 1335 adapter->link_active = LINK_STATE_DOWN;
1335 } 1336 }
1336 } 1337 }
1337} /* ixv_update_link_status */ 1338} /* ixv_update_link_status */
1338 1339
1339 1340
1340/************************************************************************ 1341/************************************************************************
1341 * ixv_stop - Stop the hardware 1342 * ixv_stop - Stop the hardware
1342 * 1343 *
1343 * Disables all traffic on the adapter by issuing a 1344 * Disables all traffic on the adapter by issuing a
1344 * global reset on the MAC and deallocates TX/RX buffers. 1345 * global reset on the MAC and deallocates TX/RX buffers.
1345 ************************************************************************/ 1346 ************************************************************************/
1346static void 1347static void
1347ixv_ifstop(struct ifnet *ifp, int disable) 1348ixv_ifstop(struct ifnet *ifp, int disable)
1348{ 1349{
1349 struct adapter *adapter = ifp->if_softc; 1350 struct adapter *adapter = ifp->if_softc;
1350 1351
1351 IXGBE_CORE_LOCK(adapter); 1352 IXGBE_CORE_LOCK(adapter);
1352 ixv_stop(adapter); 1353 ixv_stop(adapter);
1353 IXGBE_CORE_UNLOCK(adapter); 1354 IXGBE_CORE_UNLOCK(adapter);
1354} 1355}
1355 1356
/*
 * ixv_stop - Stop the hardware (core-locked body).
 *
 * Disables interrupts, marks the interface not-running, resets and
 * stops the MAC, and cancels the local timer.  Caller must hold the
 * core lock (asserted below).  "arg" is the adapter softc.
 */
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask interrupts first so nothing fires during teardown. */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Reset, then stop the adapter.  adapter_stopped is cleared in
	 * between, presumably so stop_adapter() performs a full stop
	 * rather than short-circuiting -- TODO confirm against
	 * ixgbe_stop_adapter() semantics.
	 */
	hw->mac.ops.reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Stop the watchdog/stats callout; teardown continues in caller. */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1383 1384
1384 1385
/************************************************************************
 * ixv_allocate_pci_resources
 *
 *   Map BAR0 (the device register window) and pick up tuneables.
 *   Returns 0 on success, ENXIO if BAR0 has an unexpected type or
 *   cannot be mapped.  On the error path osdep.mem_size is zeroed so
 *   ixv_free_pci_resources() knows there is nothing to unmap.
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		    memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Register windows must not be mapped prefetchable;
		 * strip the bit if the BAR advertises it.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		    adapter->osdep.mem_size, flags,
		    &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks "nothing mapped" for cleanup. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1437 1438
1438/************************************************************************ 1439/************************************************************************
1439 * ixv_free_pci_resources 1440 * ixv_free_pci_resources
1440 ************************************************************************/ 1441 ************************************************************************/
1441static void 1442static void
1442ixv_free_pci_resources(struct adapter * adapter) 1443ixv_free_pci_resources(struct adapter * adapter)
1443{ 1444{
1444 struct ix_queue *que = adapter->queues; 1445 struct ix_queue *que = adapter->queues;
1445 int rid; 1446 int rid;
1446 1447
1447 /* 1448 /*
1448 * Release all msix queue resources: 1449 * Release all msix queue resources:
1449 */ 1450 */
1450 for (int i = 0; i < adapter->num_queues; i++, que++) { 1451 for (int i = 0; i < adapter->num_queues; i++, que++) {
1451 if (que->res != NULL) 1452 if (que->res != NULL)
1452 pci_intr_disestablish(adapter->osdep.pc, 1453 pci_intr_disestablish(adapter->osdep.pc,
1453 adapter->osdep.ihs[i]); 1454 adapter->osdep.ihs[i]);
1454 } 1455 }
1455 1456
1456 1457
1457 /* Clean the Mailbox interrupt last */ 1458 /* Clean the Mailbox interrupt last */
1458 rid = adapter->vector; 1459 rid = adapter->vector;
1459 1460
1460 if (adapter->osdep.ihs[rid] != NULL) { 1461 if (adapter->osdep.ihs[rid] != NULL) {
1461 pci_intr_disestablish(adapter->osdep.pc, 1462 pci_intr_disestablish(adapter->osdep.pc,
1462 adapter->osdep.ihs[rid]); 1463 adapter->osdep.ihs[rid]);
1463 adapter->osdep.ihs[rid] = NULL; 1464 adapter->osdep.ihs[rid] = NULL;
1464 } 1465 }
1465 1466
1466 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1467 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1467 adapter->osdep.nintrs); 1468 adapter->osdep.nintrs);
1468 1469
1469 if (adapter->osdep.mem_size != 0) { 1470 if (adapter->osdep.mem_size != 0) {
1470 bus_space_unmap(adapter->osdep.mem_bus_space_tag, 1471 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1471 adapter->osdep.mem_bus_space_handle, 1472 adapter->osdep.mem_bus_space_handle,
1472 adapter->osdep.mem_size); 1473 adapter->osdep.mem_size);
1473 } 1474 }
1474 1475
1475 return; 1476 return;
1476} /* ixv_free_pci_resources */ 1477} /* ixv_free_pci_resources */
1477 1478
/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 *
 *   Fills in the ifnet callbacks and capability flags, attaches the
 *   ethernet layer, sets up ifmedia, and registers the interface.
 *   Returns 0 on success or the error from if_initialize().
 ************************************************************************/
static int
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ifnet *ifp;
	int rv;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = &ec->ec_if;
	strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = ixv_init;
	ifp->if_stop = ixv_ifstop;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef IXGBE_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit is used unless legacy TX was requested. */
	if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
#if 0
		ixv_start_locked = ixgbe_legacy_start_locked;
#endif
	} else {
		ifp->if_transmit = ixgbe_mq_start;
#if 0
		ixv_start_locked = ixgbe_mq_start_locked;
#endif
	}
	ifp->if_start = ixgbe_legacy_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
	IFQ_SET_READY(&ifp->if_snd);

	rv = if_initialize(ifp);
	if (rv != 0) {
		aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
		return rv;
	}
	adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
	ether_ifattach(ifp, adapter->hw.mac.addr);
	/*
	 * We use per TX queue softint, so if_deferred_start_init() isn't
	 * used.
	 */
	ether_set_ifflags_cb(ec, ixv_ifflags_cb);

	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Set capability flags */
	ifp->if_capabilities |= IFCAP_HWCSUM
			     | IFCAP_TSOv4
			     | IFCAP_TSOv6;
	/* Checksum offload/TSO are advertised but start disabled. */
	ifp->if_capenable = 0;

	ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
			    | ETHERCAP_VLAN_HWTAGGING
			    | ETHERCAP_VLAN_HWCSUM
			    | ETHERCAP_JUMBO_MTU
			    | ETHERCAP_VLAN_MTU;

	/* Enable the above capabilities by default */
	ec->ec_capenable = ec->ec_capabilities;

	/* Don't enable LRO by default */
#if 0
	/* NetBSD doesn't support LRO yet */
	ifp->if_capabilities |= IFCAP_LRO;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ec->ec_ifmedia = &adapter->media;
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
	    ixv_media_status);
	/* A VF has no PHY control; only "auto" media is offered. */
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	if_register(ifp);

	return 0;
} /* ixv_setup_interface */
1572 1573
1573 1574
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 *   Programs each TX ring's descriptor base/length, head/tail, and
 *   writeback-threshold registers, then enables the ring.  Register
 *   writes are ordered: the ring must be fully configured before
 *   TXDCTL.ENABLE is set.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_queues; i++, txr++) {
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;	/* hardware ring index */

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		/* Ring starts out with descriptor space available. */
		txr->txr_no_space = false;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering of descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1621 1622
1622 1623
/************************************************************************
 * ixv_initialize_rss_mapping
 *
 *   Programs the VF RSS registers: the hash key (VFRSSRK), the
 *   128-entry redirection table (VFRETA, packed four 8-bit queue ids
 *   per 32-bit register), and the hash-type field selection (VFMRQC).
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		/* j cycles round-robin through the configured queues. */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		/* Flush one VFRETA register for every four entries. */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the requested hash types into MRQC field bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	/* The *_EX hash types have no VF register support; warn only. */
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1719 1720
1720 1721
1721/************************************************************************ 1722/************************************************************************
1722 * ixv_initialize_receive_units - Setup receive registers and features. 1723 * ixv_initialize_receive_units - Setup receive registers and features.
1723 ************************************************************************/ 1724 ************************************************************************/
1724static void 1725static void
1725ixv_initialize_receive_units(struct adapter *adapter) 1726ixv_initialize_receive_units(struct adapter *adapter)
1726{ 1727{
1727 struct rx_ring *rxr = adapter->rx_rings; 1728 struct rx_ring *rxr = adapter->rx_rings;
1728 struct ixgbe_hw *hw = &adapter->hw; 1729 struct ixgbe_hw *hw = &adapter->hw;
1729 struct ifnet *ifp = adapter->ifp; 1730 struct ifnet *ifp = adapter->ifp;
1730 u32 bufsz, psrtype; 1731 u32 bufsz, psrtype;
1731 1732
1732 if (ifp->if_mtu > ETHERMTU) 1733 if (ifp->if_mtu > ETHERMTU)
1733 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1734 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1734 else 1735 else
1735 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1736 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1736 1737
1737 psrtype = IXGBE_PSRTYPE_TCPHDR 1738 psrtype = IXGBE_PSRTYPE_TCPHDR
1738 | IXGBE_PSRTYPE_UDPHDR 1739 | IXGBE_PSRTYPE_UDPHDR
1739 | IXGBE_PSRTYPE_IPV4HDR 1740 | IXGBE_PSRTYPE_IPV4HDR
1740 | IXGBE_PSRTYPE_IPV6HDR 1741 | IXGBE_PSRTYPE_IPV6HDR
1741 | IXGBE_PSRTYPE_L2HDR; 1742 | IXGBE_PSRTYPE_L2HDR;
1742 1743
1743 if (adapter->num_queues > 1) 1744 if (adapter->num_queues > 1)
1744 psrtype |= 1 << 29; 1745 psrtype |= 1 << 29;
1745 1746
1746 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1747 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1747 1748
1748 /* Tell PF our max_frame size */ 1749 /* Tell PF our max_frame size */
1749 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) { 1750 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1750 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n"); 1751 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1751 } 1752 }
1752 1753
1753 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1754 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1754 u64 rdba = rxr->rxdma.dma_paddr; 1755 u64 rdba = rxr->rxdma.dma_paddr;
1755 u32 reg, rxdctl; 1756 u32 reg, rxdctl;
1756 int j = rxr->me; 1757 int j = rxr->me;
1757 1758
1758 /* Disable the queue */ 1759 /* Disable the queue */
1759 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1760 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1760 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1761 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1761 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1762 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1762 for (int k = 0; k < 10; k++) { 1763 for (int k = 0; k < 10; k++) {
1763 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1764 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1764 IXGBE_RXDCTL_ENABLE) 1765 IXGBE_RXDCTL_ENABLE)
1765 msec_delay(1); 1766 msec_delay(1);
1766 else 1767 else
1767 break; 1768 break;
1768 } 1769 }
1769 wmb(); 1770 wmb();
1770 /* Setup the Base and Length of the Rx Descriptor Ring */ 1771 /* Setup the Base and Length of the Rx Descriptor Ring */
1771 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1772 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1772 (rdba & 0x00000000ffffffffULL)); 1773 (rdba & 0x00000000ffffffffULL));
1773 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1774 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1774 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), 1775 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1775 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 1776 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1776 1777
1777 /* Reset the ring indices */ 1778 /* Reset the ring indices */
1778 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); 1779 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1779 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); 1780 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1780 1781
1781 /* Set up the SRRCTL register */ 1782 /* Set up the SRRCTL register */
1782 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j)); 1783 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1783 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1784 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1784 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 1785 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1785 reg |= bufsz; 1786 reg |= bufsz;
1786 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1787 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1787 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg); 1788 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1788 1789
1789 /* Capture Rx Tail index */ 1790 /* Capture Rx Tail index */
1790 rxr->tail = IXGBE_VFRDT(rxr->me); 1791 rxr->tail = IXGBE_VFRDT(rxr->me);
1791 1792
1792 /* Do the queue enabling last */ 1793 /* Do the queue enabling last */
1793 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1794 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1794 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1795 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1795 for (int k = 0; k < 10; k++) { 1796 for (int k = 0; k < 10; k++) {
1796 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1797 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1797 IXGBE_RXDCTL_ENABLE) 1798 IXGBE_RXDCTL_ENABLE)
1798 break; 1799 break;
1799 msec_delay(1); 1800 msec_delay(1);
1800 } 1801 }
1801 wmb(); 1802 wmb();
1802 1803
1803 /* Set the Tail Pointer */ 1804 /* Set the Tail Pointer */
1804#ifdef DEV_NETMAP 1805#ifdef DEV_NETMAP
1805 /* 1806 /*
1806 * In netmap mode, we must preserve the buffers made 1807 * In netmap mode, we must preserve the buffers made
1807 * available to userspace before the if_init() 1808 * available to userspace before the if_init()
1808 * (this is true by default on the TX side, because 1809 * (this is true by default on the TX side, because
1809 * init makes all buffers available to userspace). 1810 * init makes all buffers available to userspace).
1810 * 1811 *
1811 * netmap_reset() and the device specific routines 1812 * netmap_reset() and the device specific routines
1812 * (e.g. ixgbe_setup_receive_rings()) map these 1813 * (e.g. ixgbe_setup_receive_rings()) map these
1813 * buffers at the end of the NIC ring, so here we 1814 * buffers at the end of the NIC ring, so here we
1814 * must set the RDT (tail) register to make sure 1815 * must set the RDT (tail) register to make sure
1815 * they are not overwritten. 1816 * they are not overwritten.
1816 * 1817 *
1817 * In this driver the NIC ring starts at RDH = 0, 1818 * In this driver the NIC ring starts at RDH = 0,
1818 * RDT points to the last slot available for reception (?), 1819 * RDT points to the last slot available for reception (?),
1819 * so RDT = num_rx_desc - 1 means the whole ring is available. 1820 * so RDT = num_rx_desc - 1 means the whole ring is available.
1820 */ 1821 */
1821 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 1822 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1822 (ifp->if_capenable & IFCAP_NETMAP)) { 1823 (ifp->if_capenable & IFCAP_NETMAP)) {
1823 struct netmap_adapter *na = NA(adapter->ifp); 1824 struct netmap_adapter *na = NA(adapter->ifp);
1824 struct netmap_kring *kring = na->rx_rings[i]; 1825 struct netmap_kring *kring = na->rx_rings[i];
1825 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 1826 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1826 1827
1827 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t); 1828 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1828 } else 1829 } else
1829#endif /* DEV_NETMAP */ 1830#endif /* DEV_NETMAP */
1830 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 1831 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1831 adapter->num_rx_desc - 1); 1832 adapter->num_rx_desc - 1);
1832 } 1833 }
1833 1834
1834 ixv_initialize_rss_mapping(adapter); 1835 ixv_initialize_rss_mapping(adapter);
1835} /* ixv_initialize_receive_units */ 1836} /* ixv_initialize_receive_units */
1836 1837
1837/************************************************************************ 1838/************************************************************************
1838 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function 1839 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1839 * 1840 *
1840 * Retrieves the TDH value from the hardware 1841 * Retrieves the TDH value from the hardware
1841 ************************************************************************/ 1842 ************************************************************************/
1842static int 1843static int
1843ixv_sysctl_tdh_handler(SYSCTLFN_ARGS) 1844ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1844{ 1845{
1845 struct sysctlnode node = *rnode; 1846 struct sysctlnode node = *rnode;
1846 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 1847 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1847 uint32_t val; 1848 uint32_t val;
1848 1849
1849 if (!txr) 1850 if (!txr)
1850 return (0); 1851 return (0);
1851 1852
1852 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me)); 1853 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1853 node.sysctl_data = &val; 1854 node.sysctl_data = &val;
1854 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1855 return sysctl_lookup(SYSCTLFN_CALL(&node));
1855} /* ixv_sysctl_tdh_handler */ 1856} /* ixv_sysctl_tdh_handler */
1856 1857
1857/************************************************************************ 1858/************************************************************************
1858 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1859 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1859 * 1860 *
1860 * Retrieves the TDT value from the hardware 1861 * Retrieves the TDT value from the hardware
1861 ************************************************************************/ 1862 ************************************************************************/
1862static int 1863static int
1863ixv_sysctl_tdt_handler(SYSCTLFN_ARGS) 1864ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1864{ 1865{
1865 struct sysctlnode node = *rnode; 1866 struct sysctlnode node = *rnode;
1866 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 1867 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1867 uint32_t val; 1868 uint32_t val;
1868 1869
1869 if (!txr) 1870 if (!txr)
1870 return (0); 1871 return (0);
1871 1872
1872 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me)); 1873 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1873 node.sysctl_data = &val; 1874 node.sysctl_data = &val;
1874 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1875 return sysctl_lookup(SYSCTLFN_CALL(&node));
1875} /* ixv_sysctl_tdt_handler */ 1876} /* ixv_sysctl_tdt_handler */
1876 1877
1877/************************************************************************ 1878/************************************************************************
1878 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check 1879 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
1879 * handler function 1880 * handler function
1880 * 1881 *
1881 * Retrieves the next_to_check value 1882 * Retrieves the next_to_check value
1882 ************************************************************************/ 1883 ************************************************************************/
1883static int 1884static int
1884ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS) 1885ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
1885{ 1886{
1886 struct sysctlnode node = *rnode; 1887 struct sysctlnode node = *rnode;
1887 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1888 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1888 uint32_t val; 1889 uint32_t val;
1889 1890
1890 if (!rxr) 1891 if (!rxr)
1891 return (0); 1892 return (0);
1892 1893
1893 val = rxr->next_to_check; 1894 val = rxr->next_to_check;
1894 node.sysctl_data = &val; 1895 node.sysctl_data = &val;
1895 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1896 return sysctl_lookup(SYSCTLFN_CALL(&node));
1896} /* ixv_sysctl_next_to_check_handler */ 1897} /* ixv_sysctl_next_to_check_handler */
1897 1898
1898/************************************************************************ 1899/************************************************************************
1899 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function 1900 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1900 * 1901 *
1901 * Retrieves the RDH value from the hardware 1902 * Retrieves the RDH value from the hardware
1902 ************************************************************************/ 1903 ************************************************************************/
1903static int 1904static int
1904ixv_sysctl_rdh_handler(SYSCTLFN_ARGS) 1905ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1905{ 1906{
1906 struct sysctlnode node = *rnode; 1907 struct sysctlnode node = *rnode;
1907 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1908 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1908 uint32_t val; 1909 uint32_t val;
1909 1910
1910 if (!rxr) 1911 if (!rxr)
1911 return (0); 1912 return (0);
1912 1913
1913 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me)); 1914 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1914 node.sysctl_data = &val; 1915 node.sysctl_data = &val;
1915 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1916 return sysctl_lookup(SYSCTLFN_CALL(&node));
1916} /* ixv_sysctl_rdh_handler */ 1917} /* ixv_sysctl_rdh_handler */
1917 1918
1918/************************************************************************ 1919/************************************************************************
1919 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function 1920 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1920 * 1921 *
1921 * Retrieves the RDT value from the hardware 1922 * Retrieves the RDT value from the hardware
1922 ************************************************************************/ 1923 ************************************************************************/
1923static int 1924static int
1924ixv_sysctl_rdt_handler(SYSCTLFN_ARGS) 1925ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1925{ 1926{
1926 struct sysctlnode node = *rnode; 1927 struct sysctlnode node = *rnode;
1927 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1928 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1928 uint32_t val; 1929 uint32_t val;
1929 1930
1930 if (!rxr) 1931 if (!rxr)
1931 return (0); 1932 return (0);
1932 1933
1933 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me)); 1934 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
1934 node.sysctl_data = &val; 1935 node.sysctl_data = &val;
1935 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1936 return sysctl_lookup(SYSCTLFN_CALL(&node));
1936} /* ixv_sysctl_rdt_handler */ 1937} /* ixv_sysctl_rdt_handler */
1937 1938
1938/************************************************************************ 1939static void
1939 * ixv_setup_vlan_support 1940ixv_setup_vlan_tagging(struct adapter *adapter)
1940 ************************************************************************/ 
1941static int 
1942ixv_setup_vlan_support(struct adapter *adapter) 
1943{ 1941{
1944 struct ethercom *ec = &adapter->osdep.ec; 1942 struct ethercom *ec = &adapter->osdep.ec;
1945 struct ixgbe_hw *hw = &adapter->hw; 1943 struct ixgbe_hw *hw = &adapter->hw;
1946 struct rx_ring *rxr; 1944 struct rx_ring *rxr;
1947 u32 ctrl, vid, vfta, retry; 1945 u32 ctrl;
1948 struct vlanid_list *vlanidp; 1946 int i;
1949 int rv, error = 0; 
1950 bool usevlan; 
1951 bool hwtagging; 1947 bool hwtagging;
1952 1948
1953 /* 
1954 * This function is called from both if_init and ifflags_cb() 
1955 * on NetBSD. 
1956 */ 
1957 usevlan = VLAN_ATTACHED(ec); 
1958 
1959 /* Enable HW tagging only if any vlan is attached */ 1949 /* Enable HW tagging only if any vlan is attached */
1960 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) 1950 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
1961 && VLAN_ATTACHED(ec); 1951 && VLAN_ATTACHED(ec);
1962 1952
1963 /* Enable the queues */ 1953 /* Enable the queues */
1964 for (int i = 0; i < adapter->num_queues; i++) { 1954 for (i = 0; i < adapter->num_queues; i++) {
1965 rxr = &adapter->rx_rings[i]; 1955 rxr = &adapter->rx_rings[i];
1966 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me)); 1956 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
1967 if (hwtagging) 1957 if (hwtagging)
1968 ctrl |= IXGBE_RXDCTL_VME; 1958 ctrl |= IXGBE_RXDCTL_VME;
1969 else 1959 else
1970 ctrl &= ~IXGBE_RXDCTL_VME; 1960 ctrl &= ~IXGBE_RXDCTL_VME;
1971 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl); 1961 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
1972 /* 1962 /*
1973 * Let Rx path know that it needs to store VLAN tag 1963 * Let Rx path know that it needs to store VLAN tag
1974 * as part of extra mbuf info. 1964 * as part of extra mbuf info.
1975 */ 1965 */
1976 rxr->vtag_strip = hwtagging ? TRUE : FALSE; 1966 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
1977 } 1967 }
 1968} /* ixv_setup_vlan_tagging */
 1969
 1970/************************************************************************
 1971 * ixv_setup_vlan_support
 1972 ************************************************************************/
 1973static int
 1974ixv_setup_vlan_support(struct adapter *adapter)
 1975{
 1976 struct ethercom *ec = &adapter->osdep.ec;
 1977 struct ixgbe_hw *hw = &adapter->hw;
 1978 u32 vid, vfta, retry;
 1979 struct vlanid_list *vlanidp;
 1980 int rv, error = 0;
 1981
 1982 /*
 1983 * This function is called from both if_init and ifflags_cb()
 1984 * on NetBSD.
 1985 */
 1986
 1987 /*
 1988 * Part 1:
 1989 * Setup VLAN HW tagging
 1990 */
 1991 ixv_setup_vlan_tagging(adapter);
1978 1992
1979 if (!usevlan) 1993 if (!VLAN_ATTACHED(ec))
1980 return 0; 1994 return 0;
1981 1995
 1996 /*
 1997 * Part 2:
 1998 * Setup VLAN HW filter
 1999 */
1982 /* Cleanup shadow_vfta */ 2000 /* Cleanup shadow_vfta */
1983 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 2001 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
1984 adapter->shadow_vfta[i] = 0; 2002 adapter->shadow_vfta[i] = 0;
1985 /* Generate shadow_vfta from ec_vids */ 2003 /* Generate shadow_vfta from ec_vids */
1986 mutex_enter(ec->ec_lock); 2004 ETHER_LOCK(ec);
1987 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 2005 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
1988 uint32_t idx; 2006 uint32_t idx;
1989 2007
1990 idx = vlanidp->vid / 32; 2008 idx = vlanidp->vid / 32;
1991 KASSERT(idx < IXGBE_VFTA_SIZE); 2009 KASSERT(idx < IXGBE_VFTA_SIZE);
1992 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); 2010 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
1993 } 2011 }
1994 mutex_exit(ec->ec_lock); 2012 ETHER_UNLOCK(ec);
1995  2013
1996 /* 2014 /*
1997 * A soft reset zero's out the VFTA, so 2015 * A soft reset zero's out the VFTA, so
1998 * we need to repopulate it now. 2016 * we need to repopulate it now.
1999 */ 2017 */
2000 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { 2018 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2001 if (adapter->shadow_vfta[i] == 0) 2019 if (adapter->shadow_vfta[i] == 0)
2002 continue; 2020 continue;
2003 vfta = adapter->shadow_vfta[i]; 2021 vfta = adapter->shadow_vfta[i];
2004 /* 2022 /*
2005 * Reconstruct the vlan id's 2023 * Reconstruct the vlan id's
2006 * based on the bits set in each 2024 * based on the bits set in each
2007 * of the array ints. 2025 * of the array ints.
2008 */ 2026 */
2009 for (int j = 0; j < 32; j++) { 2027 for (int j = 0; j < 32; j++) {
2010 retry = 0; 2028 retry = 0;
2011 if ((vfta & ((u32)1 << j)) == 0) 2029 if ((vfta & ((u32)1 << j)) == 0)
2012 continue; 2030 continue;
2013 vid = (i * 32) + j; 2031 vid = (i * 32) + j;
2014  2032
2015 /* Call the shared code mailbox routine */ 2033 /* Call the shared code mailbox routine */
2016 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE, 2034 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2017 FALSE)) != 0) { 2035 FALSE)) != 0) {
2018 if (++retry > 5) { 2036 if (++retry > 5) {
2019 device_printf(adapter->dev, 2037 device_printf(adapter->dev,
2020 "%s: max retry exceeded\n", 2038 "%s: max retry exceeded\n",
2021 __func__); 2039 __func__);
2022 break; 2040 break;
2023 } 2041 }
2024 } 2042 }
2025 if (rv != 0) { 2043 if (rv != 0) {
2026 device_printf(adapter->dev, 2044 device_printf(adapter->dev,
2027 "failed to set vlan %d\n", vid); 2045 "failed to set vlan %d\n", vid);
2028 error = EACCES; 2046 error = EACCES;
2029 } 2047 }
2030 } 2048 }
2031 } 2049 }
2032 return error; 2050 return error;
2033} /* ixv_setup_vlan_support */ 2051} /* ixv_setup_vlan_support */
2034 2052
2035static int 2053static int
2036ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 2054ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2037{ 2055{
2038 struct ifnet *ifp = &ec->ec_if; 2056 struct ifnet *ifp = &ec->ec_if;
 2057 struct adapter *adapter = ifp->if_softc;
2039 int rv; 2058 int rv;
2040 2059
2041 if (set) 2060 if (set)
2042 rv = ixv_register_vlan(ifp->if_softc, ifp, vid); 2061 rv = ixv_register_vlan(ifp->if_softc, ifp, vid);
2043 else 2062 else
2044 rv = ixv_unregister_vlan(ifp->if_softc, ifp, vid); 2063 rv = ixv_unregister_vlan(ifp->if_softc, ifp, vid);
2045 2064
 2065 if (rv != 0)
 2066 return rv;
 2067
 2068 /*
 2069 * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0
 2070 * or 0 to 1.
 2071 */
 2072 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
 2073 ixv_setup_vlan_tagging(adapter);
 2074
2046 return rv; 2075 return rv;
2047} 2076}
2048 2077
2049/************************************************************************ 2078/************************************************************************
2050 * ixv_register_vlan 2079 * ixv_register_vlan
2051 * 2080 *
2052 * Run via a vlan config EVENT, it enables us to use the 2081 * Run via a vlan config EVENT, it enables us to use the
2053 * HW Filter table since we can get the vlan id. This just 2082 * HW Filter table since we can get the vlan id. This just
2054 * creates the entry in the soft version of the VFTA, init 2083 * creates the entry in the soft version of the VFTA, init
2055 * will repopulate the real table. 2084 * will repopulate the real table.
2056 ************************************************************************/ 2085 ************************************************************************/
2057static int 2086static int
2058ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2087ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2059{ 2088{
2060 struct adapter *adapter = ifp->if_softc; 2089 struct adapter *adapter = ifp->if_softc;
2061 struct ixgbe_hw *hw = &adapter->hw; 2090 struct ixgbe_hw *hw = &adapter->hw;
2062 u16 index, bit; 2091 u16 index, bit;
2063 int error; 2092 int error;
2064 2093
2065 if (ifp->if_softc != arg) /* Not our event */ 2094 if (ifp->if_softc != arg) /* Not our event */
2066 return EINVAL; 2095 return EINVAL;
2067 2096
2068 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2097 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2069 return EINVAL; 2098 return EINVAL;
2070 IXGBE_CORE_LOCK(adapter); 2099 IXGBE_CORE_LOCK(adapter);
2071 index = (vtag >> 5) & 0x7F; 2100 index = (vtag >> 5) & 0x7F;
2072 bit = vtag & 0x1F; 2101 bit = vtag & 0x1F;
2073 adapter->shadow_vfta[index] |= ((u32)1 << bit); 2102 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2074 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false); 2103 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2075 IXGBE_CORE_UNLOCK(adapter); 2104 IXGBE_CORE_UNLOCK(adapter);
2076 2105
2077 if (error != 0) { 2106 if (error != 0) {
2078 device_printf(adapter->dev, "failed to register vlan %hu\n", 2107 device_printf(adapter->dev, "failed to register vlan %hu\n",
2079 vtag); 2108 vtag);
2080 error = EACCES; 2109 error = EACCES;
2081 } 2110 }
2082 return error; 2111 return error;
2083} /* ixv_register_vlan */ 2112} /* ixv_register_vlan */
2084 2113
2085/************************************************************************ 2114/************************************************************************
2086 * ixv_unregister_vlan 2115 * ixv_unregister_vlan
2087 * 2116 *
2088 * Run via a vlan unconfig EVENT, remove our entry 2117 * Run via a vlan unconfig EVENT, remove our entry
2089 * in the soft vfta. 2118 * in the soft vfta.
2090 ************************************************************************/ 2119 ************************************************************************/
2091static int 2120static int
2092ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) 2121ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
2093{ 2122{
2094 struct adapter *adapter = ifp->if_softc; 2123 struct adapter *adapter = ifp->if_softc;
2095 struct ixgbe_hw *hw = &adapter->hw; 2124 struct ixgbe_hw *hw = &adapter->hw;
2096 u16 index, bit; 2125 u16 index, bit;
2097 int error; 2126 int error;
2098 2127
2099 if (ifp->if_softc != arg) 2128 if (ifp->if_softc != arg)
2100 return EINVAL; 2129 return EINVAL;
2101 2130
2102 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2131 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2103 return EINVAL; 2132 return EINVAL;
2104 2133
2105 IXGBE_CORE_LOCK(adapter); 2134 IXGBE_CORE_LOCK(adapter);
2106 index = (vtag >> 5) & 0x7F; 2135 index = (vtag >> 5) & 0x7F;
2107 bit = vtag & 0x1F; 2136 bit = vtag & 0x1F;
2108 adapter->shadow_vfta[index] &= ~((u32)1 << bit); 2137 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2109 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false); 2138 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2110 IXGBE_CORE_UNLOCK(adapter); 2139 IXGBE_CORE_UNLOCK(adapter);
2111 2140
2112 if (error != 0) { 2141 if (error != 0) {
2113 device_printf(adapter->dev, "failed to unregister vlan %hu\n", 2142 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2114 vtag); 2143 vtag);
2115 error = EIO; 2144 error = EIO;
2116 } 2145 }
2117 return error; 2146 return error;
2118} /* ixv_unregister_vlan */ 2147} /* ixv_unregister_vlan */
2119 2148
2120/************************************************************************ 2149/************************************************************************
2121 * ixv_enable_intr 2150 * ixv_enable_intr
2122 ************************************************************************/ 2151 ************************************************************************/
2123static void 2152static void
2124ixv_enable_intr(struct adapter *adapter) 2153ixv_enable_intr(struct adapter *adapter)
2125{ 2154{
2126 struct ixgbe_hw *hw = &adapter->hw; 2155 struct ixgbe_hw *hw = &adapter->hw;
2127 struct ix_queue *que = adapter->queues; 2156 struct ix_queue *que = adapter->queues;
2128 u32 mask; 2157 u32 mask;
2129 int i; 2158 int i;
2130 2159
2131 /* For VTEIAC */ 2160 /* For VTEIAC */
2132 mask = (1 << adapter->vector); 2161 mask = (1 << adapter->vector);
2133 for (i = 0; i < adapter->num_queues; i++, que++) 2162 for (i = 0; i < adapter->num_queues; i++, que++)
2134 mask |= (1 << que->msix); 2163 mask |= (1 << que->msix);
2135 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); 2164 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2136 2165
2137 /* For VTEIMS */ 2166 /* For VTEIMS */
2138 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); 2167 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2139 que = adapter->queues; 2168 que = adapter->queues;
2140 for (i = 0; i < adapter->num_queues; i++, que++) 2169 for (i = 0; i < adapter->num_queues; i++, que++)
2141 ixv_enable_queue(adapter, que->msix); 2170 ixv_enable_queue(adapter, que->msix);
2142 2171
2143 IXGBE_WRITE_FLUSH(hw); 2172 IXGBE_WRITE_FLUSH(hw);
2144} /* ixv_enable_intr */ 2173} /* ixv_enable_intr */
2145 2174
2146/************************************************************************ 2175/************************************************************************
2147 * ixv_disable_intr 2176 * ixv_disable_intr
2148 ************************************************************************/ 2177 ************************************************************************/
2149static void 2178static void
2150ixv_disable_intr(struct adapter *adapter) 2179ixv_disable_intr(struct adapter *adapter)
2151{ 2180{
2152 struct ix_queue *que = adapter->queues; 2181 struct ix_queue *que = adapter->queues;
2153 2182
2154 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); 2183 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2155 2184
2156 /* disable interrupts other than queues */ 2185 /* disable interrupts other than queues */
2157 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector); 2186 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, adapter->vector);
2158 2187
2159 for (int i = 0; i < adapter->num_queues; i++, que++) 2188 for (int i = 0; i < adapter->num_queues; i++, que++)
2160 ixv_disable_queue(adapter, que->msix); 2189 ixv_disable_queue(adapter, que->msix);
2161 2190
2162 IXGBE_WRITE_FLUSH(&adapter->hw); 2191 IXGBE_WRITE_FLUSH(&adapter->hw);
2163} /* ixv_disable_intr */ 2192} /* ixv_disable_intr */
2164 2193
2165/************************************************************************ 2194/************************************************************************
2166 * ixv_set_ivar 2195 * ixv_set_ivar
2167 * 2196 *
2168 * Setup the correct IVAR register for a particular MSI-X interrupt 2197 * Setup the correct IVAR register for a particular MSI-X interrupt
2169 * - entry is the register array entry 2198 * - entry is the register array entry
2170 * - vector is the MSI-X vector for this queue 2199 * - vector is the MSI-X vector for this queue
2171 * - type is RX/TX/MISC 2200 * - type is RX/TX/MISC
2172 ************************************************************************/ 2201 ************************************************************************/
2173static void 2202static void
2174ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 2203ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2175{ 2204{
2176 struct ixgbe_hw *hw = &adapter->hw; 2205 struct ixgbe_hw *hw = &adapter->hw;
2177 u32 ivar, index; 2206 u32 ivar, index;
2178 2207
2179 vector |= IXGBE_IVAR_ALLOC_VAL; 2208 vector |= IXGBE_IVAR_ALLOC_VAL;
2180 2209
2181 if (type == -1) { /* MISC IVAR */ 2210 if (type == -1) { /* MISC IVAR */
2182 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 2211 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2183 ivar &= ~0xFF; 2212 ivar &= ~0xFF;
2184 ivar |= vector; 2213 ivar |= vector;
2185 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 2214 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2186 } else { /* RX/TX IVARS */ 2215 } else { /* RX/TX IVARS */
2187 index = (16 * (entry & 1)) + (8 * type); 2216 index = (16 * (entry & 1)) + (8 * type);
2188 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); 2217 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2189 ivar &= ~(0xffUL << index); 2218 ivar &= ~(0xffUL << index);
2190 ivar |= ((u32)vector << index); 2219 ivar |= ((u32)vector << index);
2191 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); 2220 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2192 } 2221 }
2193} /* ixv_set_ivar */ 2222} /* ixv_set_ivar */
2194 2223
2195/************************************************************************ 2224/************************************************************************
2196 * ixv_configure_ivars 2225 * ixv_configure_ivars
2197 ************************************************************************/ 2226 ************************************************************************/
2198static void 2227static void
2199ixv_configure_ivars(struct adapter *adapter) 2228ixv_configure_ivars(struct adapter *adapter)
2200{ 2229{
2201 struct ix_queue *que = adapter->queues; 2230 struct ix_queue *que = adapter->queues;
2202 2231
2203 /* XXX We should sync EITR value calculation with ixgbe.c? */ 2232 /* XXX We should sync EITR value calculation with ixgbe.c? */
2204 2233
2205 for (int i = 0; i < adapter->num_queues; i++, que++) { 2234 for (int i = 0; i < adapter->num_queues; i++, que++) {
2206 /* First the RX queue entry */ 2235 /* First the RX queue entry */
2207 ixv_set_ivar(adapter, i, que->msix, 0); 2236 ixv_set_ivar(adapter, i, que->msix, 0);
2208 /* ... and the TX */ 2237 /* ... and the TX */
2209 ixv_set_ivar(adapter, i, que->msix, 1); 2238 ixv_set_ivar(adapter, i, que->msix, 1);
2210 /* Set an initial value in EITR */ 2239 /* Set an initial value in EITR */
2211 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT); 2240 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2212 } 2241 }
2213 2242
2214 /* For the mailbox interrupt */ 2243 /* For the mailbox interrupt */
2215 ixv_set_ivar(adapter, 1, adapter->vector, -1); 2244 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2216} /* ixv_configure_ivars */ 2245} /* ixv_configure_ivars */
2217 2246
2218 2247
2219/************************************************************************ 2248/************************************************************************
2220 * ixv_save_stats 2249 * ixv_save_stats
2221 * 2250 *
2222 * The VF stats registers never have a truly virgin 2251 * The VF stats registers never have a truly virgin
2223 * starting point, so this routine tries to make an 2252 * starting point, so this routine tries to make an
2224 * artificial one, marking ground zero on attach as 2253 * artificial one, marking ground zero on attach as
2225 * it were. 2254 * it were.
2226 ************************************************************************/ 2255 ************************************************************************/
2227static void 2256static void
2228ixv_save_stats(struct adapter *adapter) 2257ixv_save_stats(struct adapter *adapter)
2229{ 2258{
2230 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2259 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2231 2260
2232 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) { 2261 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2233 stats->saved_reset_vfgprc += 2262 stats->saved_reset_vfgprc +=
2234 stats->vfgprc.ev_count - stats->base_vfgprc; 2263 stats->vfgprc.ev_count - stats->base_vfgprc;
2235 stats->saved_reset_vfgptc += 2264 stats->saved_reset_vfgptc +=
2236 stats->vfgptc.ev_count - stats->base_vfgptc; 2265 stats->vfgptc.ev_count - stats->base_vfgptc;
2237 stats->saved_reset_vfgorc += 2266 stats->saved_reset_vfgorc +=
2238 stats->vfgorc.ev_count - stats->base_vfgorc; 2267 stats->vfgorc.ev_count - stats->base_vfgorc;
2239 stats->saved_reset_vfgotc += 2268 stats->saved_reset_vfgotc +=
2240 stats->vfgotc.ev_count - stats->base_vfgotc; 2269 stats->vfgotc.ev_count - stats->base_vfgotc;
2241 stats->saved_reset_vfmprc += 2270 stats->saved_reset_vfmprc +=
2242 stats->vfmprc.ev_count - stats->base_vfmprc; 2271 stats->vfmprc.ev_count - stats->base_vfmprc;
2243 } 2272 }
2244} /* ixv_save_stats */ 2273} /* ixv_save_stats */
2245 2274
2246/************************************************************************ 2275/************************************************************************
2247 * ixv_init_stats 2276 * ixv_init_stats
2248 ************************************************************************/ 2277 ************************************************************************/
2249static void 2278static void
2250ixv_init_stats(struct adapter *adapter) 2279ixv_init_stats(struct adapter *adapter)
2251{ 2280{
2252 struct ixgbe_hw *hw = &adapter->hw; 2281 struct ixgbe_hw *hw = &adapter->hw;
2253 2282
2254 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 2283 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2255 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 2284 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2256 adapter->stats.vf.last_vfgorc |= 2285 adapter->stats.vf.last_vfgorc |=
2257 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 2286 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2258 2287
2259 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 2288 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2260 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 2289 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2261 adapter->stats.vf.last_vfgotc |= 2290 adapter->stats.vf.last_vfgotc |=
2262 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 2291 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2263 2292
2264 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 2293 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2265 2294
2266 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; 2295 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2267 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; 2296 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2268 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; 2297 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2269 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; 2298 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2270 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; 2299 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2271} /* ixv_init_stats */ 2300} /* ixv_init_stats */
2272 2301
2273#define UPDATE_STAT_32(reg, last, count) \ 2302#define UPDATE_STAT_32(reg, last, count) \
2274{ \ 2303{ \
2275 u32 current = IXGBE_READ_REG(hw, (reg)); \ 2304 u32 current = IXGBE_READ_REG(hw, (reg)); \
2276 if (current < (last)) \ 2305 if (current < (last)) \
2277 count.ev_count += 0x100000000LL; \ 2306 count.ev_count += 0x100000000LL; \
2278 (last) = current; \ 2307 (last) = current; \
2279 count.ev_count &= 0xFFFFFFFF00000000LL; \ 2308 count.ev_count &= 0xFFFFFFFF00000000LL; \
2280 count.ev_count |= current; \ 2309 count.ev_count |= current; \
2281} 2310}
2282 2311
2283#define UPDATE_STAT_36(lsb, msb, last, count) \ 2312#define UPDATE_STAT_36(lsb, msb, last, count) \
2284{ \ 2313{ \
2285 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \ 2314 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
2286 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \ 2315 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
2287 u64 current = ((cur_msb << 32) | cur_lsb); \ 2316 u64 current = ((cur_msb << 32) | cur_lsb); \
2288 if (current < (last)) \ 2317 if (current < (last)) \
2289 count.ev_count += 0x1000000000LL; \ 2318 count.ev_count += 0x1000000000LL; \
2290 (last) = current; \ 2319 (last) = current; \
2291 count.ev_count &= 0xFFFFFFF000000000LL; \ 2320 count.ev_count &= 0xFFFFFFF000000000LL; \
2292 count.ev_count |= current; \ 2321 count.ev_count |= current; \
2293} 2322}
2294 2323
2295/************************************************************************ 2324/************************************************************************
2296 * ixv_update_stats - Update the board statistics counters. 2325 * ixv_update_stats - Update the board statistics counters.
2297 ************************************************************************/ 2326 ************************************************************************/
2298void 2327void
2299ixv_update_stats(struct adapter *adapter) 2328ixv_update_stats(struct adapter *adapter)
2300{ 2329{
2301 struct ixgbe_hw *hw = &adapter->hw; 2330 struct ixgbe_hw *hw = &adapter->hw;
2302 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2331 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2303 2332
2304 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc); 2333 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2305 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc); 2334 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2306 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc, 2335 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2307 stats->vfgorc); 2336 stats->vfgorc);
2308 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc, 2337 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2309 stats->vfgotc); 2338 stats->vfgotc);
2310 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc); 2339 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2311 2340
2312 /* Fill out the OS statistics structure */ 2341 /* Fill out the OS statistics structure */
2313 /* 2342 /*
2314 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with 2343 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
2315 * adapter->stats counters. It's required to make ifconfig -z 2344 * adapter->stats counters. It's required to make ifconfig -z
2316 * (SOICZIFDATA) work. 2345 * (SOICZIFDATA) work.
2317 */ 2346 */
2318} /* ixv_update_stats */ 2347} /* ixv_update_stats */
2319 2348
2320/************************************************************************ 2349/************************************************************************
2321 * ixv_sysctl_interrupt_rate_handler 2350 * ixv_sysctl_interrupt_rate_handler
2322 ************************************************************************/ 2351 ************************************************************************/
2323static int 2352static int
2324ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) 2353ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2325{ 2354{
2326 struct sysctlnode node = *rnode; 2355 struct sysctlnode node = *rnode;
2327 struct ix_queue *que = (struct ix_queue *)node.sysctl_data; 2356 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2328 struct adapter *adapter = que->adapter; 2357 struct adapter *adapter = que->adapter;
2329 uint32_t reg, usec, rate; 2358 uint32_t reg, usec, rate;
2330 int error; 2359 int error;
2331 2360
2332 if (que == NULL) 2361 if (que == NULL)
2333 return 0; 2362 return 0;
2334 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix)); 2363 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2335 usec = ((reg & 0x0FF8) >> 3); 2364 usec = ((reg & 0x0FF8) >> 3);
2336 if (usec > 0) 2365 if (usec > 0)
2337 rate = 500000 / usec; 2366 rate = 500000 / usec;
2338 else 2367 else
2339 rate = 0; 2368 rate = 0;
2340 node.sysctl_data = &rate; 2369 node.sysctl_data = &rate;
2341 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2370 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2342 if (error || newp == NULL) 2371 if (error || newp == NULL)
2343 return error; 2372 return error;
2344 reg &= ~0xfff; /* default, no limitation */ 2373 reg &= ~0xfff; /* default, no limitation */
2345 if (rate > 0 && rate < 500000) { 2374 if (rate > 0 && rate < 500000) {
2346 if (rate < 1000) 2375 if (rate < 1000)
2347 rate = 1000; 2376 rate = 1000;
2348 reg |= ((4000000/rate) & 0xff8); 2377 reg |= ((4000000/rate) & 0xff8);
2349 /* 2378 /*
2350 * When RSC is used, ITR interval must be larger than 2379 * When RSC is used, ITR interval must be larger than
2351 * RSC_DELAY. Currently, we use 2us for RSC_DELAY. 2380 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2352 * The minimum value is always greater than 2us on 100M 2381 * The minimum value is always greater than 2us on 100M
2353 * (and 10M?(not documented)), but it's not on 1G and higher. 2382 * (and 10M?(not documented)), but it's not on 1G and higher.
2354 */ 2383 */
2355 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 2384 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2356 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 2385 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2357 if ((adapter->num_queues > 1) 2386 if ((adapter->num_queues > 1)
2358 && (reg < IXGBE_MIN_RSC_EITR_10G1G)) 2387 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2359 return EINVAL; 2388 return EINVAL;
2360 } 2389 }
2361 ixv_max_interrupt_rate = rate; 2390 ixv_max_interrupt_rate = rate;
2362 } else 2391 } else
2363 ixv_max_interrupt_rate = 0; 2392 ixv_max_interrupt_rate = 0;
2364 ixv_eitr_write(adapter, que->msix, reg); 2393 ixv_eitr_write(adapter, que->msix, reg);
2365 2394
2366 return (0); 2395 return (0);
2367} /* ixv_sysctl_interrupt_rate_handler */ 2396} /* ixv_sysctl_interrupt_rate_handler */
2368 2397
2369const struct sysctlnode * 2398const struct sysctlnode *
2370ixv_sysctl_instance(struct adapter *adapter) 2399ixv_sysctl_instance(struct adapter *adapter)
2371{ 2400{
2372 const char *dvname; 2401 const char *dvname;
2373 struct sysctllog **log; 2402 struct sysctllog **log;
2374 int rc; 2403 int rc;
2375 const struct sysctlnode *rnode; 2404 const struct sysctlnode *rnode;
2376 2405
2377 log = &adapter->sysctllog; 2406 log = &adapter->sysctllog;
2378 dvname = device_xname(adapter->dev); 2407 dvname = device_xname(adapter->dev);
2379 2408
2380 if ((rc = sysctl_createv(log, 0, NULL, &rnode, 2409 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2381 0, CTLTYPE_NODE, dvname, 2410 0, CTLTYPE_NODE, dvname,
2382 SYSCTL_DESCR("ixv information and settings"), 2411 SYSCTL_DESCR("ixv information and settings"),
2383 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 2412 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2384 goto err; 2413 goto err;
2385 2414
2386 return rnode; 2415 return rnode;
2387err: 2416err:
2388 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc); 2417 printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2389 return NULL; 2418 return NULL;
2390} 2419}
2391 2420
2392static void 2421static void
2393ixv_add_device_sysctls(struct adapter *adapter) 2422ixv_add_device_sysctls(struct adapter *adapter)
2394{ 2423{
2395 struct sysctllog **log; 2424 struct sysctllog **log;
2396 const struct sysctlnode *rnode, *cnode; 2425 const struct sysctlnode *rnode, *cnode;
2397 device_t dev; 2426 device_t dev;
2398 2427
2399 dev = adapter->dev; 2428 dev = adapter->dev;
2400 log = &adapter->sysctllog; 2429 log = &adapter->sysctllog;
2401 2430
2402 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2431 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2403 aprint_error_dev(dev, "could not create sysctl root\n"); 2432 aprint_error_dev(dev, "could not create sysctl root\n");
2404 return; 2433 return;
2405 } 2434 }
2406 2435
2407 if (sysctl_createv(log, 0, &rnode, &cnode, 2436 if (sysctl_createv(log, 0, &rnode, &cnode,
2408 CTLFLAG_READWRITE, CTLTYPE_INT, 2437 CTLFLAG_READWRITE, CTLTYPE_INT,
2409 "debug", SYSCTL_DESCR("Debug Info"), 2438 "debug", SYSCTL_DESCR("Debug Info"),
2410 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) 2439 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2411 aprint_error_dev(dev, "could not create sysctl\n"); 2440 aprint_error_dev(dev, "could not create sysctl\n");
2412 2441
2413 if (sysctl_createv(log, 0, &rnode, &cnode, 2442 if (sysctl_createv(log, 0, &rnode, &cnode,
2414 CTLFLAG_READWRITE, CTLTYPE_BOOL, 2443 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2415 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), 2444 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2416 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) 2445 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2417 aprint_error_dev(dev, "could not create sysctl\n"); 2446 aprint_error_dev(dev, "could not create sysctl\n");
2418 2447
2419 if (sysctl_createv(log, 0, &rnode, &cnode, 2448 if (sysctl_createv(log, 0, &rnode, &cnode,
2420 CTLFLAG_READWRITE, CTLTYPE_BOOL, 2449 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2421 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), 2450 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
2422 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0) 2451 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
2423 aprint_error_dev(dev, "could not create sysctl\n"); 2452 aprint_error_dev(dev, "could not create sysctl\n");
2424} 2453}
2425 2454
2426/************************************************************************ 2455/************************************************************************
2427 * ixv_add_stats_sysctls - Add statistic sysctls for the VF. 2456 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2428 ************************************************************************/ 2457 ************************************************************************/
2429static void 2458static void
2430ixv_add_stats_sysctls(struct adapter *adapter) 2459ixv_add_stats_sysctls(struct adapter *adapter)
2431{ 2460{
2432 device_t dev = adapter->dev; 2461 device_t dev = adapter->dev;
2433 struct tx_ring *txr = adapter->tx_rings; 2462 struct tx_ring *txr = adapter->tx_rings;
2434 struct rx_ring *rxr = adapter->rx_rings; 2463 struct rx_ring *rxr = adapter->rx_rings;
2435 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2464 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2436 struct ixgbe_hw *hw = &adapter->hw; 2465 struct ixgbe_hw *hw = &adapter->hw;
2437 const struct sysctlnode *rnode, *cnode; 2466 const struct sysctlnode *rnode, *cnode;
2438 struct sysctllog **log = &adapter->sysctllog; 2467 struct sysctllog **log = &adapter->sysctllog;
2439 const char *xname = device_xname(dev); 2468 const char *xname = device_xname(dev);
2440 2469
2441 /* Driver Statistics */ 2470 /* Driver Statistics */
2442 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 2471 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2443 NULL, xname, "Driver tx dma soft fail EFBIG"); 2472 NULL, xname, "Driver tx dma soft fail EFBIG");
2444 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, 2473 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2445 NULL, xname, "m_defrag() failed"); 2474 NULL, xname, "m_defrag() failed");
2446 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 2475 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2447 NULL, xname, "Driver tx dma hard fail EFBIG"); 2476 NULL, xname, "Driver tx dma hard fail EFBIG");
2448 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, 2477 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2449 NULL, xname, "Driver tx dma hard fail EINVAL"); 2478 NULL, xname, "Driver tx dma hard fail EINVAL");
2450 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, 2479 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
2451 NULL, xname, "Driver tx dma hard fail other"); 2480 NULL, xname, "Driver tx dma hard fail other");
2452 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 2481 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2453 NULL, xname, "Driver tx dma soft fail EAGAIN"); 2482 NULL, xname, "Driver tx dma soft fail EAGAIN");
2454 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 2483 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2455 NULL, xname, "Driver tx dma soft fail ENOMEM"); 2484 NULL, xname, "Driver tx dma soft fail ENOMEM");
2456 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, 2485 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2457 NULL, xname, "Watchdog timeouts"); 2486 NULL, xname, "Watchdog timeouts");
2458 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, 2487 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
2459 NULL, xname, "TSO errors"); 2488 NULL, xname, "TSO errors");
2460 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR, 2489 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
2461 NULL, xname, "Link MSI-X IRQ Handled"); 2490 NULL, xname, "Link MSI-X IRQ Handled");
2462 2491
2463 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2492 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2464 snprintf(adapter->queues[i].evnamebuf, 2493 snprintf(adapter->queues[i].evnamebuf,
2465 sizeof(adapter->queues[i].evnamebuf), "%s q%d", 2494 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
2466 xname, i); 2495 xname, i);
2467 snprintf(adapter->queues[i].namebuf, 2496 snprintf(adapter->queues[i].namebuf,
2468 sizeof(adapter->queues[i].namebuf), "q%d", i); 2497 sizeof(adapter->queues[i].namebuf), "q%d", i);
2469 2498
2470 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2499 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2471 aprint_error_dev(dev, "could not create sysctl root\n"); 2500 aprint_error_dev(dev, "could not create sysctl root\n");
2472 break; 2501 break;
2473 } 2502 }
2474 2503
2475 if (sysctl_createv(log, 0, &rnode, &rnode, 2504 if (sysctl_createv(log, 0, &rnode, &rnode,
2476 0, CTLTYPE_NODE, 2505 0, CTLTYPE_NODE,
2477 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 2506 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2478 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 2507 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2479 break; 2508 break;
2480 2509
2481 if (sysctl_createv(log, 0, &rnode, &cnode, 2510 if (sysctl_createv(log, 0, &rnode, &cnode,
2482 CTLFLAG_READWRITE, CTLTYPE_INT, 2511 CTLFLAG_READWRITE, CTLTYPE_INT,
2483 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 2512 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2484 ixv_sysctl_interrupt_rate_handler, 0, 2513 ixv_sysctl_interrupt_rate_handler, 0,
2485 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 2514 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2486 break; 2515 break;
2487 2516
2488 if (sysctl_createv(log, 0, &rnode, &cnode, 2517 if (sysctl_createv(log, 0, &rnode, &cnode,
2489 CTLFLAG_READONLY, CTLTYPE_INT, 2518 CTLFLAG_READONLY, CTLTYPE_INT,
2490 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 2519 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2491 ixv_sysctl_tdh_handler, 0, (void *)txr, 2520 ixv_sysctl_tdh_handler, 0, (void *)txr,
2492 0, CTL_CREATE, CTL_EOL) != 0) 2521 0, CTL_CREATE, CTL_EOL) != 0)
2493 break; 2522 break;
2494 2523
2495 if (sysctl_createv(log, 0, &rnode, &cnode, 2524 if (sysctl_createv(log, 0, &rnode, &cnode,
2496 CTLFLAG_READONLY, CTLTYPE_INT, 2525 CTLFLAG_READONLY, CTLTYPE_INT,
2497 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 2526 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2498 ixv_sysctl_tdt_handler, 0, (void *)txr, 2527 ixv_sysctl_tdt_handler, 0, (void *)txr,
2499 0, CTL_CREATE, CTL_EOL) != 0) 2528 0, CTL_CREATE, CTL_EOL) != 0)
2500 break; 2529 break;
2501 2530
2502 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, 2531 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
2503 NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); 2532 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
2504 evcnt_attach_dynamic(&adapter->queues[i].handleq, 2533 evcnt_attach_dynamic(&adapter->queues[i].handleq,
2505 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 2534 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
2506 "Handled queue in softint"); 2535 "Handled queue in softint");
2507 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, 2536 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
2508 NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); 2537 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
2509 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 2538 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2510 NULL, adapter->queues[i].evnamebuf, "TSO"); 2539 NULL, adapter->queues[i].evnamebuf, "TSO");
2511 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 2540 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2512 NULL, adapter->queues[i].evnamebuf, 2541 NULL, adapter->queues[i].evnamebuf,
2513 "Queue No Descriptor Available"); 2542 "Queue No Descriptor Available");
2514 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 2543 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2515 NULL, adapter->queues[i].evnamebuf, 2544 NULL, adapter->queues[i].evnamebuf,
2516 "Queue Packets Transmitted"); 2545 "Queue Packets Transmitted");
2517#ifndef IXGBE_LEGACY_TX 2546#ifndef IXGBE_LEGACY_TX
2518 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 2547 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2519 NULL, adapter->queues[i].evnamebuf, 2548 NULL, adapter->queues[i].evnamebuf,
2520 "Packets dropped in pcq"); 2549 "Packets dropped in pcq");
2521#endif 2550#endif
2522 2551
2523#ifdef LRO 2552#ifdef LRO
2524 struct lro_ctrl *lro = &rxr->lro; 2553 struct lro_ctrl *lro = &rxr->lro;
2525#endif /* LRO */ 2554#endif /* LRO */
2526 2555
2527 if (sysctl_createv(log, 0, &rnode, &cnode, 2556 if (sysctl_createv(log, 0, &rnode, &cnode,
2528 CTLFLAG_READONLY, 2557 CTLFLAG_READONLY,
2529 CTLTYPE_INT, 2558 CTLTYPE_INT,
2530 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"), 2559 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
2531 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 2560 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
2532 CTL_CREATE, CTL_EOL) != 0) 2561 CTL_CREATE, CTL_EOL) != 0)
2533 break; 2562 break;
2534 2563
2535 if (sysctl_createv(log, 0, &rnode, &cnode, 2564 if (sysctl_createv(log, 0, &rnode, &cnode,
2536 CTLFLAG_READONLY, 2565 CTLFLAG_READONLY,
2537 CTLTYPE_INT, 2566 CTLTYPE_INT,
2538 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"), 2567 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
2539 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0, 2568 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
2540 CTL_CREATE, CTL_EOL) != 0) 2569 CTL_CREATE, CTL_EOL) != 0)
2541 break; 2570 break;
2542 2571
2543 if (sysctl_createv(log, 0, &rnode, &cnode, 2572 if (sysctl_createv(log, 0, &rnode, &cnode,
2544 CTLFLAG_READONLY, 2573 CTLFLAG_READONLY,
2545 CTLTYPE_INT, 2574 CTLTYPE_INT,
2546 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"), 2575 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
2547 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0, 2576 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
2548 CTL_CREATE, CTL_EOL) != 0) 2577 CTL_CREATE, CTL_EOL) != 0)
2549 break; 2578 break;
2550 2579
2551 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 2580 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2552 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received"); 2581 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
2553 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 2582 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2554 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received"); 2583 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
2555 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 2584 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2556 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); 2585 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
2557 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC, 2586 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
2558 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf"); 2587 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
2559 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 2588 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2560 NULL, adapter->queues[i].evnamebuf, "Rx discarded"); 2589 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
2561#ifdef LRO 2590#ifdef LRO
2562 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 2591 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2563 CTLFLAG_RD, &lro->lro_queued, 0, 2592 CTLFLAG_RD, &lro->lro_queued, 0,
2564 "LRO Queued"); 2593 "LRO Queued");
2565 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 2594 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2566 CTLFLAG_RD, &lro->lro_flushed, 0, 2595 CTLFLAG_RD, &lro->lro_flushed, 0,
2567 "LRO Flushed"); 2596 "LRO Flushed");
2568#endif /* LRO */ 2597#endif /* LRO */
2569 } 2598 }
2570 2599
2571 /* MAC stats get their own sub node */ 2600 /* MAC stats get their own sub node */
2572 2601
2573 snprintf(stats->namebuf, 2602 snprintf(stats->namebuf,
2574 sizeof(stats->namebuf), "%s MAC Statistics", xname); 2603 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2575 2604
2576 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 2605 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2577 stats->namebuf, "rx csum offload - IP"); 2606 stats->namebuf, "rx csum offload - IP");
2578 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 2607 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2579 stats->namebuf, "rx csum offload - L4"); 2608 stats->namebuf, "rx csum offload - L4");
2580 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 2609 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2581 stats->namebuf, "rx csum offload - IP bad"); 2610 stats->namebuf, "rx csum offload - IP bad");
2582 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 2611 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2583 stats->namebuf, "rx csum offload - L4 bad"); 2612 stats->namebuf, "rx csum offload - L4 bad");
2584 2613
2585 /* Packet Reception Stats */ 2614 /* Packet Reception Stats */
2586 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL, 2615 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2587 xname, "Good Packets Received"); 2616 xname, "Good Packets Received");
2588 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL, 2617 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2589 xname, "Good Octets Received"); 2618 xname, "Good Octets Received");
2590 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL, 2619 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2591 xname, "Multicast Packets Received"); 2620 xname, "Multicast Packets Received");
2592 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL, 2621 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2593 xname, "Good Packets Transmitted"); 2622 xname, "Good Packets Transmitted");
2594 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL, 2623 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2595 xname, "Good Octets Transmitted"); 2624 xname, "Good Octets Transmitted");
2596 2625
2597 /* Mailbox Stats */ 2626 /* Mailbox Stats */
2598 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL, 2627 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2599 xname, "message TXs"); 2628 xname, "message TXs");
2600 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL, 2629 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2601 xname, "message RXs"); 2630 xname, "message RXs");
2602 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL, 2631 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2603 xname, "ACKs"); 2632 xname, "ACKs");
2604 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL, 2633 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2605 xname, "REQs"); 2634 xname, "REQs");
2606 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL, 2635 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2607 xname, "RSTs"); 2636 xname, "RSTs");
2608 2637
2609} /* ixv_add_stats_sysctls */ 2638} /* ixv_add_stats_sysctls */
2610 2639
2611/************************************************************************ 2640/************************************************************************
2612 * ixv_set_sysctl_value 2641 * ixv_set_sysctl_value
2613 ************************************************************************/ 2642 ************************************************************************/
2614static void 2643static void
2615ixv_set_sysctl_value(struct adapter *adapter, const char *name, 2644ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2616 const char *description, int *limit, int value) 2645 const char *description, int *limit, int value)
2617{ 2646{
2618 device_t dev = adapter->dev; 2647 device_t dev = adapter->dev;
2619 struct sysctllog **log; 2648 struct sysctllog **log;
2620 const struct sysctlnode *rnode, *cnode; 2649 const struct sysctlnode *rnode, *cnode;
2621 2650
2622 log = &adapter->sysctllog; 2651 log = &adapter->sysctllog;
2623 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2652 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2624 aprint_error_dev(dev, "could not create sysctl root\n"); 2653 aprint_error_dev(dev, "could not create sysctl root\n");
2625 return; 2654 return;
2626 } 2655 }
2627 if (sysctl_createv(log, 0, &rnode, &cnode, 2656 if (sysctl_createv(log, 0, &rnode, &cnode,
2628 CTLFLAG_READWRITE, CTLTYPE_INT, 2657 CTLFLAG_READWRITE, CTLTYPE_INT,
2629 name, SYSCTL_DESCR(description), 2658 name, SYSCTL_DESCR(description),
2630 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0) 2659 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2631 aprint_error_dev(dev, "could not create sysctl\n"); 2660 aprint_error_dev(dev, "could not create sysctl\n");
2632 *limit = value; 2661 *limit = value;
2633} /* ixv_set_sysctl_value */ 2662} /* ixv_set_sysctl_value */
2634 2663
2635/************************************************************************ 2664/************************************************************************
2636 * ixv_print_debug_info 2665 * ixv_print_debug_info
2637 * 2666 *
2638 * Called only when em_display_debug_stats is enabled. 2667 * Called only when em_display_debug_stats is enabled.
2639 * Provides a way to take a look at important statistics 2668 * Provides a way to take a look at important statistics
2640 * maintained by the driver and hardware. 2669 * maintained by the driver and hardware.
2641 ************************************************************************/ 2670 ************************************************************************/
2642static void 2671static void
2643ixv_print_debug_info(struct adapter *adapter) 2672ixv_print_debug_info(struct adapter *adapter)
2644{ 2673{
2645 device_t dev = adapter->dev; 2674 device_t dev = adapter->dev;
2646 struct ix_queue *que = adapter->queues; 2675 struct ix_queue *que = adapter->queues;
2647 struct rx_ring *rxr; 2676 struct rx_ring *rxr;
2648 struct tx_ring *txr; 2677 struct tx_ring *txr;
2649#ifdef LRO 2678#ifdef LRO
2650 struct lro_ctrl *lro; 2679 struct lro_ctrl *lro;
2651#endif /* LRO */ 2680#endif /* LRO */
2652 2681
2653 for (int i = 0; i < adapter->num_queues; i++, que++) { 2682 for (int i = 0; i < adapter->num_queues; i++, que++) {
2654 txr = que->txr; 2683 txr = que->txr;
2655 rxr = que->rxr; 2684 rxr = que->rxr;
2656#ifdef LRO 2685#ifdef LRO
2657 lro = &rxr->lro; 2686 lro = &rxr->lro;
2658#endif /* LRO */ 2687#endif /* LRO */
2659 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n", 2688 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2660 que->msix, (long)que->irqs.ev_count); 2689 que->msix, (long)que->irqs.ev_count);
2661 device_printf(dev, "RX(%d) Packets Received: %lld\n", 2690 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2662 rxr->me, (long long)rxr->rx_packets.ev_count); 2691 rxr->me, (long long)rxr->rx_packets.ev_count);
2663 device_printf(dev, "RX(%d) Bytes Received: %lu\n", 2692 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2664 rxr->me, (long)rxr->rx_bytes.ev_count); 2693 rxr->me, (long)rxr->rx_bytes.ev_count);
2665#ifdef LRO 2694#ifdef LRO
2666 device_printf(dev, "RX(%d) LRO Queued= %ju\n", 2695 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2667 rxr->me, (uintmax_t)lro->lro_queued); 2696 rxr->me, (uintmax_t)lro->lro_queued);
2668 device_printf(dev, "RX(%d) LRO Flushed= %ju\n", 2697 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2669 rxr->me, (uintmax_t)lro->lro_flushed); 2698 rxr->me, (uintmax_t)lro->lro_flushed);
2670#endif /* LRO */ 2699#endif /* LRO */
2671 device_printf(dev, "TX(%d) Packets Sent: %lu\n", 2700 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2672 txr->me, (long)txr->total_packets.ev_count); 2701 txr->me, (long)txr->total_packets.ev_count);
2673 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n", 2702 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2674 txr->me, (long)txr->no_desc_avail.ev_count); 2703 txr->me, (long)txr->no_desc_avail.ev_count);
2675 } 2704 }
2676 2705
2677 device_printf(dev, "MBX IRQ Handled: %lu\n", 2706 device_printf(dev, "MBX IRQ Handled: %lu\n",
2678 (long)adapter->link_irq.ev_count); 2707 (long)adapter->link_irq.ev_count);
2679} /* ixv_print_debug_info */ 2708} /* ixv_print_debug_info */
2680 2709
2681/************************************************************************ 2710/************************************************************************
2682 * ixv_sysctl_debug 2711 * ixv_sysctl_debug
2683 ************************************************************************/ 2712 ************************************************************************/
2684static int 2713static int
2685ixv_sysctl_debug(SYSCTLFN_ARGS) 2714ixv_sysctl_debug(SYSCTLFN_ARGS)
2686{ 2715{
2687 struct sysctlnode node = *rnode; 2716 struct sysctlnode node = *rnode;
2688 struct adapter *adapter = (struct adapter *)node.sysctl_data; 2717 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2689 int error, result; 2718 int error, result;
2690 2719
2691 node.sysctl_data = &result; 2720 node.sysctl_data = &result;
2692 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2721 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2693 2722
2694 if (error || newp == NULL) 2723 if (error || newp == NULL)
2695 return error; 2724 return error;
2696 2725
2697 if (result == 1) 2726 if (result == 1)
2698 ixv_print_debug_info(adapter); 2727 ixv_print_debug_info(adapter);
2699 2728
2700 return 0; 2729 return 0;
2701} /* ixv_sysctl_debug */ 2730} /* ixv_sysctl_debug */
2702 2731
2703/************************************************************************ 2732/************************************************************************
2704 * ixv_init_device_features 2733 * ixv_init_device_features
2705 ************************************************************************/ 2734 ************************************************************************/
2706static void 2735static void
2707ixv_init_device_features(struct adapter *adapter) 2736ixv_init_device_features(struct adapter *adapter)
2708{ 2737{
2709 adapter->feat_cap = IXGBE_FEATURE_NETMAP 2738 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2710 | IXGBE_FEATURE_VF 2739 | IXGBE_FEATURE_VF
2711 | IXGBE_FEATURE_RSS 2740 | IXGBE_FEATURE_RSS
2712 | IXGBE_FEATURE_LEGACY_TX; 2741 | IXGBE_FEATURE_LEGACY_TX;
2713 2742
2714 /* A tad short on feature flags for VFs, atm. */ 2743 /* A tad short on feature flags for VFs, atm. */
2715 switch (adapter->hw.mac.type) { 2744 switch (adapter->hw.mac.type) {
2716 case ixgbe_mac_82599_vf: 2745 case ixgbe_mac_82599_vf:
2717 break; 2746 break;
2718 case ixgbe_mac_X540_vf: 2747 case ixgbe_mac_X540_vf:
2719 break; 2748 break;
2720 case ixgbe_mac_X550_vf: 2749 case ixgbe_mac_X550_vf:
2721 case ixgbe_mac_X550EM_x_vf: 2750 case ixgbe_mac_X550EM_x_vf:
2722 case ixgbe_mac_X550EM_a_vf: 2751 case ixgbe_mac_X550EM_a_vf:
2723 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; 2752 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2724 break; 2753 break;
2725 default: 2754 default:
2726 break; 2755 break;
2727 } 2756 }
2728 2757
2729 /* Enabled by default... */ 2758 /* Enabled by default... */
2730 /* Is a virtual function (VF) */ 2759 /* Is a virtual function (VF) */
2731 if (adapter->feat_cap & IXGBE_FEATURE_VF) 2760 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2732 adapter->feat_en |= IXGBE_FEATURE_VF; 2761 adapter->feat_en |= IXGBE_FEATURE_VF;
2733 /* Netmap */ 2762 /* Netmap */
2734 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 2763 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2735 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 2764 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2736 /* Receive-Side Scaling (RSS) */ 2765 /* Receive-Side Scaling (RSS) */
2737 if (adapter->feat_cap & IXGBE_FEATURE_RSS) 2766 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2738 adapter->feat_en |= IXGBE_FEATURE_RSS; 2767 adapter->feat_en |= IXGBE_FEATURE_RSS;
2739 /* Needs advanced context descriptor regardless of offloads req'd */ 2768 /* Needs advanced context descriptor regardless of offloads req'd */
2740 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) 2769 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2741 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; 2770 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2742 2771
2743 /* Enabled via sysctl... */ 2772 /* Enabled via sysctl... */
2744 /* Legacy (single queue) transmit */ 2773 /* Legacy (single queue) transmit */
2745 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 2774 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2746 ixv_enable_legacy_tx) 2775 ixv_enable_legacy_tx)
2747 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; 2776 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2748} /* ixv_init_device_features */ 2777} /* ixv_init_device_features */
2749 2778
2750/************************************************************************ 2779/************************************************************************
2751 * ixv_shutdown - Shutdown entry point 2780 * ixv_shutdown - Shutdown entry point
2752 ************************************************************************/ 2781 ************************************************************************/
2753#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ 2782#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2754static int 2783static int
2755ixv_shutdown(device_t dev) 2784ixv_shutdown(device_t dev)
2756{ 2785{
2757 struct adapter *adapter = device_private(dev); 2786 struct adapter *adapter = device_private(dev);
2758 IXGBE_CORE_LOCK(adapter); 2787 IXGBE_CORE_LOCK(adapter);
2759 ixv_stop(adapter); 2788 ixv_stop(adapter);
2760 IXGBE_CORE_UNLOCK(adapter); 2789 IXGBE_CORE_UNLOCK(adapter);
2761 2790
2762 return (0); 2791 return (0);
2763} /* ixv_shutdown */ 2792} /* ixv_shutdown */
2764#endif 2793#endif
2765 2794
2766static int 2795static int
2767ixv_ifflags_cb(struct ethercom *ec) 2796ixv_ifflags_cb(struct ethercom *ec)
2768{ 2797{
2769 struct ifnet *ifp = &ec->ec_if; 2798 struct ifnet *ifp = &ec->ec_if;
2770 struct adapter *adapter = ifp->if_softc; 2799 struct adapter *adapter = ifp->if_softc;
2771 int change, rv = 0; 2800 int change, rv = 0;
2772 2801
2773 IXGBE_CORE_LOCK(adapter); 2802 IXGBE_CORE_LOCK(adapter);
2774 2803
2775 change = ifp->if_flags ^ adapter->if_flags; 2804 change = ifp->if_flags ^ adapter->if_flags;
2776 if (change != 0) 2805 if (change != 0)
2777 adapter->if_flags = ifp->if_flags; 2806 adapter->if_flags = ifp->if_flags;
2778 2807
2779 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 2808 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2780 rv = ENETRESET; 2809 rv = ENETRESET;
2781 goto out; 2810 goto out;
2782 } 2811 }
2783 2812
2784 /* Check for ec_capenable. */ 2813 /* Check for ec_capenable. */
2785 change = ec->ec_capenable ^ adapter->ec_capenable; 2814 change = ec->ec_capenable ^ adapter->ec_capenable;
2786 adapter->ec_capenable = ec->ec_capenable; 2815 adapter->ec_capenable = ec->ec_capenable;
2787 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 2816 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
2788 | ETHERCAP_VLAN_HWFILTER)) != 0) { 2817 | ETHERCAP_VLAN_HWFILTER)) != 0) {
2789 rv = ENETRESET; 2818 rv = ENETRESET;
2790 goto out; 2819 goto out;
2791 } 2820 }
2792 2821
2793 /* 2822 /*
2794 * Special handling is not required for ETHERCAP_VLAN_MTU. 2823 * Special handling is not required for ETHERCAP_VLAN_MTU.
2795 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header. 2824 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
2796 */ 2825 */
2797 2826
2798 /* Set up VLAN support and filter */ 2827 /* Set up VLAN support and filter */
2799 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0) 2828 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
2800 rv = ixv_setup_vlan_support(adapter); 2829 rv = ixv_setup_vlan_support(adapter);
2801 2830
2802out: 2831out:
2803 IXGBE_CORE_UNLOCK(adapter); 2832 IXGBE_CORE_UNLOCK(adapter);
2804 2833
2805 return rv; 2834 return rv;
2806} 2835}
2807 2836
2808 2837
2809/************************************************************************ 2838/************************************************************************
2810 * ixv_ioctl - Ioctl entry point 2839 * ixv_ioctl - Ioctl entry point
2811 * 2840 *
2812 * Called when the user wants to configure the interface. 2841 * Called when the user wants to configure the interface.
2813 * 2842 *
2814 * return 0 on success, positive on failure 2843 * return 0 on success, positive on failure
2815 ************************************************************************/ 2844 ************************************************************************/
2816static int 2845static int
2817ixv_ioctl(struct ifnet *ifp, u_long command, void *data) 2846ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
2818{ 2847{
2819 struct adapter *adapter = ifp->if_softc; 2848 struct adapter *adapter = ifp->if_softc;
2820 struct ifcapreq *ifcr = data; 2849 struct ifcapreq *ifcr = data;
2821 int error = 0; 2850 int error = 0;
2822 int l4csum_en; 2851 int l4csum_en;
2823 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 2852 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
2824 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 2853 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2825 2854
2826 switch (command) { 2855 switch (command) {
2827 case SIOCSIFFLAGS: 2856 case SIOCSIFFLAGS:
2828 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 2857 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2829 break; 2858 break;
2830 case SIOCADDMULTI: 2859 case SIOCADDMULTI:
2831 case SIOCDELMULTI: 2860 case SIOCDELMULTI:
2832 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 2861 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
2833 break; 2862 break;
2834 case SIOCSIFMEDIA: 2863 case SIOCSIFMEDIA:
2835 case SIOCGIFMEDIA: 2864 case SIOCGIFMEDIA:
2836 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 2865 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
2837 break; 2866 break;
2838 case SIOCSIFCAP: 2867 case SIOCSIFCAP:
2839 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 2868 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
2840 break; 2869 break;
2841 case SIOCSIFMTU: 2870 case SIOCSIFMTU:
2842 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 2871 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
2843 break; 2872 break;
2844 default: 2873 default:
2845 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); 2874 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
2846 break; 2875 break;
2847 } 2876 }
2848 2877
2849 switch (command) { 2878 switch (command) {
2850 case SIOCSIFCAP: 2879 case SIOCSIFCAP:
2851 /* Layer-4 Rx checksum offload has to be turned on and 2880 /* Layer-4 Rx checksum offload has to be turned on and
2852 * off as a unit. 2881 * off as a unit.
2853 */ 2882 */
2854 l4csum_en = ifcr->ifcr_capenable & l4csum; 2883 l4csum_en = ifcr->ifcr_capenable & l4csum;
2855 if (l4csum_en != l4csum && l4csum_en != 0) 2884 if (l4csum_en != l4csum && l4csum_en != 0)
2856 return EINVAL; 2885 return EINVAL;
2857 /*FALLTHROUGH*/ 2886 /*FALLTHROUGH*/
2858 case SIOCADDMULTI: 2887 case SIOCADDMULTI:
2859 case SIOCDELMULTI: 2888 case SIOCDELMULTI:
2860 case SIOCSIFFLAGS: 2889 case SIOCSIFFLAGS:
2861 case SIOCSIFMTU: 2890 case SIOCSIFMTU:
2862 default: 2891 default:
2863 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 2892 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2864 return error; 2893 return error;
2865 if ((ifp->if_flags & IFF_RUNNING) == 0) 2894 if ((ifp->if_flags & IFF_RUNNING) == 0)
2866 ; 2895 ;
2867 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { 2896 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
2868 IXGBE_CORE_LOCK(adapter); 2897 IXGBE_CORE_LOCK(adapter);
2869 ixv_init_locked(adapter); 2898 ixv_init_locked(adapter);
2870 IXGBE_CORE_UNLOCK(adapter); 2899 IXGBE_CORE_UNLOCK(adapter);
2871 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 2900 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
2872 /* 2901 /*
2873 * Multicast list has changed; set the hardware filter 2902 * Multicast list has changed; set the hardware filter
2874 * accordingly. 2903 * accordingly.
2875 */ 2904 */
2876 IXGBE_CORE_LOCK(adapter); 2905 IXGBE_CORE_LOCK(adapter);
2877 ixv_disable_intr(adapter); 2906 ixv_disable_intr(adapter);
2878 ixv_set_multi(adapter); 2907 ixv_set_multi(adapter);
2879 ixv_enable_intr(adapter); 2908 ixv_enable_intr(adapter);
2880 IXGBE_CORE_UNLOCK(adapter); 2909 IXGBE_CORE_UNLOCK(adapter);
2881 } 2910 }
2882 return 0; 2911 return 0;
2883 } 2912 }
2884} /* ixv_ioctl */ 2913} /* ixv_ioctl */
2885 2914
2886/************************************************************************ 2915/************************************************************************
2887 * ixv_init 2916 * ixv_init
2888 ************************************************************************/ 2917 ************************************************************************/
2889static int 2918static int
2890ixv_init(struct ifnet *ifp) 2919ixv_init(struct ifnet *ifp)
2891{ 2920{
2892 struct adapter *adapter = ifp->if_softc; 2921 struct adapter *adapter = ifp->if_softc;
2893 2922
2894 IXGBE_CORE_LOCK(adapter); 2923 IXGBE_CORE_LOCK(adapter);
2895 ixv_init_locked(adapter); 2924 ixv_init_locked(adapter);
2896 IXGBE_CORE_UNLOCK(adapter); 2925 IXGBE_CORE_UNLOCK(adapter);
2897 2926
2898 return 0; 2927 return 0;
2899} /* ixv_init */ 2928} /* ixv_init */
2900 2929
2901/************************************************************************ 2930/************************************************************************
2902 * ixv_handle_que 2931 * ixv_handle_que
2903 ************************************************************************/ 2932 ************************************************************************/
2904static void 2933static void
2905ixv_handle_que(void *context) 2934ixv_handle_que(void *context)
2906{ 2935{
2907 struct ix_queue *que = context; 2936 struct ix_queue *que = context;
2908 struct adapter *adapter = que->adapter; 2937 struct adapter *adapter = que->adapter;
2909 struct tx_ring *txr = que->txr; 2938 struct tx_ring *txr = que->txr;
2910 struct ifnet *ifp = adapter->ifp; 2939 struct ifnet *ifp = adapter->ifp;
2911 bool more; 2940 bool more;
2912 2941
2913 que->handleq.ev_count++; 2942 que->handleq.ev_count++;
2914 2943
2915 if (ifp->if_flags & IFF_RUNNING) { 2944 if (ifp->if_flags & IFF_RUNNING) {
2916 more = ixgbe_rxeof(que); 2945 more = ixgbe_rxeof(que);
2917 IXGBE_TX_LOCK(txr); 2946 IXGBE_TX_LOCK(txr);
2918 more |= ixgbe_txeof(txr); 2947 more |= ixgbe_txeof(txr);
2919 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 2948 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
2920 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) 2949 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
2921 ixgbe_mq_start_locked(ifp, txr); 2950 ixgbe_mq_start_locked(ifp, txr);
2922 /* Only for queue 0 */ 2951 /* Only for queue 0 */
2923 /* NetBSD still needs this for CBQ */ 2952 /* NetBSD still needs this for CBQ */
2924 if ((&adapter->queues[0] == que) 2953 if ((&adapter->queues[0] == que)
2925 && (!ixgbe_legacy_ring_empty(ifp, NULL))) 2954 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
2926 ixgbe_legacy_start_locked(ifp, txr); 2955 ixgbe_legacy_start_locked(ifp, txr);
2927 IXGBE_TX_UNLOCK(txr); 2956 IXGBE_TX_UNLOCK(txr);
2928 if (more) { 2957 if (more) {
2929 que->req.ev_count++; 2958 que->req.ev_count++;
2930 if (adapter->txrx_use_workqueue) { 2959 if (adapter->txrx_use_workqueue) {
2931 /* 2960 /*
2932 * "enqueued flag" is not required here 2961 * "enqueued flag" is not required here
2933 * the same as ixg(4). See ixgbe_msix_que(). 2962 * the same as ixg(4). See ixgbe_msix_que().
2934 */ 2963 */
2935 workqueue_enqueue(adapter->que_wq, 2964 workqueue_enqueue(adapter->que_wq,
2936 &que->wq_cookie, curcpu()); 2965 &que->wq_cookie, curcpu());
2937 } else 2966 } else
2938 softint_schedule(que->que_si); 2967 softint_schedule(que->que_si);
2939 return; 2968 return;
2940 } 2969 }
2941 } 2970 }
2942 2971
2943 /* Re-enable this interrupt */ 2972 /* Re-enable this interrupt */
2944 ixv_enable_queue(adapter, que->msix); 2973 ixv_enable_queue(adapter, que->msix);
2945 2974
2946 return; 2975 return;
2947} /* ixv_handle_que */ 2976} /* ixv_handle_que */
2948 2977
2949/************************************************************************ 2978/************************************************************************
2950 * ixv_handle_que_work 2979 * ixv_handle_que_work
2951 ************************************************************************/ 2980 ************************************************************************/
2952static void 2981static void
2953ixv_handle_que_work(struct work *wk, void *context) 2982ixv_handle_que_work(struct work *wk, void *context)
2954{ 2983{
2955 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); 2984 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
2956 2985
2957 /* 2986 /*
2958 * "enqueued flag" is not required here the same as ixg(4). 2987 * "enqueued flag" is not required here the same as ixg(4).
2959 * See ixgbe_msix_que(). 2988 * See ixgbe_msix_que().
2960 */ 2989 */
2961 ixv_handle_que(que); 2990 ixv_handle_que(que);
2962} 2991}
2963 2992
2964/************************************************************************ 2993/************************************************************************
2965 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers 2994 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
2966 ************************************************************************/ 2995 ************************************************************************/
2967static int 2996static int
2968ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa) 2997ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
2969{ 2998{
2970 device_t dev = adapter->dev; 2999 device_t dev = adapter->dev;
2971 struct ix_queue *que = adapter->queues; 3000 struct ix_queue *que = adapter->queues;
2972 struct tx_ring *txr = adapter->tx_rings; 3001 struct tx_ring *txr = adapter->tx_rings;
2973 int error, msix_ctrl, rid, vector = 0; 3002 int error, msix_ctrl, rid, vector = 0;
2974 pci_chipset_tag_t pc; 3003 pci_chipset_tag_t pc;
2975 pcitag_t tag; 3004 pcitag_t tag;
2976 char intrbuf[PCI_INTRSTR_LEN]; 3005 char intrbuf[PCI_INTRSTR_LEN];
2977 char wqname[MAXCOMLEN]; 3006 char wqname[MAXCOMLEN];
2978 char intr_xname[32]; 3007 char intr_xname[32];
2979 const char *intrstr = NULL; 3008 const char *intrstr = NULL;
2980 kcpuset_t *affinity; 3009 kcpuset_t *affinity;
2981 int cpu_id = 0; 3010 int cpu_id = 0;
2982 3011
2983 pc = adapter->osdep.pc; 3012 pc = adapter->osdep.pc;
2984 tag = adapter->osdep.tag; 3013 tag = adapter->osdep.tag;
2985 3014
2986 adapter->osdep.nintrs = adapter->num_queues + 1; 3015 adapter->osdep.nintrs = adapter->num_queues + 1;
2987 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs, 3016 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
2988 adapter->osdep.nintrs) != 0) { 3017 adapter->osdep.nintrs) != 0) {
2989 aprint_error_dev(dev, 3018 aprint_error_dev(dev,
2990 "failed to allocate MSI-X interrupt\n"); 3019 "failed to allocate MSI-X interrupt\n");
2991 return (ENXIO); 3020 return (ENXIO);
2992 } 3021 }
2993 3022
2994 kcpuset_create(&affinity, false); 3023 kcpuset_create(&affinity, false);
2995 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 3024 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
2996 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", 3025 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
2997 device_xname(dev), i); 3026 device_xname(dev), i);
2998 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf, 3027 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
2999 sizeof(intrbuf)); 3028 sizeof(intrbuf));
3000#ifdef IXGBE_MPSAFE 3029#ifdef IXGBE_MPSAFE
3001 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE, 3030 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3002 true); 3031 true);
3003#endif 3032#endif
3004 /* Set the handler function */ 3033 /* Set the handler function */
3005 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc, 3034 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3006 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que, 3035 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3007 intr_xname); 3036 intr_xname);
3008 if (que->res == NULL) { 3037 if (que->res == NULL) {
3009 pci_intr_release(pc, adapter->osdep.intrs, 3038 pci_intr_release(pc, adapter->osdep.intrs,
3010 adapter->osdep.nintrs); 3039 adapter->osdep.nintrs);
3011 aprint_error_dev(dev, 3040 aprint_error_dev(dev,
3012 "Failed to register QUE handler\n"); 3041 "Failed to register QUE handler\n");
3013 kcpuset_destroy(affinity); 3042 kcpuset_destroy(affinity);
3014 return (ENXIO); 3043 return (ENXIO);
3015 } 3044 }
3016 que->msix = vector; 3045 que->msix = vector;
3017 adapter->active_queues |= (u64)(1 << que->msix); 3046 adapter->active_queues |= (u64)(1 << que->msix);
3018 3047
3019 cpu_id = i; 3048 cpu_id = i;
3020 /* Round-robin affinity */ 3049 /* Round-robin affinity */
3021 kcpuset_zero(affinity); 3050 kcpuset_zero(affinity);
3022 kcpuset_set(affinity, cpu_id % ncpu); 3051 kcpuset_set(affinity, cpu_id % ncpu);
3023 error = interrupt_distribute(adapter->osdep.ihs[i], affinity, 3052 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3024 NULL); 3053 NULL);
3025 aprint_normal_dev(dev, "for TX/RX, interrupting at %s", 3054 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3026 intrstr); 3055 intrstr);
3027 if (error == 0) 3056 if (error == 0)
3028 aprint_normal(", bound queue %d to cpu %d\n", 3057 aprint_normal(", bound queue %d to cpu %d\n",
3029 i, cpu_id % ncpu); 3058 i, cpu_id % ncpu);
3030 else 3059 else
3031 aprint_normal("\n"); 3060 aprint_normal("\n");
3032 3061
3033#ifndef IXGBE_LEGACY_TX 3062#ifndef IXGBE_LEGACY_TX
3034 txr->txr_si 3063 txr->txr_si
3035 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 3064 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3036 ixgbe_deferred_mq_start, txr); 3065 ixgbe_deferred_mq_start, txr);
3037#endif 3066#endif
3038 que->que_si 3067 que->que_si
3039 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, 3068 = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS,
3040 ixv_handle_que, que); 3069 ixv_handle_que, que);
3041 if (que->que_si == NULL) { 3070 if (que->que_si == NULL) {
3042 aprint_error_dev(dev, 3071 aprint_error_dev(dev,
3043 "could not establish software interrupt\n"); 3072 "could not establish software interrupt\n");
3044 } 3073 }

cvs diff -r1.141 -r1.141.2.1 src/sys/net/if_vlan.c (switch to unified diff)

--- src/sys/net/if_vlan.c 2019/07/17 03:26:24 1.141
+++ src/sys/net/if_vlan.c 2019/09/01 11:07:06 1.141.2.1
@@ -1,1676 +1,1677 @@ @@ -1,1676 +1,1677 @@
1/* $NetBSD: if_vlan.c,v 1.141 2019/07/17 03:26:24 msaitoh Exp $ */ 1/* $NetBSD: if_vlan.c,v 1.141.2.1 2019/09/01 11:07:06 martin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc. 8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright 1998 Massachusetts Institute of Technology 33 * Copyright 1998 Massachusetts Institute of Technology
34 * 34 *
35 * Permission to use, copy, modify, and distribute this software and 35 * Permission to use, copy, modify, and distribute this software and
36 * its documentation for any purpose and without fee is hereby 36 * its documentation for any purpose and without fee is hereby
37 * granted, provided that both the above copyright notice and this 37 * granted, provided that both the above copyright notice and this
38 * permission notice appear in all copies, that both the above 38 * permission notice appear in all copies, that both the above
39 * copyright notice and this permission notice appear in all 39 * copyright notice and this permission notice appear in all
40 * supporting documentation, and that the name of M.I.T. not be used 40 * supporting documentation, and that the name of M.I.T. not be used
41 * in advertising or publicity pertaining to distribution of the 41 * in advertising or publicity pertaining to distribution of the
42 * software without specific, written prior permission. M.I.T. makes 42 * software without specific, written prior permission. M.I.T. makes
43 * no representations about the suitability of this software for any 43 * no representations about the suitability of this software for any
44 * purpose. It is provided "as is" without express or implied 44 * purpose. It is provided "as is" without express or implied
45 * warranty. 45 * warranty.
46 * 46 *
47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE. 58 * SUCH DAMAGE.
59 * 59 *
60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp 60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp 61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62 */ 62 */
63 63
64/* 64/*
65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be 65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
66 * extended some day to also handle IEEE 802.1P priority tagging. This is 66 * extended some day to also handle IEEE 802.1P priority tagging. This is
67 * sort of sneaky in the implementation, since we need to pretend to be 67 * sort of sneaky in the implementation, since we need to pretend to be
68 * enough of an Ethernet implementation to make ARP work. The way we do 68 * enough of an Ethernet implementation to make ARP work. The way we do
69 * this is by telling everyone that we are an Ethernet interface, and then 69 * this is by telling everyone that we are an Ethernet interface, and then
70 * catch the packets that ether_output() left on our output queue when it 70 * catch the packets that ether_output() left on our output queue when it
71 * calls if_start(), rewrite them for use by the real outgoing interface, 71 * calls if_start(), rewrite them for use by the real outgoing interface,
72 * and ask it to send them. 72 * and ask it to send them.
73 * 73 *
74 * TODO: 74 * TODO:
75 * 75 *
76 * - Need some way to notify vlan interfaces when the parent 76 * - Need some way to notify vlan interfaces when the parent
77 * interface changes MTU. 77 * interface changes MTU.
78 */ 78 */
79 79
80#include <sys/cdefs.h> 80#include <sys/cdefs.h>
81__KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.141 2019/07/17 03:26:24 msaitoh Exp $"); 81__KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.141.2.1 2019/09/01 11:07:06 martin Exp $");
82 82
83#ifdef _KERNEL_OPT 83#ifdef _KERNEL_OPT
84#include "opt_inet.h" 84#include "opt_inet.h"
85#include "opt_net_mpsafe.h" 85#include "opt_net_mpsafe.h"
86#endif 86#endif
87 87
88#include <sys/param.h> 88#include <sys/param.h>
89#include <sys/systm.h> 89#include <sys/systm.h>
90#include <sys/kernel.h> 90#include <sys/kernel.h>
91#include <sys/mbuf.h> 91#include <sys/mbuf.h>
92#include <sys/queue.h> 92#include <sys/queue.h>
93#include <sys/socket.h> 93#include <sys/socket.h>
94#include <sys/sockio.h> 94#include <sys/sockio.h>
95#include <sys/systm.h> 95#include <sys/systm.h>
96#include <sys/proc.h> 96#include <sys/proc.h>
97#include <sys/kauth.h> 97#include <sys/kauth.h>
98#include <sys/mutex.h> 98#include <sys/mutex.h>
99#include <sys/kmem.h> 99#include <sys/kmem.h>
100#include <sys/cpu.h> 100#include <sys/cpu.h>
101#include <sys/pserialize.h> 101#include <sys/pserialize.h>
102#include <sys/psref.h> 102#include <sys/psref.h>
103#include <sys/pslist.h> 103#include <sys/pslist.h>
104#include <sys/atomic.h> 104#include <sys/atomic.h>
105#include <sys/device.h> 105#include <sys/device.h>
106#include <sys/module.h> 106#include <sys/module.h>
107 107
108#include <net/bpf.h> 108#include <net/bpf.h>
109#include <net/if.h> 109#include <net/if.h>
110#include <net/if_dl.h> 110#include <net/if_dl.h>
111#include <net/if_types.h> 111#include <net/if_types.h>
112#include <net/if_ether.h> 112#include <net/if_ether.h>
113#include <net/if_vlanvar.h> 113#include <net/if_vlanvar.h>
114 114
115#ifdef INET 115#ifdef INET
116#include <netinet/in.h> 116#include <netinet/in.h>
117#include <netinet/if_inarp.h> 117#include <netinet/if_inarp.h>
118#endif 118#endif
119#ifdef INET6 119#ifdef INET6
120#include <netinet6/in6_ifattach.h> 120#include <netinet6/in6_ifattach.h>
121#include <netinet6/in6_var.h> 121#include <netinet6/in6_var.h>
122#endif 122#endif
123 123
124#include "ioconf.h" 124#include "ioconf.h"
125 125
126struct vlan_mc_entry { 126struct vlan_mc_entry {
127 LIST_ENTRY(vlan_mc_entry) mc_entries; 127 LIST_ENTRY(vlan_mc_entry) mc_entries;
128 /* 128 /*
129 * A key to identify this entry. The mc_addr below can't be 129 * A key to identify this entry. The mc_addr below can't be
130 * used since multiple sockaddr may mapped into the same 130 * used since multiple sockaddr may mapped into the same
131 * ether_multi (e.g., AF_UNSPEC). 131 * ether_multi (e.g., AF_UNSPEC).
132 */ 132 */
133 struct ether_multi *mc_enm; 133 struct ether_multi *mc_enm;
134 struct sockaddr_storage mc_addr; 134 struct sockaddr_storage mc_addr;
135}; 135};
136 136
137struct ifvlan_linkmib { 137struct ifvlan_linkmib {
138 struct ifvlan *ifvm_ifvlan; 138 struct ifvlan *ifvm_ifvlan;
139 const struct vlan_multisw *ifvm_msw; 139 const struct vlan_multisw *ifvm_msw;
140 int ifvm_encaplen; /* encapsulation length */ 140 int ifvm_encaplen; /* encapsulation length */
141 int ifvm_mtufudge; /* MTU fudged by this much */ 141 int ifvm_mtufudge; /* MTU fudged by this much */
142 int ifvm_mintu; /* min transmission unit */ 142 int ifvm_mintu; /* min transmission unit */
143 uint16_t ifvm_proto; /* encapsulation ethertype */ 143 uint16_t ifvm_proto; /* encapsulation ethertype */
144 uint16_t ifvm_tag; /* tag to apply on packets */ 144 uint16_t ifvm_tag; /* tag to apply on packets */
145 struct ifnet *ifvm_p; /* parent interface of this vlan */ 145 struct ifnet *ifvm_p; /* parent interface of this vlan */
146 146
147 struct psref_target ifvm_psref; 147 struct psref_target ifvm_psref;
148}; 148};
149 149
150struct ifvlan { 150struct ifvlan {
151 struct ethercom ifv_ec; 151 struct ethercom ifv_ec;
152 struct ifvlan_linkmib *ifv_mib; /* 152 struct ifvlan_linkmib *ifv_mib; /*
153 * reader must use vlan_getref_linkmib() 153 * reader must use vlan_getref_linkmib()
154 * instead of direct dereference 154 * instead of direct dereference
155 */ 155 */
156 kmutex_t ifv_lock; /* writer lock for ifv_mib */ 156 kmutex_t ifv_lock; /* writer lock for ifv_mib */
157 pserialize_t ifv_psz; 157 pserialize_t ifv_psz;
158 158
159 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead; 159 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
160 LIST_ENTRY(ifvlan) ifv_list; 160 LIST_ENTRY(ifvlan) ifv_list;
161 struct pslist_entry ifv_hash; 161 struct pslist_entry ifv_hash;
162 int ifv_flags; 162 int ifv_flags;
163}; 163};
164 164
165#define IFVF_PROMISC 0x01 /* promiscuous mode enabled */ 165#define IFVF_PROMISC 0x01 /* promiscuous mode enabled */
166 166
167#define ifv_if ifv_ec.ec_if 167#define ifv_if ifv_ec.ec_if
168 168
169#define ifv_msw ifv_mib.ifvm_msw 169#define ifv_msw ifv_mib.ifvm_msw
170#define ifv_encaplen ifv_mib.ifvm_encaplen 170#define ifv_encaplen ifv_mib.ifvm_encaplen
171#define ifv_mtufudge ifv_mib.ifvm_mtufudge 171#define ifv_mtufudge ifv_mib.ifvm_mtufudge
172#define ifv_mintu ifv_mib.ifvm_mintu 172#define ifv_mintu ifv_mib.ifvm_mintu
173#define ifv_tag ifv_mib.ifvm_tag 173#define ifv_tag ifv_mib.ifvm_tag
174 174
175struct vlan_multisw { 175struct vlan_multisw {
176 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *); 176 int (*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
177 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *); 177 int (*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
178 void (*vmsw_purgemulti)(struct ifvlan *); 178 void (*vmsw_purgemulti)(struct ifvlan *);
179}; 179};
180 180
181static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *); 181static int vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
182static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *); 182static int vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
183static void vlan_ether_purgemulti(struct ifvlan *); 183static void vlan_ether_purgemulti(struct ifvlan *);
184 184
185const struct vlan_multisw vlan_ether_multisw = { 185const struct vlan_multisw vlan_ether_multisw = {
186 .vmsw_addmulti = vlan_ether_addmulti, 186 .vmsw_addmulti = vlan_ether_addmulti,
187 .vmsw_delmulti = vlan_ether_delmulti, 187 .vmsw_delmulti = vlan_ether_delmulti,
188 .vmsw_purgemulti = vlan_ether_purgemulti, 188 .vmsw_purgemulti = vlan_ether_purgemulti,
189}; 189};
190 190
191static int vlan_clone_create(struct if_clone *, int); 191static int vlan_clone_create(struct if_clone *, int);
192static int vlan_clone_destroy(struct ifnet *); 192static int vlan_clone_destroy(struct ifnet *);
193static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t); 193static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
194static int vlan_ioctl(struct ifnet *, u_long, void *); 194static int vlan_ioctl(struct ifnet *, u_long, void *);
195static void vlan_start(struct ifnet *); 195static void vlan_start(struct ifnet *);
196static int vlan_transmit(struct ifnet *, struct mbuf *); 196static int vlan_transmit(struct ifnet *, struct mbuf *);
197static void vlan_unconfig(struct ifnet *); 197static void vlan_unconfig(struct ifnet *);
198static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *); 198static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
199static void vlan_hash_init(void); 199static void vlan_hash_init(void);
200static int vlan_hash_fini(void); 200static int vlan_hash_fini(void);
201static int vlan_tag_hash(uint16_t, u_long); 201static int vlan_tag_hash(uint16_t, u_long);
202static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *, 202static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *,
203 struct psref *); 203 struct psref *);
204static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *); 204static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
205static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *); 205static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
206static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *, 206static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *,
207 uint16_t, struct psref *); 207 uint16_t, struct psref *);
208 208
209static struct { 209static struct {
210 kmutex_t lock; 210 kmutex_t lock;
211 LIST_HEAD(vlan_ifvlist, ifvlan) list; 211 LIST_HEAD(vlan_ifvlist, ifvlan) list;
212} ifv_list __cacheline_aligned; 212} ifv_list __cacheline_aligned;
213 213
214 214
215#if !defined(VLAN_TAG_HASH_SIZE) 215#if !defined(VLAN_TAG_HASH_SIZE)
216#define VLAN_TAG_HASH_SIZE 32 216#define VLAN_TAG_HASH_SIZE 32
217#endif 217#endif
218static struct { 218static struct {
219 kmutex_t lock; 219 kmutex_t lock;
220 struct pslist_head *lists; 220 struct pslist_head *lists;
221 u_long mask; 221 u_long mask;
222} ifv_hash __cacheline_aligned = { 222} ifv_hash __cacheline_aligned = {
223 .lists = NULL, 223 .lists = NULL,
224 .mask = 0, 224 .mask = 0,
225}; 225};
226 226
227pserialize_t vlan_psz __read_mostly; 227pserialize_t vlan_psz __read_mostly;
228static struct psref_class *ifvm_psref_class __read_mostly; 228static struct psref_class *ifvm_psref_class __read_mostly;
229 229
230struct if_clone vlan_cloner = 230struct if_clone vlan_cloner =
231 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy); 231 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
232 232
233/* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */ 233/* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
234static char vlan_zero_pad_buff[ETHER_MIN_LEN]; 234static char vlan_zero_pad_buff[ETHER_MIN_LEN];
235 235
236static inline int 236static inline int
237vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch) 237vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
238{ 238{
239 int e; 239 int e;
240 240
241 KERNEL_LOCK_UNLESS_NET_MPSAFE(); 241 KERNEL_LOCK_UNLESS_NET_MPSAFE();
242 e = ifpromisc(ifp, pswitch); 242 e = ifpromisc(ifp, pswitch);
243 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 243 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
244 244
245 return e; 245 return e;
246} 246}
247 247
248static inline int 248static inline int
249vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch) 249vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
250{ 250{
251 int e; 251 int e;
252 252
253 KERNEL_LOCK_UNLESS_NET_MPSAFE(); 253 KERNEL_LOCK_UNLESS_NET_MPSAFE();
254 e = ifpromisc_locked(ifp, pswitch); 254 e = ifpromisc_locked(ifp, pswitch);
255 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 255 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
256 256
257 return e; 257 return e;
258} 258}
259 259
260void 260void
261vlanattach(int n) 261vlanattach(int n)
262{ 262{
263 263
264 /* 264 /*
265 * Nothing to do here, initialization is handled by the 265 * Nothing to do here, initialization is handled by the
266 * module initialization code in vlaninit() below. 266 * module initialization code in vlaninit() below.
267 */ 267 */
268} 268}
269 269
270static void 270static void
271vlaninit(void) 271vlaninit(void)
272{ 272{
273 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE); 273 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
274 LIST_INIT(&ifv_list.list); 274 LIST_INIT(&ifv_list.list);
275 275
276 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE); 276 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
277 vlan_psz = pserialize_create(); 277 vlan_psz = pserialize_create();
278 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET); 278 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
279 if_clone_attach(&vlan_cloner); 279 if_clone_attach(&vlan_cloner);
280 280
281 vlan_hash_init(); 281 vlan_hash_init();
282 MODULE_HOOK_SET(if_vlan_vlan_input_hook, "vlan_inp", vlan_input); 282 MODULE_HOOK_SET(if_vlan_vlan_input_hook, "vlan_inp", vlan_input);
283} 283}
284 284
285static int 285static int
286vlandetach(void) 286vlandetach(void)
287{ 287{
288 bool is_empty; 288 bool is_empty;
289 int error; 289 int error;
290 290
291 mutex_enter(&ifv_list.lock); 291 mutex_enter(&ifv_list.lock);
292 is_empty = LIST_EMPTY(&ifv_list.list); 292 is_empty = LIST_EMPTY(&ifv_list.list);
293 mutex_exit(&ifv_list.lock); 293 mutex_exit(&ifv_list.lock);
294 294
295 if (!is_empty) 295 if (!is_empty)
296 return EBUSY; 296 return EBUSY;
297 297
298 error = vlan_hash_fini(); 298 error = vlan_hash_fini();
299 if (error != 0) 299 if (error != 0)
300 return error; 300 return error;
301 301
302 if_clone_detach(&vlan_cloner); 302 if_clone_detach(&vlan_cloner);
303 psref_class_destroy(ifvm_psref_class); 303 psref_class_destroy(ifvm_psref_class);
304 pserialize_destroy(vlan_psz); 304 pserialize_destroy(vlan_psz);
305 mutex_destroy(&ifv_hash.lock); 305 mutex_destroy(&ifv_hash.lock);
306 mutex_destroy(&ifv_list.lock); 306 mutex_destroy(&ifv_list.lock);
307 307
308 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook); 308 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
309 return 0; 309 return 0;
310} 310}
311 311
312static void 312static void
313vlan_reset_linkname(struct ifnet *ifp) 313vlan_reset_linkname(struct ifnet *ifp)
314{ 314{
315 315
316 /* 316 /*
317 * We start out with a "802.1Q VLAN" type and zero-length 317 * We start out with a "802.1Q VLAN" type and zero-length
318 * addresses. When we attach to a parent interface, we 318 * addresses. When we attach to a parent interface, we
319 * inherit its type, address length, address, and data link 319 * inherit its type, address length, address, and data link
320 * type. 320 * type.
321 */ 321 */
322 322
323 ifp->if_type = IFT_L2VLAN; 323 ifp->if_type = IFT_L2VLAN;
324 ifp->if_addrlen = 0; 324 ifp->if_addrlen = 0;
325 ifp->if_dlt = DLT_NULL; 325 ifp->if_dlt = DLT_NULL;
326 if_alloc_sadl(ifp); 326 if_alloc_sadl(ifp);
327} 327}
328 328
329static int 329static int
330vlan_clone_create(struct if_clone *ifc, int unit) 330vlan_clone_create(struct if_clone *ifc, int unit)
331{ 331{
332 struct ifvlan *ifv; 332 struct ifvlan *ifv;
333 struct ifnet *ifp; 333 struct ifnet *ifp;
334 struct ifvlan_linkmib *mib; 334 struct ifvlan_linkmib *mib;
335 int rv; 335 int rv;
336 336
337 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO); 337 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
338 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP); 338 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
339 ifp = &ifv->ifv_if; 339 ifp = &ifv->ifv_if;
340 LIST_INIT(&ifv->ifv_mc_listhead); 340 LIST_INIT(&ifv->ifv_mc_listhead);
341 341
342 mib->ifvm_ifvlan = ifv; 342 mib->ifvm_ifvlan = ifv;
343 mib->ifvm_p = NULL; 343 mib->ifvm_p = NULL;
344 psref_target_init(&mib->ifvm_psref, ifvm_psref_class); 344 psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
345 345
346 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE); 346 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
347 ifv->ifv_psz = pserialize_create(); 347 ifv->ifv_psz = pserialize_create();
348 ifv->ifv_mib = mib; 348 ifv->ifv_mib = mib;
349 349
350 mutex_enter(&ifv_list.lock); 350 mutex_enter(&ifv_list.lock);
351 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list); 351 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
352 mutex_exit(&ifv_list.lock); 352 mutex_exit(&ifv_list.lock);
353 353
354 if_initname(ifp, ifc->ifc_name, unit); 354 if_initname(ifp, ifc->ifc_name, unit);
355 ifp->if_softc = ifv; 355 ifp->if_softc = ifv;
356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
357 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE; 357 ifp->if_extflags = IFEF_NO_LINK_STATE_CHANGE;
358#ifdef NET_MPSAFE 358#ifdef NET_MPSAFE
359 ifp->if_extflags |= IFEF_MPSAFE; 359 ifp->if_extflags |= IFEF_MPSAFE;
360#endif 360#endif
361 ifp->if_start = vlan_start; 361 ifp->if_start = vlan_start;
362 ifp->if_transmit = vlan_transmit; 362 ifp->if_transmit = vlan_transmit;
363 ifp->if_ioctl = vlan_ioctl; 363 ifp->if_ioctl = vlan_ioctl;
364 IFQ_SET_READY(&ifp->if_snd); 364 IFQ_SET_READY(&ifp->if_snd);
365 365
366 rv = if_initialize(ifp); 366 rv = if_initialize(ifp);
367 if (rv != 0) { 367 if (rv != 0) {
368 aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname, 368 aprint_error("%s: if_initialize failed(%d)\n", ifp->if_xname,
369 rv); 369 rv);
370 goto fail; 370 goto fail;
371 } 371 }
372 372
373 vlan_reset_linkname(ifp); 373 vlan_reset_linkname(ifp);
374 if_register(ifp); 374 if_register(ifp);
375 return 0; 375 return 0;
376 376
377fail: 377fail:
378 mutex_enter(&ifv_list.lock); 378 mutex_enter(&ifv_list.lock);
379 LIST_REMOVE(ifv, ifv_list); 379 LIST_REMOVE(ifv, ifv_list);
380 mutex_exit(&ifv_list.lock); 380 mutex_exit(&ifv_list.lock);
381 381
382 mutex_destroy(&ifv->ifv_lock); 382 mutex_destroy(&ifv->ifv_lock);
383 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class); 383 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
384 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib)); 384 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
385 free(ifv, M_DEVBUF); 385 free(ifv, M_DEVBUF);
386 386
387 return rv; 387 return rv;
388} 388}
389 389
390static int 390static int
391vlan_clone_destroy(struct ifnet *ifp) 391vlan_clone_destroy(struct ifnet *ifp)
392{ 392{
393 struct ifvlan *ifv = ifp->if_softc; 393 struct ifvlan *ifv = ifp->if_softc;
394 394
395 mutex_enter(&ifv_list.lock); 395 mutex_enter(&ifv_list.lock);
396 LIST_REMOVE(ifv, ifv_list); 396 LIST_REMOVE(ifv, ifv_list);
397 mutex_exit(&ifv_list.lock); 397 mutex_exit(&ifv_list.lock);
398 398
399 IFNET_LOCK(ifp); 399 IFNET_LOCK(ifp);
400 vlan_unconfig(ifp); 400 vlan_unconfig(ifp);
401 IFNET_UNLOCK(ifp); 401 IFNET_UNLOCK(ifp);
402 if_detach(ifp); 402 if_detach(ifp);
403 403
404 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class); 404 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
405 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib)); 405 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
406 pserialize_destroy(ifv->ifv_psz); 406 pserialize_destroy(ifv->ifv_psz);
407 mutex_destroy(&ifv->ifv_lock); 407 mutex_destroy(&ifv->ifv_lock);
408 free(ifv, M_DEVBUF); 408 free(ifv, M_DEVBUF);
409 409
410 return 0; 410 return 0;
411} 411}
412 412
413/* 413/*
414 * Configure a VLAN interface. 414 * Configure a VLAN interface.
415 */ 415 */
416static int 416static int
417vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag) 417vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
418{ 418{
419 struct ifnet *ifp = &ifv->ifv_if; 419 struct ifnet *ifp = &ifv->ifv_if;
420 struct ifvlan_linkmib *nmib = NULL; 420 struct ifvlan_linkmib *nmib = NULL;
421 struct ifvlan_linkmib *omib = NULL; 421 struct ifvlan_linkmib *omib = NULL;
422 struct ifvlan_linkmib *checkmib; 422 struct ifvlan_linkmib *checkmib;
423 struct psref_target *nmib_psref = NULL; 423 struct psref_target *nmib_psref = NULL;
424 const uint16_t vid = EVL_VLANOFTAG(tag); 424 const uint16_t vid = EVL_VLANOFTAG(tag);
425 int error = 0; 425 int error = 0;
426 int idx; 426 int idx;
427 bool omib_cleanup = false; 427 bool omib_cleanup = false;
428 struct psref psref; 428 struct psref psref;
429 429
430 /* VLAN ID 0 and 4095 are reserved in the spec */ 430 /* VLAN ID 0 and 4095 are reserved in the spec */
431 if ((vid == 0) || (vid == 0xfff)) 431 if ((vid == 0) || (vid == 0xfff))
432 return EINVAL; 432 return EINVAL;
433 433
434 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP); 434 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
435 mutex_enter(&ifv->ifv_lock); 435 mutex_enter(&ifv->ifv_lock);
436 omib = ifv->ifv_mib; 436 omib = ifv->ifv_mib;
437 437
438 if (omib->ifvm_p != NULL) { 438 if (omib->ifvm_p != NULL) {
439 error = EBUSY; 439 error = EBUSY;
440 goto done; 440 goto done;
441 } 441 }
442 442
443 /* Duplicate check */ 443 /* Duplicate check */
444 checkmib = vlan_lookup_tag_psref(p, vid, &psref); 444 checkmib = vlan_lookup_tag_psref(p, vid, &psref);
445 if (checkmib != NULL) { 445 if (checkmib != NULL) {
446 vlan_putref_linkmib(checkmib, &psref); 446 vlan_putref_linkmib(checkmib, &psref);
447 error = EEXIST; 447 error = EEXIST;
448 goto done; 448 goto done;
449 } 449 }
450 450
451 *nmib = *omib; 451 *nmib = *omib;
452 nmib_psref = &nmib->ifvm_psref; 452 nmib_psref = &nmib->ifvm_psref;
453 453
454 psref_target_init(nmib_psref, ifvm_psref_class); 454 psref_target_init(nmib_psref, ifvm_psref_class);
455 455
456 switch (p->if_type) { 456 switch (p->if_type) {
457 case IFT_ETHER: 457 case IFT_ETHER:
458 { 458 {
459 struct ethercom *ec = (void *)p; 459 struct ethercom *ec = (void *)p;
460 struct vlanid_list *vidmem; 460 struct vlanid_list *vidmem;
461 461
462 nmib->ifvm_msw = &vlan_ether_multisw; 462 nmib->ifvm_msw = &vlan_ether_multisw;
463 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN; 463 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
464 nmib->ifvm_mintu = ETHERMIN; 464 nmib->ifvm_mintu = ETHERMIN;
465 465
466 if (ec->ec_nvlans++ == 0) { 466 if (ec->ec_nvlans++ == 0) {
467 IFNET_LOCK(p); 467 IFNET_LOCK(p);
468 error = ether_enable_vlan_mtu(p); 468 error = ether_enable_vlan_mtu(p);
469 IFNET_UNLOCK(p); 469 IFNET_UNLOCK(p);
470 if (error >= 0) { 470 if (error >= 0) {
471 if (error) { 471 if (error) {
472 ec->ec_nvlans--; 472 ec->ec_nvlans--;
473 goto done; 473 goto done;
474 } 474 }
475 nmib->ifvm_mtufudge = 0; 475 nmib->ifvm_mtufudge = 0;
476 } else { 476 } else {
477 /* 477 /*
478 * Fudge the MTU by the encapsulation size. This 478 * Fudge the MTU by the encapsulation size. This
479 * makes us incompatible with strictly compliant 479 * makes us incompatible with strictly compliant
480 * 802.1Q implementations, but allows us to use 480 * 802.1Q implementations, but allows us to use
481 * the feature with other NetBSD 481 * the feature with other NetBSD
482 * implementations, which might still be useful. 482 * implementations, which might still be useful.
483 */ 483 */
484 nmib->ifvm_mtufudge = nmib->ifvm_encaplen; 484 nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
485 } 485 }
486 error = 0; 486 error = 0;
487 } 487 }
488 /* 488 /* Add a vid to the list */
489 * Add a vid to the list even if it's not enabled in case 489 vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP);
490 * it's enabled later. 490 vidmem->vid = vid;
491 */ 491 ETHER_LOCK(ec);
492 if (ec->ec_capabilities & ETHERCAP_VLAN_HWFILTER) { 492 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
493 vidmem = kmem_alloc(sizeof(struct vlanid_list), 493 ETHER_UNLOCK(ec);
494 KM_SLEEP); 494
495 if (vidmem == NULL){ 495 if (ec->ec_vlan_cb != NULL) {
 496 /*
 497 * Call ec_vlan_cb(). It will setup VLAN HW filter or
 498 * HW tagging function.
 499 */
 500 error = (*ec->ec_vlan_cb)(ec, vid, true);
 501 if (error) {
496 ec->ec_nvlans--; 502 ec->ec_nvlans--;
497 if (ec->ec_nvlans == 0) 503 if (ec->ec_nvlans == 0) {
 504 IFNET_LOCK(p);
498 (void)ether_disable_vlan_mtu(p); 505 (void)ether_disable_vlan_mtu(p);
499 error = ENOMEM; 506 IFNET_UNLOCK(p);
500 goto done; 
501 } 
502 vidmem->vid = vid; 
503 mutex_enter(ec->ec_lock); 
504 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list); 
505 mutex_exit(ec->ec_lock); 
506 } 
507 if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) { 
508 if (ec->ec_vlan_cb != NULL) { 
509 error = (*ec->ec_vlan_cb)(ec, vid, true); 
510 if (error) { 
511 ec->ec_nvlans--; 
512 if (ec->ec_nvlans == 0) 
513 (void)ether_disable_vlan_mtu(p); 
514 goto done; 
515 } 507 }
 508 goto done;
516 } 509 }
517 } 510 }
518 /* 511 /*
519 * If the parent interface can do hardware-assisted 512 * If the parent interface can do hardware-assisted
520 * VLAN encapsulation, then propagate its hardware- 513 * VLAN encapsulation, then propagate its hardware-
521 * assisted checksumming flags and tcp segmentation 514 * assisted checksumming flags and tcp segmentation
522 * offload. 515 * offload.
523 */ 516 */
524 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) { 517 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
525 ifp->if_capabilities = p->if_capabilities & 518 ifp->if_capabilities = p->if_capabilities &
526 (IFCAP_TSOv4 | IFCAP_TSOv6 | 519 (IFCAP_TSOv4 | IFCAP_TSOv6 |
527 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 520 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
528 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 521 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
529 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 522 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
530 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx | 523 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
531 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx); 524 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
532 } 525 }
533 526
534 /* 527 /*
535 * We inherit the parent's Ethernet address. 528 * We inherit the parent's Ethernet address.
536 */ 529 */
537 ether_ifattach(ifp, CLLADDR(p->if_sadl)); 530 ether_ifattach(ifp, CLLADDR(p->if_sadl));
538 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */ 531 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
539 break; 532 break;
540 } 533 }
541 534
542 default: 535 default:
543 error = EPROTONOSUPPORT; 536 error = EPROTONOSUPPORT;
544 goto done; 537 goto done;
545 } 538 }
546 539
547 nmib->ifvm_p = p; 540 nmib->ifvm_p = p;
548 nmib->ifvm_tag = vid; 541 nmib->ifvm_tag = vid;
549 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge; 542 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
550 ifv->ifv_if.if_flags = p->if_flags & 543 ifv->ifv_if.if_flags = p->if_flags &
551 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 544 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
552 545
553 /* 546 /*
554 * Inherit the if_type from the parent. This allows us 547 * Inherit the if_type from the parent. This allows us
555 * to participate in bridges of that type. 548 * to participate in bridges of that type.
556 */ 549 */
557 ifv->ifv_if.if_type = p->if_type; 550 ifv->ifv_if.if_type = p->if_type;
558 551
559 PSLIST_ENTRY_INIT(ifv, ifv_hash); 552 PSLIST_ENTRY_INIT(ifv, ifv_hash);
560 idx = vlan_tag_hash(vid, ifv_hash.mask); 553 idx = vlan_tag_hash(vid, ifv_hash.mask);
561 554
562 mutex_enter(&ifv_hash.lock); 555 mutex_enter(&ifv_hash.lock);
563 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash); 556 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
564 mutex_exit(&ifv_hash.lock); 557 mutex_exit(&ifv_hash.lock);
565 558
566 vlan_linkmib_update(ifv, nmib); 559 vlan_linkmib_update(ifv, nmib);
567 nmib = NULL; 560 nmib = NULL;
568 nmib_psref = NULL; 561 nmib_psref = NULL;
569 omib_cleanup = true; 562 omib_cleanup = true;
570 563
571done: 564done:
572 mutex_exit(&ifv->ifv_lock); 565 mutex_exit(&ifv->ifv_lock);
573 566
574 if (nmib_psref) 567 if (nmib_psref)
575 psref_target_destroy(nmib_psref, ifvm_psref_class); 568 psref_target_destroy(nmib_psref, ifvm_psref_class);
576 if (nmib) 569 if (nmib)
577 kmem_free(nmib, sizeof(*nmib)); 570 kmem_free(nmib, sizeof(*nmib));
578 if (omib_cleanup) 571 if (omib_cleanup)
579 kmem_free(omib, sizeof(*omib)); 572 kmem_free(omib, sizeof(*omib));
580 573
581 return error; 574 return error;
582} 575}
583 576
/*
 * Unconfigure a VLAN interface: detach it from its parent and tear the
 * link mib down.  Must be called with the interface's IFNET_LOCK held.
 */
static void
vlan_unconfig(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *nmib = NULL;
	int error;

	KASSERT(IFNET_LOCKED(ifp));

	/* Pre-allocate the replacement mib (KM_SLEEP cannot fail). */
	nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);

	mutex_enter(&ifv->ifv_lock);
	error = vlan_unconfig_locked(ifv, nmib);
	mutex_exit(&ifv->ifv_lock);

	/*
	 * On success vlan_unconfig_locked() installed nmib and took
	 * ownership of it; on failure it is still ours to free.
	 */
	if (error)
		kmem_free(nmib, sizeof(*nmib));
}
/*
 * Guts of vlan_unconfig().  Called with IFNET_LOCK(ifp) and ifv->ifv_lock
 * held; both locks are temporarily dropped around ether_ifdetach().
 * On success, ownership of nmib passes to ifv (it becomes the live mib)
 * and the old mib is freed.  Returns 0 on success, -1 if the interface
 * has no parent.
 */
static int
vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifnet *p;
	struct ifnet *ifp = &ifv->ifv_if;
	struct psref_target *nmib_psref = NULL;
	struct ifvlan_linkmib *omib;
	int error = 0;

	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(mutex_owned(&ifv->ifv_lock));

	ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);

	omib = ifv->ifv_mib;
	p = omib->ifvm_p;

	if (p == NULL) {
		/* Not configured; nothing to detach from. */
		error = -1;
		goto done;
	}

	/* Clone the old mib and give the clone its own psref target. */
	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;
	psref_target_init(nmib_psref, ifvm_psref_class);

	/*
	 * Since the interface is being unconfigured, we need to empty the
	 * list of multicast groups that we may have joined while we were
	 * alive and remove them from the parent's list also.
	 */
	(*nmib->ifvm_msw->vmsw_purgemulti)(ifv);

	/* Disconnect from parent. */
	switch (p->if_type) {
	case IFT_ETHER:
	    {
		struct ethercom *ec = (void *)p;
		struct vlanid_list *vlanidp;
		uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);

		/*
		 * Unlink our VID from the parent's list.  VIDs are never
		 * duplicated, so stop at the first match; the free is done
		 * outside the lock to keep the hold time short.
		 */
		ETHER_LOCK(ec);
		SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
			if (vlanidp->vid == vid) {
				SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
				    vlanid_list, vid_list);
				break;
			}
		}
		ETHER_UNLOCK(ec);
		if (vlanidp != NULL)
			kmem_free(vlanidp, sizeof(*vlanidp));

		if (ec->ec_vlan_cb != NULL) {
			/*
			 * Call ec_vlan_cb(). It will setup VLAN HW filter or
			 * HW tagging function.
			 */
			(void)(*ec->ec_vlan_cb)(ec, vid, false);
		}
		if (--ec->ec_nvlans == 0) {
			/* Last VLAN is gone; shrink the parent's MTU back. */
			IFNET_LOCK(p);
			(void)ether_disable_vlan_mtu(p);
			IFNET_UNLOCK(p);
		}

		/* XXX ether_ifdetach must not be called with IFNET_LOCK */
		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
		ether_ifdetach(ifp);
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* if_free_sadl must be called with IFNET_LOCK */
		if_free_sadl(ifp, 1);

		/* Restore vlan_ioctl overwritten by ether_ifdetach */
		ifp->if_ioctl = vlan_ioctl;
		vlan_reset_linkname(ifp);
		break;
	    }

	default:
		panic("%s: impossible", __func__);
	}

	nmib->ifvm_p = NULL;
	ifv->ifv_if.if_mtu = 0;
	ifv->ifv_flags = 0;

	/* Remove from the tag hash and wait for readers to drain. */
	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_REMOVE(ifv, ifv_hash);
	pserialize_perform(vlan_psz);
	mutex_exit(&ifv_hash.lock);
	PSLIST_ENTRY_DESTROY(ifv, ifv_hash);

	/* Publish nmib as the live mib; omib is now ours to free. */
	vlan_linkmib_update(ifv, nmib);

	mutex_exit(&ifv->ifv_lock);

	nmib_psref = NULL;
	kmem_free(omib, sizeof(*omib));

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	/* To delete v6 link local addresses */
	if (in6_present)
		in6_ifdetach(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		vlan_safe_ifpromisc_locked(ifp, 0);
	if_down_locked(ifp);
	ifp->if_capabilities = 0;
	mutex_enter(&ifv->ifv_lock);
done:

	/* Only reached non-NULL on the early-error path. */
	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);

	return error;
}
720 721
721static void 722static void
722vlan_hash_init(void) 723vlan_hash_init(void)
723{ 724{
724 725
725 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true, 726 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
726 &ifv_hash.mask); 727 &ifv_hash.mask);
727} 728}
728 729
729static int 730static int
730vlan_hash_fini(void) 731vlan_hash_fini(void)
731{ 732{
732 int i; 733 int i;
733 734
734 mutex_enter(&ifv_hash.lock); 735 mutex_enter(&ifv_hash.lock);
735 736
736 for (i = 0; i < ifv_hash.mask + 1; i++) { 737 for (i = 0; i < ifv_hash.mask + 1; i++) {
737 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan, 738 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
738 ifv_hash) != NULL) { 739 ifv_hash) != NULL) {
739 mutex_exit(&ifv_hash.lock); 740 mutex_exit(&ifv_hash.lock);
740 return EBUSY; 741 return EBUSY;
741 } 742 }
742 } 743 }
743 744
744 for (i = 0; i < ifv_hash.mask + 1; i++) 745 for (i = 0; i < ifv_hash.mask + 1; i++)
745 PSLIST_DESTROY(&ifv_hash.lists[i]); 746 PSLIST_DESTROY(&ifv_hash.lists[i]);
746 747
747 mutex_exit(&ifv_hash.lock); 748 mutex_exit(&ifv_hash.lock);
748 749
749 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask); 750 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
750 751
751 ifv_hash.lists = NULL; 752 ifv_hash.lists = NULL;
752 ifv_hash.mask = 0; 753 ifv_hash.mask = 0;
753 754
754 return 0; 755 return 0;
755} 756}
756 757
/*
 * Hash a 12-bit VLAN tag into a bucket index.  Folds the high byte into
 * the low bits, mixes once more, then applies the table mask.
 * (u_long spelled as its underlying unsigned long.)
 */
static int
vlan_tag_hash(uint16_t tag, unsigned long mask)
{
	uint32_t h = tag;

	h ^= h >> 8;
	h ^= h >> 2;

	return h & mask;
}
767 768
/*
 * Acquire a passive reference to sc's current link mib, or return NULL
 * if the mib is gone.  The caller must be bound to a CPU (curlwp_bind)
 * and must release the reference with vlan_putref_linkmib().
 */
static struct ifvlan_linkmib *
vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
{
	struct ifvlan_linkmib *mib;
	int s;

	s = pserialize_read_enter();
	mib = sc->ifv_mib;
	if (mib == NULL) {
		pserialize_read_exit(s);
		return NULL;
	}
	/* Pair with the membar_producer() in vlan_linkmib_update(). */
	membar_datadep_consumer();
	/* Must acquire inside the read section so the mib cannot be freed. */
	psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
	pserialize_read_exit(s);

	return mib;
}
786 787
787static void 788static void
788vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref) 789vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
789{ 790{
790 if (mib == NULL) 791 if (mib == NULL)
791 return; 792 return;
792 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class); 793 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
793} 794}
794 795
/*
 * Look up the VLAN on parent ifp with the given tag and return its link
 * mib with a passive reference held, or NULL if not found.  The caller
 * must be bound to a CPU and must drop the reference with
 * vlan_putref_linkmib().
 */
static struct ifvlan_linkmib *
vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
{
	int idx;
	int s;
	struct ifvlan *sc;

	idx = vlan_tag_hash(tag, ifv_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
	    ifv_hash) {
		struct ifvlan_linkmib *mib = sc->ifv_mib;
		if (mib == NULL)
			continue;
		if (mib->ifvm_tag != tag)
			continue;
		if (mib->ifvm_p != ifp)
			continue;

		/* Acquire inside the read section, as in vlan_getref_linkmib. */
		psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
		pserialize_read_exit(s);
		return mib;
	}
	pserialize_read_exit(s);
	return NULL;
}
822 823
/*
 * Atomically replace ifv's link mib with nmib, wait for all pserialize
 * readers and psref holders of the old mib to drain, then destroy the
 * old mib's psref target.  The old mib itself is freed by the caller.
 * Called with ifv->ifv_lock held.
 */
static void
vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifvlan_linkmib *omib = ifv->ifv_mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/* Ensure nmib's contents are visible before the pointer swing. */
	membar_producer();
	ifv->ifv_mib = nmib;

	pserialize_perform(ifv->ifv_psz);
	psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
}
836 837
/*
 * Called when a parent interface is detaching; destroy any VLAN
 * configuration for the parent interface.
 */
void
vlan_ifdetach(struct ifnet *p)
{
	struct ifvlan *ifv;
	struct ifvlan_linkmib *mib, **nmibs;
	struct psref psref;
	int error;
	int bound;
	int i, cnt = 0;

	bound = curlwp_bind();

	/* Pass 1: count the VLANs attached to this parent. */
	mutex_enter(&ifv_list.lock);
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL)
			continue;

		if (mib->ifvm_p == p)
			cnt++;

		vlan_putref_linkmib(mib, &psref);
	}
	mutex_exit(&ifv_list.lock);

	if (cnt == 0) {
		curlwp_bindx(bound);
		return;
	}

	/*
	 * The value of "cnt" does not increase while ifv_list.lock
	 * and ifv->ifv_lock are released here, because the parent
	 * interface is detaching.
	 */
	nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
	for (i = 0; i < cnt; i++) {
		nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
	}

	/* Pass 2: unconfigure each matching VLAN, consuming one nmib each. */
	mutex_enter(&ifv_list.lock);

	i = 0;
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		struct ifnet *ifp = &ifv->ifv_if;

		/* IFNET_LOCK must be held before ifv_lock. */
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* XXX ifv_mib = NULL? */
		if (ifv->ifv_mib->ifvm_p == p) {
			KASSERTMSG(i < cnt,
			    "no memory for unconfig, parent=%s", p->if_xname);
			error = vlan_unconfig_locked(ifv, nmibs[i]);
			if (!error) {
				/* nmibs[i] is now owned by ifv. */
				nmibs[i] = NULL;
				i++;
			}

		}

		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
	}

	mutex_exit(&ifv_list.lock);

	curlwp_bindx(bound);

	/* Free any pre-allocated mibs that were not consumed. */
	for (i = 0; i < cnt; i++) {
		if (nmibs[i])
			kmem_free(nmibs[i], sizeof(*nmibs[i]));
	}

	kmem_free(nmibs, sizeof(*nmibs) * cnt);

	return;
}
920 921
/*
 * Synchronize the parent's promiscuous state with the VLAN interface's
 * IFF_PROMISC flag, tracking our contribution in IFVF_PROMISC so the
 * parent is only toggled once per transition.  Returns 0 or an error
 * from ifpromisc, or EBUSY if the interface is unconfigured.
 */
static int
vlan_set_promisc(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	bound = curlwp_bind();
	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		curlwp_bindx(bound);
		return EBUSY;
	}

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
			error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
			if (error == 0)
				ifv->ifv_flags |= IFVF_PROMISC;
		}
	} else {
		if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
			error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
			if (error == 0)
				ifv->ifv_flags &= ~IFVF_PROMISC;
		}
	}
	vlan_putref_linkmib(mib, &psref);
	curlwp_bindx(bound);

	return error;
}
955 956
/*
 * ioctl handler for VLAN interfaces.  Most cases take a passive
 * reference on the link mib (bind to CPU, getref, act, putref, unbind)
 * so the parent pointer stays valid without holding ifv_lock across
 * potentially sleeping operations.
 */
static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lwp *l = curlwp;
	struct ifvlan *ifv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifnet *pr;
	struct ifcapreq *ifcr;
	struct vlanreq vlr;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	switch (cmd) {
	case SIOCSIFMTU:
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			/* Not configured on a parent yet. */
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else if (
		    ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
		    ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
			/* Requested MTU does not fit within the parent's. */
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			error = ifioctl_common(ifp, cmd, data);
			if (error == ENETRESET)
				error = 0;
		}

		break;

	case SIOCSETVLAN:
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE,
		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
		    NULL)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
			break;

		if (vlr.vlr_parent[0] == '\0') {
			/* Empty parent name means "unconfigure". */
			bound = curlwp_bind();
			mib = vlan_getref_linkmib(ifv, &psref);
			if (mib == NULL) {
				curlwp_bindx(bound);
				error = EBUSY;
				break;
			}

			if (mib->ifvm_p != NULL &&
			    (ifp->if_flags & IFF_PROMISC) != 0)
				error = vlan_safe_ifpromisc(mib->ifvm_p, 0);

			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			vlan_unconfig(ifp);
			break;
		}
		if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
			error = EINVAL;		/* check for valid tag */
			break;
		}
		if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
			error = ENOENT;
			break;
		}

		error = vlan_config(ifv, pr, vlr.vlr_tag);
		if (error != 0)
			break;

		/* Update promiscuous mode, if necessary. */
		vlan_set_promisc(ifp);

		ifp->if_flags |= IFF_RUNNING;
		break;

	case SIOCGETVLAN:
		memset(&vlr, 0, sizeof(vlr));
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}
		if (mib->ifvm_p != NULL) {
			snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
			    mib->ifvm_p->if_xname);
			vlr.vlr_tag = mib->ifvm_tag;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * For promiscuous mode, we enable promiscuous mode on
		 * the parent if we need promiscuous on the VLAN interface.
		 */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p != NULL)
			error = vlan_set_promisc(ifp);
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCADDMULTI:
		/* Multicast switch methods expect ifv_lock to be held. */
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}

		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCDELMULTI:
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}
		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCSIFCAP:
		ifcr = data;
		/* make sure caps are enabled on parent */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}
		if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
		    ifcr->ifcr_capenable) {
			/* Parent does not have all requested caps enabled. */
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}

		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCINITIFADDR:
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			error = EINVAL;
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			break;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return error;
}
1180 1181
1181static int 1182static int
1182vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr) 1183vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
1183{ 1184{
1184 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr); 1185 const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1185 struct vlan_mc_entry *mc; 1186 struct vlan_mc_entry *mc;
1186 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN]; 1187 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1187 struct ifvlan_linkmib *mib; 1188 struct ifvlan_linkmib *mib;
1188 int error; 1189 int error;
1189 1190
1190 KASSERT(mutex_owned(&ifv->ifv_lock)); 1191 KASSERT(mutex_owned(&ifv->ifv_lock));
1191 1192
1192 if (sa->sa_len > sizeof(struct sockaddr_storage)) 1193 if (sa->sa_len > sizeof(struct sockaddr_storage))
1193 return EINVAL; 1194 return EINVAL;
1194 1195
1195 error = ether_addmulti(sa, &ifv->ifv_ec); 1196 error = ether_addmulti(sa, &ifv->ifv_ec);
1196 if (error != ENETRESET) 1197 if (error != ENETRESET)
1197 return error; 1198 return error;
1198 1199
1199 /* 1200 /*
1200 * This is a new multicast address. We have to tell parent 1201 * This is a new multicast address. We have to tell parent
1201 * about it. Also, remember this multicast address so that 1202 * about it. Also, remember this multicast address so that
1202 * we can delete it on unconfigure. 1203 * we can delete it on unconfigure.
1203 */ 1204 */
1204 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT); 1205 mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
1205 if (mc == NULL) { 1206 if (mc == NULL) {
1206 error = ENOMEM; 1207 error = ENOMEM;
1207 goto alloc_failed; 1208 goto alloc_failed;
1208 } 1209 }
1209 1210
1210 /* 1211 /*
1211 * Since ether_addmulti() returned ENETRESET, the following two 1212 * Since ether_addmulti() returned ENETRESET, the following two
1212 * statements shouldn't fail. Here ifv_ec is implicitly protected 1213 * statements shouldn't fail. Here ifv_ec is implicitly protected
1213 * by the ifv_lock lock. 1214 * by the ifv_lock lock.
1214 */ 1215 */
1215 error = ether_multiaddr(sa, addrlo, addrhi); 1216 error = ether_multiaddr(sa, addrlo, addrhi);
1216 KASSERT(error == 0); 1217 KASSERT(error == 0);
1217 1218
1218 ETHER_LOCK(&ifv->ifv_ec); 1219 ETHER_LOCK(&ifv->ifv_ec);
1219 mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec); 1220 mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1220 ETHER_UNLOCK(&ifv->ifv_ec); 1221 ETHER_UNLOCK(&ifv->ifv_ec);
1221 1222
1222 KASSERT(mc->mc_enm != NULL); 1223 KASSERT(mc->mc_enm != NULL);
1223 1224
1224 memcpy(&mc->mc_addr, sa, sa->sa_len); 1225 memcpy(&mc->mc_addr, sa, sa->sa_len);
1225 LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries); 1226 LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);
1226 1227
1227 mib = ifv->ifv_mib; 1228 mib = ifv->ifv_mib;
1228 1229
1229 KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p); 1230 KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1230 error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa); 1231 error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
1231 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p); 1232 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
1232 1233
1233 if (error != 0) 1234 if (error != 0)
1234 goto ioctl_failed; 1235 goto ioctl_failed;
1235 return error; 1236 return error;
1236 1237
1237ioctl_failed: 1238ioctl_failed:
1238 LIST_REMOVE(mc, mc_entries); 1239 LIST_REMOVE(mc, mc_entries);
1239 free(mc, M_DEVBUF); 1240 free(mc, M_DEVBUF);
1240 1241
1241alloc_failed: 1242alloc_failed:
1242 (void)ether_delmulti(sa, &ifv->ifv_ec); 1243 (void)ether_delmulti(sa, &ifv->ifv_ec);
1243 return error; 1244 return error;
1244} 1245}
1245 1246
1246static int 1247static int
1247vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr) 1248vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1248{ 1249{
1249 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1250 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1250 struct ether_multi *enm; 1251 struct ether_multi *enm;
1251 struct vlan_mc_entry *mc; 1252 struct vlan_mc_entry *mc;
1252 struct ifvlan_linkmib *mib; 1253 struct ifvlan_linkmib *mib;
1253 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN]; 1254 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1254 int error; 1255 int error;
1255 1256
1256 KASSERT(mutex_owned(&ifv->ifv_lock)); 1257 KASSERT(mutex_owned(&ifv->ifv_lock));
1257 1258
1258 /* 1259 /*
1259 * Find a key to lookup vlan_mc_entry. We have to do this 1260 * Find a key to lookup vlan_mc_entry. We have to do this
1260 * before calling ether_delmulti for obvious reasons. 1261 * before calling ether_delmulti for obvious reasons.
1261 */ 1262 */
1262 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0) 1263 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1263 return error; 1264 return error;
1264 1265
1265 ETHER_LOCK(&ifv->ifv_ec); 1266 ETHER_LOCK(&ifv->ifv_ec);
1266 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec); 1267 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1267 ETHER_UNLOCK(&ifv->ifv_ec); 1268 ETHER_UNLOCK(&ifv->ifv_ec);
1268 if (enm == NULL) 1269 if (enm == NULL)
1269 return EINVAL; 1270 return EINVAL;
1270 1271
1271 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) { 1272 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
1272 if (mc->mc_enm == enm) 1273 if (mc->mc_enm == enm)
1273 break; 1274 break;
1274 } 1275 }
1275 1276
1276 /* We woun't delete entries we didn't add */ 1277 /* We woun't delete entries we didn't add */
1277 if (mc == NULL) 1278 if (mc == NULL)
1278 return EINVAL; 1279 return EINVAL;
1279 1280
1280 error = ether_delmulti(sa, &ifv->ifv_ec); 1281 error = ether_delmulti(sa, &ifv->ifv_ec);
1281 if (error != ENETRESET) 1282 if (error != ENETRESET)
1282 return error; 1283 return error;
1283 1284
1284 /* We no longer use this multicast address. Tell parent so. */ 1285 /* We no longer use this multicast address. Tell parent so. */
1285 mib = ifv->ifv_mib; 1286 mib = ifv->ifv_mib;
1286 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa); 1287 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1287 1288
1288 if (error == 0) { 1289 if (error == 0) {
1289 /* And forget about this address. */ 1290 /* And forget about this address. */
1290 LIST_REMOVE(mc, mc_entries); 1291 LIST_REMOVE(mc, mc_entries);
1291 free(mc, M_DEVBUF); 1292 free(mc, M_DEVBUF);
1292 } else { 1293 } else {
1293 (void)ether_addmulti(sa, &ifv->ifv_ec); 1294 (void)ether_addmulti(sa, &ifv->ifv_ec);
1294 } 1295 }
1295 1296
1296 return error; 1297 return error;
1297} 1298}
1298 1299
1299/* 1300/*
1300 * Delete any multicast address we have asked to add from parent 1301 * Delete any multicast address we have asked to add from parent
1301 * interface. Called when the vlan is being unconfigured. 1302 * interface. Called when the vlan is being unconfigured.
1302 */ 1303 */
1303static void 1304static void
1304vlan_ether_purgemulti(struct ifvlan *ifv) 1305vlan_ether_purgemulti(struct ifvlan *ifv)
1305{ 1306{
1306 struct vlan_mc_entry *mc; 1307 struct vlan_mc_entry *mc;
1307 struct ifvlan_linkmib *mib; 1308 struct ifvlan_linkmib *mib;
1308 1309
1309 KASSERT(mutex_owned(&ifv->ifv_lock)); 1310 KASSERT(mutex_owned(&ifv->ifv_lock));
1310 mib = ifv->ifv_mib; 1311 mib = ifv->ifv_mib;
1311 if (mib == NULL) { 1312 if (mib == NULL) {
1312 return; 1313 return;
1313 } 1314 }
1314 1315
1315 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) { 1316 while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
1316 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI, 1317 (void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
1317 sstocsa(&mc->mc_addr)); 1318 sstocsa(&mc->mc_addr));
1318 LIST_REMOVE(mc, mc_entries); 1319 LIST_REMOVE(mc, mc_entries);
1319 free(mc, M_DEVBUF); 1320 free(mc, M_DEVBUF);
1320 } 1321 }
1321} 1322}
1322 1323
1323static void 1324static void
1324vlan_start(struct ifnet *ifp) 1325vlan_start(struct ifnet *ifp)
1325{ 1326{
1326 struct ifvlan *ifv = ifp->if_softc; 1327 struct ifvlan *ifv = ifp->if_softc;
1327 struct ifnet *p; 1328 struct ifnet *p;
1328 struct ethercom *ec; 1329 struct ethercom *ec;
1329 struct mbuf *m; 1330 struct mbuf *m;
1330 struct ifvlan_linkmib *mib; 1331 struct ifvlan_linkmib *mib;
1331 struct psref psref; 1332 struct psref psref;
1332 int error; 1333 int error;
1333 1334
1334 mib = vlan_getref_linkmib(ifv, &psref); 1335 mib = vlan_getref_linkmib(ifv, &psref);
1335 if (mib == NULL) 1336 if (mib == NULL)
1336 return; 1337 return;
1337 p = mib->ifvm_p; 1338 p = mib->ifvm_p;
1338 ec = (void *)mib->ifvm_p; 1339 ec = (void *)mib->ifvm_p;
1339 1340
1340 ifp->if_flags |= IFF_OACTIVE; 1341 ifp->if_flags |= IFF_OACTIVE;
1341 1342
1342 for (;;) { 1343 for (;;) {
1343 IFQ_DEQUEUE(&ifp->if_snd, m); 1344 IFQ_DEQUEUE(&ifp->if_snd, m);
1344 if (m == NULL) 1345 if (m == NULL)
1345 break; 1346 break;
1346 1347
1347#ifdef ALTQ 1348#ifdef ALTQ
1348 /* 1349 /*
1349 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is 1350 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
1350 * defined. 1351 * defined.
1351 */ 1352 */
1352 KERNEL_LOCK(1, NULL); 1353 KERNEL_LOCK(1, NULL);
1353 /* 1354 /*
1354 * If ALTQ is enabled on the parent interface, do 1355 * If ALTQ is enabled on the parent interface, do
1355 * classification; the queueing discipline might 1356 * classification; the queueing discipline might
1356 * not require classification, but might require 1357 * not require classification, but might require
1357 * the address family/header pointer in the pktattr. 1358 * the address family/header pointer in the pktattr.
1358 */ 1359 */
1359 if (ALTQ_IS_ENABLED(&p->if_snd)) { 1360 if (ALTQ_IS_ENABLED(&p->if_snd)) {
1360 switch (p->if_type) { 1361 switch (p->if_type) {
1361 case IFT_ETHER: 1362 case IFT_ETHER:
1362 altq_etherclassify(&p->if_snd, m); 1363 altq_etherclassify(&p->if_snd, m);
1363 break; 1364 break;
1364 default: 1365 default:
1365 panic("%s: impossible (altq)", __func__); 1366 panic("%s: impossible (altq)", __func__);
1366 } 1367 }
1367 } 1368 }
1368 KERNEL_UNLOCK_ONE(NULL); 1369 KERNEL_UNLOCK_ONE(NULL);
1369#endif /* ALTQ */ 1370#endif /* ALTQ */
1370 1371
1371 bpf_mtap(ifp, m, BPF_D_OUT); 1372 bpf_mtap(ifp, m, BPF_D_OUT);
1372 /* 1373 /*
1373 * If the parent can insert the tag itself, just mark 1374 * If the parent can insert the tag itself, just mark
1374 * the tag in the mbuf header. 1375 * the tag in the mbuf header.
1375 */ 1376 */
1376 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) { 1377 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1377 vlan_set_tag(m, mib->ifvm_tag); 1378 vlan_set_tag(m, mib->ifvm_tag);
1378 } else { 1379 } else {
1379 /* 1380 /*
1380 * insert the tag ourselves 1381 * insert the tag ourselves
1381 */ 1382 */
1382 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT); 1383 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1383 if (m == NULL) { 1384 if (m == NULL) {
1384 printf("%s: unable to prepend encap header", 1385 printf("%s: unable to prepend encap header",
1385 p->if_xname); 1386 p->if_xname);
1386 ifp->if_oerrors++; 1387 ifp->if_oerrors++;
1387 continue; 1388 continue;
1388 } 1389 }
1389 1390
1390 switch (p->if_type) { 1391 switch (p->if_type) {
1391 case IFT_ETHER: 1392 case IFT_ETHER:
1392 { 1393 {
1393 struct ether_vlan_header *evl; 1394 struct ether_vlan_header *evl;
1394 1395
1395 if (m->m_len < sizeof(struct ether_vlan_header)) 1396 if (m->m_len < sizeof(struct ether_vlan_header))
1396 m = m_pullup(m, 1397 m = m_pullup(m,
1397 sizeof(struct ether_vlan_header)); 1398 sizeof(struct ether_vlan_header));
1398 if (m == NULL) { 1399 if (m == NULL) {
1399 printf("%s: unable to pullup encap " 1400 printf("%s: unable to pullup encap "
1400 "header", p->if_xname); 1401 "header", p->if_xname);
1401 ifp->if_oerrors++; 1402 ifp->if_oerrors++;
1402 continue; 1403 continue;
1403 } 1404 }
1404 1405
1405 /* 1406 /*
1406 * Transform the Ethernet header into an 1407 * Transform the Ethernet header into an
1407 * Ethernet header with 802.1Q encapsulation. 1408 * Ethernet header with 802.1Q encapsulation.
1408 */ 1409 */
1409 memmove(mtod(m, void *), 1410 memmove(mtod(m, void *),
1410 mtod(m, char *) + mib->ifvm_encaplen, 1411 mtod(m, char *) + mib->ifvm_encaplen,
1411 sizeof(struct ether_header)); 1412 sizeof(struct ether_header));
1412 evl = mtod(m, struct ether_vlan_header *); 1413 evl = mtod(m, struct ether_vlan_header *);
1413 evl->evl_proto = evl->evl_encap_proto; 1414 evl->evl_proto = evl->evl_encap_proto;
1414 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1415 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1415 evl->evl_tag = htons(mib->ifvm_tag); 1416 evl->evl_tag = htons(mib->ifvm_tag);
1416 1417
1417 /* 1418 /*
1418 * To cater for VLAN-aware layer 2 ethernet 1419 * To cater for VLAN-aware layer 2 ethernet
1419 * switches which may need to strip the tag 1420 * switches which may need to strip the tag
1420 * before forwarding the packet, make sure 1421 * before forwarding the packet, make sure
1421 * the packet+tag is at least 68 bytes long. 1422 * the packet+tag is at least 68 bytes long.
1422 * This is necessary because our parent will 1423 * This is necessary because our parent will
1423 * only pad to 64 bytes (ETHER_MIN_LEN) and 1424 * only pad to 64 bytes (ETHER_MIN_LEN) and
1424 * some switches will not pad by themselves 1425 * some switches will not pad by themselves
1425 * after deleting a tag. 1426 * after deleting a tag.
1426 */ 1427 */
1427 const size_t min_data_len = ETHER_MIN_LEN - 1428 const size_t min_data_len = ETHER_MIN_LEN -
1428 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; 1429 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1429 if (m->m_pkthdr.len < min_data_len) { 1430 if (m->m_pkthdr.len < min_data_len) {
1430 m_copyback(m, m->m_pkthdr.len, 1431 m_copyback(m, m->m_pkthdr.len,
1431 min_data_len - m->m_pkthdr.len, 1432 min_data_len - m->m_pkthdr.len,
1432 vlan_zero_pad_buff); 1433 vlan_zero_pad_buff);
1433 } 1434 }
1434 break; 1435 break;
1435 } 1436 }
1436 1437
1437 default: 1438 default:
1438 panic("%s: impossible", __func__); 1439 panic("%s: impossible", __func__);
1439 } 1440 }
1440 } 1441 }
1441 1442
1442 if ((p->if_flags & IFF_RUNNING) == 0) { 1443 if ((p->if_flags & IFF_RUNNING) == 0) {
1443 m_freem(m); 1444 m_freem(m);
1444 continue; 1445 continue;
1445 } 1446 }
1446 1447
1447 error = if_transmit_lock(p, m); 1448 error = if_transmit_lock(p, m);
1448 if (error) { 1449 if (error) {
1449 /* mbuf is already freed */ 1450 /* mbuf is already freed */
1450 ifp->if_oerrors++; 1451 ifp->if_oerrors++;
1451 continue; 1452 continue;
1452 } 1453 }
1453 ifp->if_opackets++; 1454 ifp->if_opackets++;
1454 } 1455 }
1455 1456
1456 ifp->if_flags &= ~IFF_OACTIVE; 1457 ifp->if_flags &= ~IFF_OACTIVE;
1457 1458
1458 /* Remove reference to mib before release */ 1459 /* Remove reference to mib before release */
1459 vlan_putref_linkmib(mib, &psref); 1460 vlan_putref_linkmib(mib, &psref);
1460} 1461}
1461 1462
1462static int 1463static int
1463vlan_transmit(struct ifnet *ifp, struct mbuf *m) 1464vlan_transmit(struct ifnet *ifp, struct mbuf *m)
1464{ 1465{
1465 struct ifvlan *ifv = ifp->if_softc; 1466 struct ifvlan *ifv = ifp->if_softc;
1466 struct ifnet *p; 1467 struct ifnet *p;
1467 struct ethercom *ec; 1468 struct ethercom *ec;
1468 struct ifvlan_linkmib *mib; 1469 struct ifvlan_linkmib *mib;
1469 struct psref psref; 1470 struct psref psref;
1470 int error; 1471 int error;
1471 size_t pktlen = m->m_pkthdr.len; 1472 size_t pktlen = m->m_pkthdr.len;
1472 bool mcast = (m->m_flags & M_MCAST) != 0; 1473 bool mcast = (m->m_flags & M_MCAST) != 0;
1473 1474
1474 mib = vlan_getref_linkmib(ifv, &psref); 1475 mib = vlan_getref_linkmib(ifv, &psref);
1475 if (mib == NULL) { 1476 if (mib == NULL) {
1476 m_freem(m); 1477 m_freem(m);
1477 return ENETDOWN; 1478 return ENETDOWN;
1478 } 1479 }
1479 1480
1480 p = mib->ifvm_p; 1481 p = mib->ifvm_p;
1481 ec = (void *)mib->ifvm_p; 1482 ec = (void *)mib->ifvm_p;
1482 1483
1483 bpf_mtap(ifp, m, BPF_D_OUT); 1484 bpf_mtap(ifp, m, BPF_D_OUT);
1484 1485
1485 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0) 1486 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
1486 goto out; 1487 goto out;
1487 if (m == NULL) 1488 if (m == NULL)
1488 goto out; 1489 goto out;
1489 1490
1490 /* 1491 /*
1491 * If the parent can insert the tag itself, just mark 1492 * If the parent can insert the tag itself, just mark
1492 * the tag in the mbuf header. 1493 * the tag in the mbuf header.
1493 */ 1494 */
1494 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) { 1495 if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1495 vlan_set_tag(m, mib->ifvm_tag); 1496 vlan_set_tag(m, mib->ifvm_tag);
1496 } else { 1497 } else {
1497 /* 1498 /*
1498 * insert the tag ourselves 1499 * insert the tag ourselves
1499 */ 1500 */
1500 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT); 1501 M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
1501 if (m == NULL) { 1502 if (m == NULL) {
1502 printf("%s: unable to prepend encap header", 1503 printf("%s: unable to prepend encap header",
1503 p->if_xname); 1504 p->if_xname);
1504 ifp->if_oerrors++; 1505 ifp->if_oerrors++;
1505 error = ENOBUFS; 1506 error = ENOBUFS;
1506 goto out; 1507 goto out;
1507 } 1508 }
1508 1509
1509 switch (p->if_type) { 1510 switch (p->if_type) {
1510 case IFT_ETHER: 1511 case IFT_ETHER:
1511 { 1512 {
1512 struct ether_vlan_header *evl; 1513 struct ether_vlan_header *evl;
1513 1514
1514 if (m->m_len < sizeof(struct ether_vlan_header)) 1515 if (m->m_len < sizeof(struct ether_vlan_header))
1515 m = m_pullup(m, 1516 m = m_pullup(m,
1516 sizeof(struct ether_vlan_header)); 1517 sizeof(struct ether_vlan_header));
1517 if (m == NULL) { 1518 if (m == NULL) {
1518 printf("%s: unable to pullup encap " 1519 printf("%s: unable to pullup encap "
1519 "header", p->if_xname); 1520 "header", p->if_xname);
1520 ifp->if_oerrors++; 1521 ifp->if_oerrors++;
1521 error = ENOBUFS; 1522 error = ENOBUFS;
1522 goto out; 1523 goto out;
1523 } 1524 }
1524 1525
1525 /* 1526 /*
1526 * Transform the Ethernet header into an 1527 * Transform the Ethernet header into an
1527 * Ethernet header with 802.1Q encapsulation. 1528 * Ethernet header with 802.1Q encapsulation.
1528 */ 1529 */
1529 memmove(mtod(m, void *), 1530 memmove(mtod(m, void *),
1530 mtod(m, char *) + mib->ifvm_encaplen, 1531 mtod(m, char *) + mib->ifvm_encaplen,
1531 sizeof(struct ether_header)); 1532 sizeof(struct ether_header));
1532 evl = mtod(m, struct ether_vlan_header *); 1533 evl = mtod(m, struct ether_vlan_header *);
1533 evl->evl_proto = evl->evl_encap_proto; 1534 evl->evl_proto = evl->evl_encap_proto;
1534 evl->evl_encap_proto = htons(ETHERTYPE_VLAN); 1535 evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1535 evl->evl_tag = htons(mib->ifvm_tag); 1536 evl->evl_tag = htons(mib->ifvm_tag);
1536 1537
1537 /* 1538 /*
1538 * To cater for VLAN-aware layer 2 ethernet 1539 * To cater for VLAN-aware layer 2 ethernet
1539 * switches which may need to strip the tag 1540 * switches which may need to strip the tag
1540 * before forwarding the packet, make sure 1541 * before forwarding the packet, make sure
1541 * the packet+tag is at least 68 bytes long. 1542 * the packet+tag is at least 68 bytes long.
1542 * This is necessary because our parent will 1543 * This is necessary because our parent will
1543 * only pad to 64 bytes (ETHER_MIN_LEN) and 1544 * only pad to 64 bytes (ETHER_MIN_LEN) and
1544 * some switches will not pad by themselves 1545 * some switches will not pad by themselves
1545 * after deleting a tag. 1546 * after deleting a tag.
1546 */ 1547 */
1547 const size_t min_data_len = ETHER_MIN_LEN - 1548 const size_t min_data_len = ETHER_MIN_LEN -
1548 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; 1549 ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1549 if (m->m_pkthdr.len < min_data_len) { 1550 if (m->m_pkthdr.len < min_data_len) {
1550 m_copyback(m, m->m_pkthdr.len, 1551 m_copyback(m, m->m_pkthdr.len,
1551 min_data_len - m->m_pkthdr.len, 1552 min_data_len - m->m_pkthdr.len,
1552 vlan_zero_pad_buff); 1553 vlan_zero_pad_buff);
1553 } 1554 }
1554 break; 1555 break;
1555 } 1556 }
1556 1557
1557 default: 1558 default:
1558 panic("%s: impossible", __func__); 1559 panic("%s: impossible", __func__);
1559 } 1560 }
1560 } 1561 }
1561 1562
1562 if ((p->if_flags & IFF_RUNNING) == 0) { 1563 if ((p->if_flags & IFF_RUNNING) == 0) {
1563 m_freem(m); 1564 m_freem(m);
1564 error = ENETDOWN; 1565 error = ENETDOWN;
1565 goto out; 1566 goto out;
1566 } 1567 }
1567 1568
1568 error = if_transmit_lock(p, m); 1569 error = if_transmit_lock(p, m);
1569 if (error) { 1570 if (error) {
1570 /* mbuf is already freed */ 1571 /* mbuf is already freed */
1571 ifp->if_oerrors++; 1572 ifp->if_oerrors++;
1572 } else { 1573 } else {
1573 1574
1574 ifp->if_opackets++; 1575 ifp->if_opackets++;
1575 ifp->if_obytes += pktlen; 1576 ifp->if_obytes += pktlen;
1576 if (mcast) 1577 if (mcast)
1577 ifp->if_omcasts++; 1578 ifp->if_omcasts++;
1578 } 1579 }
1579 1580
1580out: 1581out:
1581 /* Remove reference to mib before release */ 1582 /* Remove reference to mib before release */
1582 vlan_putref_linkmib(mib, &psref); 1583 vlan_putref_linkmib(mib, &psref);
1583 return error; 1584 return error;
1584} 1585}
1585 1586
1586/* 1587/*
1587 * Given an Ethernet frame, find a valid vlan interface corresponding to the 1588 * Given an Ethernet frame, find a valid vlan interface corresponding to the
1588 * given source interface and tag, then run the real packet through the 1589 * given source interface and tag, then run the real packet through the
1589 * parent's input routine. 1590 * parent's input routine.
1590 */ 1591 */
1591void 1592void
1592vlan_input(struct ifnet *ifp, struct mbuf *m) 1593vlan_input(struct ifnet *ifp, struct mbuf *m)
1593{ 1594{
1594 struct ifvlan *ifv; 1595 struct ifvlan *ifv;
1595 uint16_t vid; 1596 uint16_t vid;
1596 struct ifvlan_linkmib *mib; 1597 struct ifvlan_linkmib *mib;
1597 struct psref psref; 1598 struct psref psref;
1598 bool have_vtag; 1599 bool have_vtag;
1599 1600
1600 have_vtag = vlan_has_tag(m); 1601 have_vtag = vlan_has_tag(m);
1601 if (have_vtag) { 1602 if (have_vtag) {
1602 vid = EVL_VLANOFTAG(vlan_get_tag(m)); 1603 vid = EVL_VLANOFTAG(vlan_get_tag(m));
1603 m->m_flags &= ~M_VLANTAG; 1604 m->m_flags &= ~M_VLANTAG;
1604 } else { 1605 } else {
1605 struct ether_vlan_header *evl; 1606 struct ether_vlan_header *evl;
1606 1607
1607 if (ifp->if_type != IFT_ETHER) { 1608 if (ifp->if_type != IFT_ETHER) {
1608 panic("%s: impossible", __func__); 1609 panic("%s: impossible", __func__);
1609 } 1610 }
1610 1611
1611 if (m->m_len < sizeof(struct ether_vlan_header) && 1612 if (m->m_len < sizeof(struct ether_vlan_header) &&
1612 (m = m_pullup(m, 1613 (m = m_pullup(m,
1613 sizeof(struct ether_vlan_header))) == NULL) { 1614 sizeof(struct ether_vlan_header))) == NULL) {
1614 printf("%s: no memory for VLAN header, " 1615 printf("%s: no memory for VLAN header, "
1615 "dropping packet.\n", ifp->if_xname); 1616 "dropping packet.\n", ifp->if_xname);
1616 return; 1617 return;
1617 } 1618 }
1618 evl = mtod(m, struct ether_vlan_header *); 1619 evl = mtod(m, struct ether_vlan_header *);
1619 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN); 1620 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1620 1621
1621 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag)); 1622 vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));
1622 1623
1623 /* 1624 /*
1624 * Restore the original ethertype. We'll remove 1625 * Restore the original ethertype. We'll remove
1625 * the encapsulation after we've found the vlan 1626 * the encapsulation after we've found the vlan
1626 * interface corresponding to the tag. 1627 * interface corresponding to the tag.
1627 */ 1628 */
1628 evl->evl_encap_proto = evl->evl_proto; 1629 evl->evl_encap_proto = evl->evl_proto;
1629 } 1630 }
1630 1631
1631 mib = vlan_lookup_tag_psref(ifp, vid, &psref); 1632 mib = vlan_lookup_tag_psref(ifp, vid, &psref);
1632 if (mib == NULL) { 1633 if (mib == NULL) {
1633 m_freem(m); 1634 m_freem(m);
1634 ifp->if_noproto++; 1635 ifp->if_noproto++;
1635 return; 1636 return;
1636 } 1637 }
1637 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN); 1638 KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);
1638 1639
1639 ifv = mib->ifvm_ifvlan; 1640 ifv = mib->ifvm_ifvlan;
1640 if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) != 1641 if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
1641 (IFF_UP | IFF_RUNNING)) { 1642 (IFF_UP | IFF_RUNNING)) {
1642 m_freem(m); 1643 m_freem(m);
1643 ifp->if_noproto++; 1644 ifp->if_noproto++;
1644 goto out; 1645 goto out;
1645 } 1646 }
1646 1647
1647 /* 1648 /*
1648 * Now, remove the encapsulation header. The original 1649 * Now, remove the encapsulation header. The original
1649 * header has already been fixed up above. 1650 * header has already been fixed up above.
1650 */ 1651 */
1651 if (!have_vtag) { 1652 if (!have_vtag) {
1652 memmove(mtod(m, char *) + mib->ifvm_encaplen, 1653 memmove(mtod(m, char *) + mib->ifvm_encaplen,
1653 mtod(m, void *), sizeof(struct ether_header)); 1654 mtod(m, void *), sizeof(struct ether_header));
1654 m_adj(m, mib->ifvm_encaplen); 1655 m_adj(m, mib->ifvm_encaplen);
1655 } 1656 }
1656 1657
1657 m_set_rcvif(m, &ifv->ifv_if); 1658 m_set_rcvif(m, &ifv->ifv_if);
1658 ifv->ifv_if.if_ipackets++; 1659 ifv->ifv_if.if_ipackets++;
1659 1660
1660 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0) 1661 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
1661 goto out; 1662 goto out;
1662 if (m == NULL) 1663 if (m == NULL)
1663 goto out; 1664 goto out;
1664 1665
1665 m->m_flags &= ~M_PROMISC; 1666 m->m_flags &= ~M_PROMISC;
1666 if_input(&ifv->ifv_if, m); 1667 if_input(&ifv->ifv_if, m);
1667out: 1668out:
1668 vlan_putref_linkmib(mib, &psref); 1669 vlan_putref_linkmib(mib, &psref);
1669} 1670}
1670 1671
1671/* 1672/*
1672 * Module infrastructure 1673 * Module infrastructure
1673 */ 1674 */
1674#include "if_module.h" 1675#include "if_module.h"
1675 1676
1676IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL) 1677IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)