| @@ -1,4203 +1,4200 @@ | | | @@ -1,4203 +1,4200 @@ |
1 | /* $NetBSD: ixgbe.c,v 1.199.2.1 2019/09/01 11:07:05 martin Exp $ */ | | 1 | /* $NetBSD: ixgbe.c,v 1.199.2.2 2019/09/01 11:12:45 martin Exp $ */ |
2 | | | 2 | |
3 | /****************************************************************************** | | 3 | /****************************************************************************** |
4 | | | 4 | |
5 | Copyright (c) 2001-2017, Intel Corporation | | 5 | Copyright (c) 2001-2017, Intel Corporation |
6 | All rights reserved. | | 6 | All rights reserved. |
7 | | | 7 | |
8 | Redistribution and use in source and binary forms, with or without | | 8 | Redistribution and use in source and binary forms, with or without |
9 | modification, are permitted provided that the following conditions are met: | | 9 | modification, are permitted provided that the following conditions are met: |
10 | | | 10 | |
11 | 1. Redistributions of source code must retain the above copyright notice, | | 11 | 1. Redistributions of source code must retain the above copyright notice, |
12 | this list of conditions and the following disclaimer. | | 12 | this list of conditions and the following disclaimer. |
13 | | | 13 | |
14 | 2. Redistributions in binary form must reproduce the above copyright | | 14 | 2. Redistributions in binary form must reproduce the above copyright |
15 | notice, this list of conditions and the following disclaimer in the | | 15 | notice, this list of conditions and the following disclaimer in the |
16 | documentation and/or other materials provided with the distribution. | | 16 | documentation and/or other materials provided with the distribution. |
17 | | | 17 | |
18 | 3. Neither the name of the Intel Corporation nor the names of its | | 18 | 3. Neither the name of the Intel Corporation nor the names of its |
19 | contributors may be used to endorse or promote products derived from | | 19 | contributors may be used to endorse or promote products derived from |
20 | this software without specific prior written permission. | | 20 | this software without specific prior written permission. |
21 | | | 21 | |
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
32 | POSSIBILITY OF SUCH DAMAGE. | | 32 | POSSIBILITY OF SUCH DAMAGE. |
33 | | | 33 | |
34 | ******************************************************************************/ | | 34 | ******************************************************************************/ |
35 | /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/ | | 35 | /*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/ |
36 | | | 36 | |
37 | /* | | 37 | /* |
38 | * Copyright (c) 2011 The NetBSD Foundation, Inc. | | 38 | * Copyright (c) 2011 The NetBSD Foundation, Inc. |
39 | * All rights reserved. | | 39 | * All rights reserved. |
40 | * | | 40 | * |
41 | * This code is derived from software contributed to The NetBSD Foundation | | 41 | * This code is derived from software contributed to The NetBSD Foundation |
42 | * by Coyote Point Systems, Inc. | | 42 | * by Coyote Point Systems, Inc. |
43 | * | | 43 | * |
44 | * Redistribution and use in source and binary forms, with or without | | 44 | * Redistribution and use in source and binary forms, with or without |
45 | * modification, are permitted provided that the following conditions | | 45 | * modification, are permitted provided that the following conditions |
46 | * are met: | | 46 | * are met: |
47 | * 1. Redistributions of source code must retain the above copyright | | 47 | * 1. Redistributions of source code must retain the above copyright |
48 | * notice, this list of conditions and the following disclaimer. | | 48 | * notice, this list of conditions and the following disclaimer. |
49 | * 2. Redistributions in binary form must reproduce the above copyright | | 49 | * 2. Redistributions in binary form must reproduce the above copyright |
50 | * notice, this list of conditions and the following disclaimer in the | | 50 | * notice, this list of conditions and the following disclaimer in the |
51 | * documentation and/or other materials provided with the distribution. | | 51 | * documentation and/or other materials provided with the distribution. |
52 | * | | 52 | * |
53 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 53 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
54 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 54 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
55 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 55 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
56 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 56 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
57 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 57 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
58 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 58 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
59 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 59 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
60 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 60 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
61 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 61 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
62 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 62 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
63 | * POSSIBILITY OF SUCH DAMAGE. | | 63 | * POSSIBILITY OF SUCH DAMAGE. |
64 | */ | | 64 | */ |
65 | | | 65 | |
66 | #ifdef _KERNEL_OPT | | 66 | #ifdef _KERNEL_OPT |
67 | #include "opt_inet.h" | | 67 | #include "opt_inet.h" |
68 | #include "opt_inet6.h" | | 68 | #include "opt_inet6.h" |
69 | #include "opt_net_mpsafe.h" | | 69 | #include "opt_net_mpsafe.h" |
70 | #endif | | 70 | #endif |
71 | | | 71 | |
72 | #include "ixgbe.h" | | 72 | #include "ixgbe.h" |
73 | #include "ixgbe_sriov.h" | | 73 | #include "ixgbe_sriov.h" |
74 | #include "vlan.h" | | 74 | #include "vlan.h" |
75 | | | 75 | |
76 | #include <sys/cprng.h> | | 76 | #include <sys/cprng.h> |
77 | #include <dev/mii/mii.h> | | 77 | #include <dev/mii/mii.h> |
78 | #include <dev/mii/miivar.h> | | 78 | #include <dev/mii/miivar.h> |
79 | | | 79 | |
80 | /************************************************************************ | | 80 | /************************************************************************ |
81 | * Driver version | | 81 | * Driver version |
82 | ************************************************************************/ | | 82 | ************************************************************************/ |
83 | static const char ixgbe_driver_version[] = "4.0.1-k"; | | 83 | static const char ixgbe_driver_version[] = "4.0.1-k"; |
84 | /* XXX NetBSD: + 3.3.10 */ | | 84 | /* XXX NetBSD: + 3.3.10 */ |
85 | | | 85 | |
86 | /************************************************************************ | | 86 | /************************************************************************ |
87 | * PCI Device ID Table | | 87 | * PCI Device ID Table |
88 | * | | 88 | * |
89 | * Used by probe to select devices to load on | | 89 | * Used by probe to select devices to load on |
90 | * Last field stores an index into ixgbe_strings | | 90 | * Last field stores an index into ixgbe_strings |
91 | * Last entry must be all 0s | | 91 | * Last entry must be all 0s |
92 | * | | 92 | * |
93 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | | 93 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } |
94 | ************************************************************************/ | | 94 | ************************************************************************/ |
95 | static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] = | | 95 | static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] = |
96 | { | | 96 | { |
97 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, | | 97 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, |
98 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, | | 98 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, |
99 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, | | 99 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, |
100 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, | | 100 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, |
101 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, | | 101 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, |
102 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, | | 102 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, |
103 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0}, | | 103 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0}, |
104 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, | | 104 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, |
105 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, | | 105 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, |
106 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, | | 106 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, |
107 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, | | 107 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, |
108 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, | | 108 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, |
109 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0}, | | 109 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0}, |
110 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, | | 110 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, |
111 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, | | 111 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, |
112 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, | | 112 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, |
113 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0}, | | 113 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0}, |
114 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, | | 114 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, |
115 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, | | 115 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, |
116 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, | | 116 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, |
117 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, | | 117 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, |
118 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, | | 118 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, |
119 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, | | 119 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, |
120 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, | | 120 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, |
121 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, | | 121 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, |
122 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, | | 122 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, |
123 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, | | 123 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, |
124 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, | | 124 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, |
125 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, | | 125 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, |
126 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, | | 126 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, |
127 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0}, | | 127 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0}, |
128 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, | | 128 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, |
129 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, | | 129 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, |
130 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, | | 130 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, |
131 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0}, | | 131 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0}, |
132 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0}, | | 132 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0}, |
133 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0}, | | 133 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0}, |
134 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0}, | | 134 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0}, |
135 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0}, | | 135 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0}, |
136 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0}, | | 136 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0}, |
137 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0}, | | 137 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0}, |
138 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0}, | | 138 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0}, |
139 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0}, | | 139 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0}, |
140 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0}, | | 140 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0}, |
141 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0}, | | 141 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0}, |
142 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0}, | | 142 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0}, |
143 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0}, | | 143 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0}, |
144 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0}, | | 144 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0}, |
145 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0}, | | 145 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0}, |
146 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0}, | | 146 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0}, |
147 | /* required last entry */ | | 147 | /* required last entry */ |
148 | {0, 0, 0, 0, 0} | | 148 | {0, 0, 0, 0, 0} |
149 | }; | | 149 | }; |
150 | | | 150 | |
151 | /************************************************************************ | | 151 | /************************************************************************ |
152 | * Table of branding strings | | 152 | * Table of branding strings |
153 | ************************************************************************/ | | 153 | ************************************************************************/ |
154 | static const char *ixgbe_strings[] = { | | 154 | static const char *ixgbe_strings[] = { |
155 | "Intel(R) PRO/10GbE PCI-Express Network Driver" | | 155 | "Intel(R) PRO/10GbE PCI-Express Network Driver" |
156 | }; | | 156 | }; |
157 | | | 157 | |
158 | /************************************************************************ | | 158 | /************************************************************************ |
159 | * Function prototypes | | 159 | * Function prototypes |
160 | ************************************************************************/ | | 160 | ************************************************************************/ |
161 | static int ixgbe_probe(device_t, cfdata_t, void *); | | 161 | static int ixgbe_probe(device_t, cfdata_t, void *); |
162 | static void ixgbe_attach(device_t, device_t, void *); | | 162 | static void ixgbe_attach(device_t, device_t, void *); |
163 | static int ixgbe_detach(device_t, int); | | 163 | static int ixgbe_detach(device_t, int); |
164 | #if 0 | | 164 | #if 0 |
165 | static int ixgbe_shutdown(device_t); | | 165 | static int ixgbe_shutdown(device_t); |
166 | #endif | | 166 | #endif |
167 | static bool ixgbe_suspend(device_t, const pmf_qual_t *); | | 167 | static bool ixgbe_suspend(device_t, const pmf_qual_t *); |
168 | static bool ixgbe_resume(device_t, const pmf_qual_t *); | | 168 | static bool ixgbe_resume(device_t, const pmf_qual_t *); |
169 | static int ixgbe_ifflags_cb(struct ethercom *); | | 169 | static int ixgbe_ifflags_cb(struct ethercom *); |
170 | static int ixgbe_ioctl(struct ifnet *, u_long, void *); | | 170 | static int ixgbe_ioctl(struct ifnet *, u_long, void *); |
171 | static void ixgbe_ifstop(struct ifnet *, int); | | 171 | static void ixgbe_ifstop(struct ifnet *, int); |
172 | static int ixgbe_init(struct ifnet *); | | 172 | static int ixgbe_init(struct ifnet *); |
173 | static void ixgbe_init_locked(struct adapter *); | | 173 | static void ixgbe_init_locked(struct adapter *); |
174 | static void ixgbe_stop(void *); | | 174 | static void ixgbe_stop(void *); |
175 | static void ixgbe_init_device_features(struct adapter *); | | 175 | static void ixgbe_init_device_features(struct adapter *); |
176 | static void ixgbe_check_fan_failure(struct adapter *, u32, bool); | | 176 | static void ixgbe_check_fan_failure(struct adapter *, u32, bool); |
177 | static void ixgbe_add_media_types(struct adapter *); | | 177 | static void ixgbe_add_media_types(struct adapter *); |
178 | static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); | | 178 | static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); |
179 | static int ixgbe_media_change(struct ifnet *); | | 179 | static int ixgbe_media_change(struct ifnet *); |
180 | static int ixgbe_allocate_pci_resources(struct adapter *, | | 180 | static int ixgbe_allocate_pci_resources(struct adapter *, |
181 | const struct pci_attach_args *); | | 181 | const struct pci_attach_args *); |
182 | static void ixgbe_free_softint(struct adapter *); | | 182 | static void ixgbe_free_softint(struct adapter *); |
183 | static void ixgbe_get_slot_info(struct adapter *); | | 183 | static void ixgbe_get_slot_info(struct adapter *); |
184 | static int ixgbe_allocate_msix(struct adapter *, | | 184 | static int ixgbe_allocate_msix(struct adapter *, |
185 | const struct pci_attach_args *); | | 185 | const struct pci_attach_args *); |
186 | static int ixgbe_allocate_legacy(struct adapter *, | | 186 | static int ixgbe_allocate_legacy(struct adapter *, |
187 | const struct pci_attach_args *); | | 187 | const struct pci_attach_args *); |
188 | static int ixgbe_configure_interrupts(struct adapter *); | | 188 | static int ixgbe_configure_interrupts(struct adapter *); |
189 | static void ixgbe_free_pciintr_resources(struct adapter *); | | 189 | static void ixgbe_free_pciintr_resources(struct adapter *); |
190 | static void ixgbe_free_pci_resources(struct adapter *); | | 190 | static void ixgbe_free_pci_resources(struct adapter *); |
191 | static void ixgbe_local_timer(void *); | | 191 | static void ixgbe_local_timer(void *); |
192 | static void ixgbe_local_timer1(void *); | | 192 | static void ixgbe_local_timer1(void *); |
193 | static void ixgbe_recovery_mode_timer(void *); | | 193 | static void ixgbe_recovery_mode_timer(void *); |
194 | static int ixgbe_setup_interface(device_t, struct adapter *); | | 194 | static int ixgbe_setup_interface(device_t, struct adapter *); |
195 | static void ixgbe_config_gpie(struct adapter *); | | 195 | static void ixgbe_config_gpie(struct adapter *); |
196 | static void ixgbe_config_dmac(struct adapter *); | | 196 | static void ixgbe_config_dmac(struct adapter *); |
197 | static void ixgbe_config_delay_values(struct adapter *); | | 197 | static void ixgbe_config_delay_values(struct adapter *); |
198 | static void ixgbe_config_link(struct adapter *); | | 198 | static void ixgbe_config_link(struct adapter *); |
199 | static void ixgbe_check_wol_support(struct adapter *); | | 199 | static void ixgbe_check_wol_support(struct adapter *); |
200 | static int ixgbe_setup_low_power_mode(struct adapter *); | | 200 | static int ixgbe_setup_low_power_mode(struct adapter *); |
201 | #if 0 | | 201 | #if 0 |
202 | static void ixgbe_rearm_queues(struct adapter *, u64); | | 202 | static void ixgbe_rearm_queues(struct adapter *, u64); |
203 | #endif | | 203 | #endif |
204 | | | 204 | |
205 | static void ixgbe_initialize_transmit_units(struct adapter *); | | 205 | static void ixgbe_initialize_transmit_units(struct adapter *); |
206 | static void ixgbe_initialize_receive_units(struct adapter *); | | 206 | static void ixgbe_initialize_receive_units(struct adapter *); |
207 | static void ixgbe_enable_rx_drop(struct adapter *); | | 207 | static void ixgbe_enable_rx_drop(struct adapter *); |
208 | static void ixgbe_disable_rx_drop(struct adapter *); | | 208 | static void ixgbe_disable_rx_drop(struct adapter *); |
209 | static void ixgbe_initialize_rss_mapping(struct adapter *); | | 209 | static void ixgbe_initialize_rss_mapping(struct adapter *); |
210 | | | 210 | |
211 | static void ixgbe_enable_intr(struct adapter *); | | 211 | static void ixgbe_enable_intr(struct adapter *); |
212 | static void ixgbe_disable_intr(struct adapter *); | | 212 | static void ixgbe_disable_intr(struct adapter *); |
213 | static void ixgbe_update_stats_counters(struct adapter *); | | 213 | static void ixgbe_update_stats_counters(struct adapter *); |
214 | static void ixgbe_set_promisc(struct adapter *); | | 214 | static void ixgbe_set_promisc(struct adapter *); |
215 | static void ixgbe_set_multi(struct adapter *); | | 215 | static void ixgbe_set_multi(struct adapter *); |
216 | static void ixgbe_update_link_status(struct adapter *); | | 216 | static void ixgbe_update_link_status(struct adapter *); |
217 | static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); | | 217 | static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); |
218 | static void ixgbe_configure_ivars(struct adapter *); | | 218 | static void ixgbe_configure_ivars(struct adapter *); |
219 | static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); | | 219 | static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); |
220 | static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t); | | 220 | static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t); |
221 | | | 221 | |
222 | static void ixgbe_setup_vlan_hw_tagging(struct adapter *); | | 222 | static void ixgbe_setup_vlan_hw_tagging(struct adapter *); |
223 | static void ixgbe_setup_vlan_hw_support(struct adapter *); | | 223 | static void ixgbe_setup_vlan_hw_support(struct adapter *); |
224 | static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool); | | 224 | static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool); |
225 | static int ixgbe_register_vlan(void *, struct ifnet *, u16); | | 225 | static int ixgbe_register_vlan(struct adapter *, u16); |
226 | static int ixgbe_unregister_vlan(void *, struct ifnet *, u16); | | 226 | static int ixgbe_unregister_vlan(struct adapter *, u16); |
227 | | | 227 | |
228 | static void ixgbe_add_device_sysctls(struct adapter *); | | 228 | static void ixgbe_add_device_sysctls(struct adapter *); |
229 | static void ixgbe_add_hw_stats(struct adapter *); | | 229 | static void ixgbe_add_hw_stats(struct adapter *); |
230 | static void ixgbe_clear_evcnt(struct adapter *); | | 230 | static void ixgbe_clear_evcnt(struct adapter *); |
231 | static int ixgbe_set_flowcntl(struct adapter *, int); | | 231 | static int ixgbe_set_flowcntl(struct adapter *, int); |
232 | static int ixgbe_set_advertise(struct adapter *, int); | | 232 | static int ixgbe_set_advertise(struct adapter *, int); |
233 | static int ixgbe_get_advertise(struct adapter *); | | 233 | static int ixgbe_get_advertise(struct adapter *); |
234 | | | 234 | |
235 | /* Sysctl handlers */ | | 235 | /* Sysctl handlers */ |
236 | static void ixgbe_set_sysctl_value(struct adapter *, const char *, | | 236 | static void ixgbe_set_sysctl_value(struct adapter *, const char *, |
237 | const char *, int *, int); | | 237 | const char *, int *, int); |
238 | static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO); | | 238 | static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO); |
239 | static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO); | | 239 | static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO); |
240 | static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); | | 240 | static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); |
241 | static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO); | | 241 | static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO); |
242 | static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO); | | 242 | static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO); |
243 | static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO); | | 243 | static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO); |
244 | #ifdef IXGBE_DEBUG | | 244 | #ifdef IXGBE_DEBUG |
245 | static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO); | | 245 | static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO); |
246 | static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO); | | 246 | static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO); |
247 | #endif | | 247 | #endif |
248 | static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO); | | 248 | static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO); |
249 | static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO); | | 249 | static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO); |
250 | static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO); | | 250 | static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO); |
251 | static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO); | | 251 | static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO); |
252 | static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO); | | 252 | static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO); |
253 | static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO); | | 253 | static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO); |
254 | static int ixgbe_sysctl_debug(SYSCTLFN_PROTO); | | 254 | static int ixgbe_sysctl_debug(SYSCTLFN_PROTO); |
255 | static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO); | | 255 | static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO); |
256 | static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO); | | 256 | static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO); |
257 | | | 257 | |
258 | /* Support for pluggable optic modules */ | | 258 | /* Support for pluggable optic modules */ |
259 | static bool ixgbe_sfp_probe(struct adapter *); | | 259 | static bool ixgbe_sfp_probe(struct adapter *); |
260 | | | 260 | |
261 | /* Legacy (single vector) interrupt handler */ | | 261 | /* Legacy (single vector) interrupt handler */ |
262 | static int ixgbe_legacy_irq(void *); | | 262 | static int ixgbe_legacy_irq(void *); |
263 | | | 263 | |
264 | /* The MSI/MSI-X Interrupt handlers */ | | 264 | /* The MSI/MSI-X Interrupt handlers */ |
265 | static int ixgbe_msix_que(void *); | | 265 | static int ixgbe_msix_que(void *); |
266 | static int ixgbe_msix_link(void *); | | 266 | static int ixgbe_msix_link(void *); |
267 | | | 267 | |
268 | /* Software interrupts for deferred work */ | | 268 | /* Software interrupts for deferred work */ |
269 | static void ixgbe_handle_que(void *); | | 269 | static void ixgbe_handle_que(void *); |
270 | static void ixgbe_handle_link(void *); | | 270 | static void ixgbe_handle_link(void *); |
271 | static void ixgbe_handle_msf(void *); | | 271 | static void ixgbe_handle_msf(void *); |
/* Deferred handlers (run outside hard-interrupt context; names suggest
 * SFP+ module insertion and PHY link events — confirm against callers) */
static void	ixgbe_handle_mod(void *);
static void	ixgbe_handle_phy(void *);

/* Workqueue handler for deferred work */
static void	ixgbe_handle_que_work(struct work *, void *);

/* Match a PCI device against the driver's supported-device table */
static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);

/************************************************************************
 * NetBSD Device Interface Entry Points
 ************************************************************************/
CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
    ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/*
 * FreeBSD module/newbus glue, disabled on NetBSD but kept under "#if 0"
 * to minimize the diff against the upstream Intel/FreeBSD driver.
 */
#if 0
devclass_t ix_devclass;
DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);

MODULE_DEPEND(ix, pci, 1, 1, 1);
MODULE_DEPEND(ix, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ix, netmap, 1, 1, 1);
#endif
#endif
297 | | | 297 | |
/*
 * TUNEABLE PARAMETERS:
 */

/*
 * AIM: Adaptive Interrupt Moderation
 * which means that the interrupt rate
 * is varied over time based on the
 * traffic for that interrupt vector
 */
static bool ixgbe_enable_aim = true;
/*
 * Stub out FreeBSD's SYSCTL_INT(): on NetBSD the macro expands to
 * nothing, so every SYSCTL_INT() invocation below is a no-op.  The
 * invocations (and the variables they reference) are kept to reduce
 * the diff against the upstream driver.
 */
#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
    "Enable adaptive interrupt moderation");

/* Upper bound for interrupt moderation, in interrupts/second */
static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
    &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");

/* How many packets txeof tries to clean at a time */
static int ixgbe_tx_process_limit = 256;
SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
    &ixgbe_tx_process_limit, 0,
    "Maximum number of sent packets to process at a time, -1 means unlimited");

/* Flow control setting, default to full */
static int ixgbe_flow_control = ixgbe_fc_full;
SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixgbe_flow_control, 0, "Default flow control used for all adapters");

/* Which packet processing uses workqueue or softint */
static bool ixgbe_txrx_workqueue = false;

/*
 * Smart speed setting, default to on
 * this only works as a compile option
 * right now as its during attach, set
 * this to 'ixgbe_smart_speed_off' to
 * disable.
 */
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

/*
 * MSI-X should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
 * Number of Queues, can be set to 0,
 * it then autoconfigures based on the
 * number of cpus with a max of 8. This
 * can be overridden manually here.
 */
static int ixgbe_num_queues = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
    "Number of queues to configure, 0 indicates autoconfigure");

/*
 * Number of TX descriptors per ring,
 * setting higher than RX as this seems
 * the better performing choice.
 */
static int ixgbe_txd = PERFORM_TXD;
SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
    "Number of transmit descriptors per queue");

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
    "Number of receive descriptors per queue");

/*
 * Defining this on will allow the use
 * of unsupported SFP+ modules, note that
 * doing so you are on your own :)
 */
static int allow_unsupported_sfp = false;
/* Like SYSCTL_INT above, TUNABLE_INT() is stubbed out on NetBSD. */
#define TUNABLE_INT(__x, __y)
TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);

/*
 * Not sure if Flow Director is fully baked,
 * so we'll default to turning it off.
 */
static int ixgbe_enable_fdir = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
    "Enable Flow Director");

/* Legacy Transmit (single queue) */
static int ixgbe_enable_legacy_tx = 0;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
    &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");

/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
    "Enable Receive-Side Scaling (RSS)");

#if 0
static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
#endif

/*
 * Flags for callouts/softints/workqueues depending on whether the
 * network stack is built MP-safe.
 * NOTE(review): "SOFTINFT" is a long-standing typo for "SOFTINT";
 * the identifier is used elsewhere in this file, so it is preserved.
 */
#ifdef NET_MPSAFE
#define IXGBE_MPSAFE		1
#define IXGBE_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define IXGBE_SOFTINFT_FLAGS	SOFTINT_MPSAFE
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define IXGBE_CALLOUT_FLAGS	0
#define IXGBE_SOFTINFT_FLAGS	0
#define IXGBE_WORKQUEUE_FLAGS	WQ_PERCPU
#endif
#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
420 | | | 420 | |
/************************************************************************
 * ixgbe_initialize_rss_mapping
 *
 *   Program the hardware RSS (Receive-Side Scaling) state:
 *     - the redirection table mapping hash results to RX queues
 *       (RETA; entries >= 128 go to ERETA on MACs with a 512-entry
 *       table),
 *     - the 40-byte hash key (10 x 32-bit RSSRK registers),
 *     - the hash-field selection bits in MRQC.
 ************************************************************************/
static void
ixgbe_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	u32		reta = 0, mrqc, rss_key[10];
	int		queue_id, table_size, index_mult;
	int		i, j;
	u32		rss_hash_config;

	/* force use default RSS key. */
#ifdef __NetBSD__
	/* NetBSD always takes the key from the network stack. */
	rss_getkey((uint8_t *) &rss_key);
#else
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *) &rss_key);
	} else {
		/* set up random bits */
		cprng_fast(&rss_key, sizeof(rss_key));
	}
#endif

	/*
	 * Set multiplier for RETA setup and table size based on MAC.
	 * 82598 scales the queue id written into each entry by 0x11;
	 * the X550 family has a 512-entry redirection table.
	 */
	index_mult = 0x1;
	table_size = 128;
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		index_mult = 0x11;
		break;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		table_size = 512;
		break;
	default:
		break;
	}

	/* Set up the redirection table */
	for (i = 0, j = 0; i < table_size; i++, j++) {
		/* j cycles round-robin over the active queues */
		if (j == adapter->num_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_queues;
		} else
			queue_id = (j * index_mult);

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 * Four 8-bit entries are accumulated in 'reta' and one
		 * 32-bit register is written every fourth iteration.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t) queue_id) << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
				    reta);
			reta = 0;
		}
	}

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
				| RSS_HASHTYPE_RSS_TCP_IPV4
				| RSS_HASHTYPE_RSS_IPV6
				| RSS_HASHTYPE_RSS_TCP_IPV6
				| RSS_HASHTYPE_RSS_IPV6_EX
				| RSS_HASHTYPE_RSS_TCP_IPV6_EX;
	}

	/* Translate the hash-type selection into MRQC field-enable bits */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	/* Merge in the virtualization (IOV) mode bits */
	mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
} /* ixgbe_initialize_rss_mapping */
537 | | | 537 | |
/************************************************************************
 * ixgbe_initialize_receive_units - Setup receive registers and features.
 *
 *   For each RX ring: program the descriptor ring base/length, the
 *   per-queue split-receive control (SRRCTL), the queue->statistics
 *   mapping (RQSMR), and the head/tail pointers.  Then configure
 *   packet-split type, RSS, and RX checksum offload.
 *   Receives are disabled on entry and left disabled; they are
 *   re-enabled elsewhere after the rings are populated.
 ************************************************************************/
/* Round a buffer size up to the SRRCTL BSIZEPKT granularity */
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)

static void
ixgbe_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	int		i, j;
	u32		bufsz, fctrl, srrctl, rxcsum;
	u32		hlreg;

	/*
	 * Make sure receives are disabled while
	 * setting up the descriptor ring
	 */
	ixgbe_disable_rx(hw);

	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* DPF/PMCF bits only exist in FCTRL on 82598 */
		fctrl |= IXGBE_FCTRL_DPF;
		fctrl |= IXGBE_FCTRL_PMCF;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU)
		hlreg |= IXGBE_HLREG0_JUMBOEN;
	else
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;

#ifdef DEV_NETMAP
	/* CRC stripping is conditional in Netmap */
	if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
	    (ifp->if_capenable & IFCAP_NETMAP) &&
	    !ix_crcstrip)
		hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
	else
#endif /* DEV_NETMAP */
		hlreg |= IXGBE_HLREG0_RXCRCSTRP;

	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	/* Receive buffer size in SRRCTL units, rounded up */
	bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	for (i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg;
		int regnum = i / 4;	/* 1 register per 4 queues */
		int regshift = i % 4;	/* 4 bits per 1 queue */
		j = rxr->me;		/* hardware queue index */

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register: buffer size + one-buffer
		 * advanced descriptor format */
		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		srrctl |= bufsz;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/* Set RQSMR (Receive Queue Statistic Mapping) register;
		 * each 32-bit register holds 8-bit mappings for 4 queues */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
		reg &= ~(0x000000ffUL << (regshift * 8));
		reg |= i << (regshift * 8);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);

		/*
		 * Set DROP_EN iff we have no flow control and >1 queue.
		 * Note that srrctl was cleared shortly before during reset,
		 * so we do not need to clear the bit, but do it just in case
		 * this code is moved elsewhere.
		 */
		if (adapter->num_queues > 1 &&
		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
			srrctl |= IXGBE_SRRCTL_DROP_EN;
		} else {
			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
		}

		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);

		/* Set the driver rx tail address */
		rxr->tail = IXGBE_RDT(rxr->me);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		/* Packet-split type selection (non-82598 MACs only) */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR
			    | IXGBE_PSRTYPE_UDPHDR
			    | IXGBE_PSRTYPE_IPV4HDR
			    | IXGBE_PSRTYPE_IPV6HDR;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	ixgbe_initialize_rss_mapping(adapter);

	if (adapter->num_queues > 1) {
		/* RSS and RX IPP Checksum are mutually exclusive */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* This is useful for calculating UDP/IP fragment checksums */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

} /* ixgbe_initialize_receive_units */
667 | | | 667 | |
668 | /************************************************************************ | | 668 | /************************************************************************ |
669 | * ixgbe_initialize_transmit_units - Enable transmit units. | | 669 | * ixgbe_initialize_transmit_units - Enable transmit units. |
670 | ************************************************************************/ | | 670 | ************************************************************************/ |
671 | static void | | 671 | static void |
672 | ixgbe_initialize_transmit_units(struct adapter *adapter) | | 672 | ixgbe_initialize_transmit_units(struct adapter *adapter) |
673 | { | | 673 | { |
674 | struct tx_ring *txr = adapter->tx_rings; | | 674 | struct tx_ring *txr = adapter->tx_rings; |
675 | struct ixgbe_hw *hw = &adapter->hw; | | 675 | struct ixgbe_hw *hw = &adapter->hw; |
676 | int i; | | 676 | int i; |
677 | | | 677 | |
678 | /* Setup the Base and Length of the Tx Descriptor Ring */ | | 678 | /* Setup the Base and Length of the Tx Descriptor Ring */ |
679 | for (i = 0; i < adapter->num_queues; i++, txr++) { | | 679 | for (i = 0; i < adapter->num_queues; i++, txr++) { |
680 | u64 tdba = txr->txdma.dma_paddr; | | 680 | u64 tdba = txr->txdma.dma_paddr; |
681 | u32 txctrl = 0; | | 681 | u32 txctrl = 0; |
682 | u32 tqsmreg, reg; | | 682 | u32 tqsmreg, reg; |
683 | int regnum = i / 4; /* 1 register per 4 queues */ | | 683 | int regnum = i / 4; /* 1 register per 4 queues */ |
684 | int regshift = i % 4; /* 4 bits per 1 queue */ | | 684 | int regshift = i % 4; /* 4 bits per 1 queue */ |
685 | int j = txr->me; | | 685 | int j = txr->me; |
686 | | | 686 | |
687 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), | | 687 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), |
688 | (tdba & 0x00000000ffffffffULL)); | | 688 | (tdba & 0x00000000ffffffffULL)); |
689 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); | | 689 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); |
690 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), | | 690 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), |
691 | adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); | | 691 | adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); |
692 | | | 692 | |
693 | /* | | 693 | /* |
694 | * Set TQSMR (Transmit Queue Statistic Mapping) register. | | 694 | * Set TQSMR (Transmit Queue Statistic Mapping) register. |
695 | * Register location is different between 82598 and others. | | 695 | * Register location is different between 82598 and others. |
696 | */ | | 696 | */ |
697 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | | 697 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
698 | tqsmreg = IXGBE_TQSMR(regnum); | | 698 | tqsmreg = IXGBE_TQSMR(regnum); |
699 | else | | 699 | else |
700 | tqsmreg = IXGBE_TQSM(regnum); | | 700 | tqsmreg = IXGBE_TQSM(regnum); |
701 | reg = IXGBE_READ_REG(hw, tqsmreg); | | 701 | reg = IXGBE_READ_REG(hw, tqsmreg); |
702 | reg &= ~(0x000000ffUL << (regshift * 8)); | | 702 | reg &= ~(0x000000ffUL << (regshift * 8)); |
703 | reg |= i << (regshift * 8); | | 703 | reg |= i << (regshift * 8); |
704 | IXGBE_WRITE_REG(hw, tqsmreg, reg); | | 704 | IXGBE_WRITE_REG(hw, tqsmreg, reg); |
705 | | | 705 | |
706 | /* Setup the HW Tx Head and Tail descriptor pointers */ | | 706 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
707 | IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); | | 707 | IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); |
708 | IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); | | 708 | IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); |
709 | | | 709 | |
710 | /* Cache the tail address */ | | 710 | /* Cache the tail address */ |
711 | txr->tail = IXGBE_TDT(j); | | 711 | txr->tail = IXGBE_TDT(j); |
712 | | | 712 | |
713 | txr->txr_no_space = false; | | 713 | txr->txr_no_space = false; |
714 | | | 714 | |
715 | /* Disable Head Writeback */ | | 715 | /* Disable Head Writeback */ |
716 | /* | | 716 | /* |
717 | * Note: for X550 series devices, these registers are actually | | 717 | * Note: for X550 series devices, these registers are actually |
		 * prefixed with TPH_ instead of DCA_, but the addresses and
719 | * fields remain the same. | | 719 | * fields remain the same. |
720 | */ | | 720 | */ |
721 | switch (hw->mac.type) { | | 721 | switch (hw->mac.type) { |
722 | case ixgbe_mac_82598EB: | | 722 | case ixgbe_mac_82598EB: |
723 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); | | 723 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); |
724 | break; | | 724 | break; |
725 | default: | | 725 | default: |
726 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); | | 726 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); |
727 | break; | | 727 | break; |
728 | } | | 728 | } |
729 | txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; | | 729 | txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; |
730 | switch (hw->mac.type) { | | 730 | switch (hw->mac.type) { |
731 | case ixgbe_mac_82598EB: | | 731 | case ixgbe_mac_82598EB: |
732 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); | | 732 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); |
733 | break; | | 733 | break; |
734 | default: | | 734 | default: |
735 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); | | 735 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); |
736 | break; | | 736 | break; |
737 | } | | 737 | } |
738 | | | 738 | |
739 | } | | 739 | } |
740 | | | 740 | |
741 | if (hw->mac.type != ixgbe_mac_82598EB) { | | 741 | if (hw->mac.type != ixgbe_mac_82598EB) { |
742 | u32 dmatxctl, rttdcs; | | 742 | u32 dmatxctl, rttdcs; |
743 | | | 743 | |
744 | dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); | | 744 | dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); |
745 | dmatxctl |= IXGBE_DMATXCTL_TE; | | 745 | dmatxctl |= IXGBE_DMATXCTL_TE; |
746 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); | | 746 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); |
747 | /* Disable arbiter to set MTQC */ | | 747 | /* Disable arbiter to set MTQC */ |
748 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); | | 748 | rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); |
749 | rttdcs |= IXGBE_RTTDCS_ARBDIS; | | 749 | rttdcs |= IXGBE_RTTDCS_ARBDIS; |
750 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | | 750 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
751 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, | | 751 | IXGBE_WRITE_REG(hw, IXGBE_MTQC, |
752 | ixgbe_get_mtqc(adapter->iov_mode)); | | 752 | ixgbe_get_mtqc(adapter->iov_mode)); |
753 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; | | 753 | rttdcs &= ~IXGBE_RTTDCS_ARBDIS; |
754 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); | | 754 | IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); |
755 | } | | 755 | } |
756 | | | 756 | |
757 | return; | | 757 | return; |
758 | } /* ixgbe_initialize_transmit_units */ | | 758 | } /* ixgbe_initialize_transmit_units */ |
759 | | | 759 | |
760 | /************************************************************************ | | 760 | /************************************************************************ |
761 | * ixgbe_attach - Device initialization routine | | 761 | * ixgbe_attach - Device initialization routine |
762 | * | | 762 | * |
763 | * Called when the driver is being loaded. | | 763 | * Called when the driver is being loaded. |
764 | * Identifies the type of hardware, allocates all resources | | 764 | * Identifies the type of hardware, allocates all resources |
765 | * and initializes the hardware. | | 765 | * and initializes the hardware. |
766 | * | | 766 | * |
 *   This attach function returns void (NetBSD autoconf); failures
 *   are reported via aprint_error_dev().
768 | ************************************************************************/ | | 768 | ************************************************************************/ |
769 | static void | | 769 | static void |
770 | ixgbe_attach(device_t parent, device_t dev, void *aux) | | 770 | ixgbe_attach(device_t parent, device_t dev, void *aux) |
771 | { | | 771 | { |
772 | struct adapter *adapter; | | 772 | struct adapter *adapter; |
773 | struct ixgbe_hw *hw; | | 773 | struct ixgbe_hw *hw; |
774 | int error = -1; | | 774 | int error = -1; |
775 | u32 ctrl_ext; | | 775 | u32 ctrl_ext; |
776 | u16 high, low, nvmreg; | | 776 | u16 high, low, nvmreg; |
777 | pcireg_t id, subid; | | 777 | pcireg_t id, subid; |
778 | const ixgbe_vendor_info_t *ent; | | 778 | const ixgbe_vendor_info_t *ent; |
779 | struct pci_attach_args *pa = aux; | | 779 | struct pci_attach_args *pa = aux; |
780 | const char *str; | | 780 | const char *str; |
781 | char buf[256]; | | 781 | char buf[256]; |
782 | | | 782 | |
783 | INIT_DEBUGOUT("ixgbe_attach: begin"); | | 783 | INIT_DEBUGOUT("ixgbe_attach: begin"); |
784 | | | 784 | |
785 | /* Allocate, clear, and link in our adapter structure */ | | 785 | /* Allocate, clear, and link in our adapter structure */ |
786 | adapter = device_private(dev); | | 786 | adapter = device_private(dev); |
787 | adapter->hw.back = adapter; | | 787 | adapter->hw.back = adapter; |
788 | adapter->dev = dev; | | 788 | adapter->dev = dev; |
789 | hw = &adapter->hw; | | 789 | hw = &adapter->hw; |
790 | adapter->osdep.pc = pa->pa_pc; | | 790 | adapter->osdep.pc = pa->pa_pc; |
791 | adapter->osdep.tag = pa->pa_tag; | | 791 | adapter->osdep.tag = pa->pa_tag; |
792 | if (pci_dma64_available(pa)) | | 792 | if (pci_dma64_available(pa)) |
793 | adapter->osdep.dmat = pa->pa_dmat64; | | 793 | adapter->osdep.dmat = pa->pa_dmat64; |
794 | else | | 794 | else |
795 | adapter->osdep.dmat = pa->pa_dmat; | | 795 | adapter->osdep.dmat = pa->pa_dmat; |
796 | adapter->osdep.attached = false; | | 796 | adapter->osdep.attached = false; |
797 | | | 797 | |
798 | ent = ixgbe_lookup(pa); | | 798 | ent = ixgbe_lookup(pa); |
799 | | | 799 | |
800 | KASSERT(ent != NULL); | | 800 | KASSERT(ent != NULL); |
801 | | | 801 | |
802 | aprint_normal(": %s, Version - %s\n", | | 802 | aprint_normal(": %s, Version - %s\n", |
803 | ixgbe_strings[ent->index], ixgbe_driver_version); | | 803 | ixgbe_strings[ent->index], ixgbe_driver_version); |
804 | | | 804 | |
805 | /* Core Lock Init*/ | | 805 | /* Core Lock Init*/ |
806 | IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); | | 806 | IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); |
807 | | | 807 | |
808 | /* Set up the timer callout */ | | 808 | /* Set up the timer callout */ |
809 | callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); | | 809 | callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); |
810 | | | 810 | |
811 | /* Determine hardware revision */ | | 811 | /* Determine hardware revision */ |
812 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); | | 812 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); |
813 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); | | 813 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); |
814 | | | 814 | |
815 | hw->vendor_id = PCI_VENDOR(id); | | 815 | hw->vendor_id = PCI_VENDOR(id); |
816 | hw->device_id = PCI_PRODUCT(id); | | 816 | hw->device_id = PCI_PRODUCT(id); |
817 | hw->revision_id = | | 817 | hw->revision_id = |
818 | PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); | | 818 | PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); |
819 | hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); | | 819 | hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); |
820 | hw->subsystem_device_id = PCI_SUBSYS_ID(subid); | | 820 | hw->subsystem_device_id = PCI_SUBSYS_ID(subid); |
821 | | | 821 | |
822 | /* | | 822 | /* |
823 | * Make sure BUSMASTER is set | | 823 | * Make sure BUSMASTER is set |
824 | */ | | 824 | */ |
825 | ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); | | 825 | ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); |
826 | | | 826 | |
827 | /* Do base PCI setup - map BAR0 */ | | 827 | /* Do base PCI setup - map BAR0 */ |
828 | if (ixgbe_allocate_pci_resources(adapter, pa)) { | | 828 | if (ixgbe_allocate_pci_resources(adapter, pa)) { |
829 | aprint_error_dev(dev, "Allocation of PCI resources failed\n"); | | 829 | aprint_error_dev(dev, "Allocation of PCI resources failed\n"); |
830 | error = ENXIO; | | 830 | error = ENXIO; |
831 | goto err_out; | | 831 | goto err_out; |
832 | } | | 832 | } |
833 | | | 833 | |
834 | /* let hardware know driver is loaded */ | | 834 | /* let hardware know driver is loaded */ |
835 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | | 835 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); |
836 | ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; | | 836 | ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; |
837 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | | 837 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); |
838 | | | 838 | |
839 | /* | | 839 | /* |
840 | * Initialize the shared code | | 840 | * Initialize the shared code |
841 | */ | | 841 | */ |
842 | if (ixgbe_init_shared_code(hw) != 0) { | | 842 | if (ixgbe_init_shared_code(hw) != 0) { |
843 | aprint_error_dev(dev, "Unable to initialize the shared code\n"); | | 843 | aprint_error_dev(dev, "Unable to initialize the shared code\n"); |
844 | error = ENXIO; | | 844 | error = ENXIO; |
845 | goto err_out; | | 845 | goto err_out; |
846 | } | | 846 | } |
847 | | | 847 | |
848 | switch (hw->mac.type) { | | 848 | switch (hw->mac.type) { |
849 | case ixgbe_mac_82598EB: | | 849 | case ixgbe_mac_82598EB: |
850 | str = "82598EB"; | | 850 | str = "82598EB"; |
851 | break; | | 851 | break; |
852 | case ixgbe_mac_82599EB: | | 852 | case ixgbe_mac_82599EB: |
853 | str = "82599EB"; | | 853 | str = "82599EB"; |
854 | break; | | 854 | break; |
855 | case ixgbe_mac_X540: | | 855 | case ixgbe_mac_X540: |
856 | str = "X540"; | | 856 | str = "X540"; |
857 | break; | | 857 | break; |
858 | case ixgbe_mac_X550: | | 858 | case ixgbe_mac_X550: |
859 | str = "X550"; | | 859 | str = "X550"; |
860 | break; | | 860 | break; |
861 | case ixgbe_mac_X550EM_x: | | 861 | case ixgbe_mac_X550EM_x: |
862 | str = "X550EM"; | | 862 | str = "X550EM"; |
863 | break; | | 863 | break; |
864 | case ixgbe_mac_X550EM_a: | | 864 | case ixgbe_mac_X550EM_a: |
865 | str = "X550EM A"; | | 865 | str = "X550EM A"; |
866 | break; | | 866 | break; |
867 | default: | | 867 | default: |
868 | str = "Unknown"; | | 868 | str = "Unknown"; |
869 | break; | | 869 | break; |
870 | } | | 870 | } |
871 | aprint_normal_dev(dev, "device %s\n", str); | | 871 | aprint_normal_dev(dev, "device %s\n", str); |
872 | | | 872 | |
873 | if (hw->mbx.ops.init_params) | | 873 | if (hw->mbx.ops.init_params) |
874 | hw->mbx.ops.init_params(hw); | | 874 | hw->mbx.ops.init_params(hw); |
875 | | | 875 | |
876 | hw->allow_unsupported_sfp = allow_unsupported_sfp; | | 876 | hw->allow_unsupported_sfp = allow_unsupported_sfp; |
877 | | | 877 | |
878 | /* Pick up the 82599 settings */ | | 878 | /* Pick up the 82599 settings */ |
879 | if (hw->mac.type != ixgbe_mac_82598EB) { | | 879 | if (hw->mac.type != ixgbe_mac_82598EB) { |
880 | hw->phy.smart_speed = ixgbe_smart_speed; | | 880 | hw->phy.smart_speed = ixgbe_smart_speed; |
881 | adapter->num_segs = IXGBE_82599_SCATTER; | | 881 | adapter->num_segs = IXGBE_82599_SCATTER; |
882 | } else | | 882 | } else |
883 | adapter->num_segs = IXGBE_82598_SCATTER; | | 883 | adapter->num_segs = IXGBE_82598_SCATTER; |
884 | | | 884 | |
885 | /* Ensure SW/FW semaphore is free */ | | 885 | /* Ensure SW/FW semaphore is free */ |
886 | ixgbe_init_swfw_semaphore(hw); | | 886 | ixgbe_init_swfw_semaphore(hw); |
887 | | | 887 | |
888 | hw->mac.ops.set_lan_id(hw); | | 888 | hw->mac.ops.set_lan_id(hw); |
889 | ixgbe_init_device_features(adapter); | | 889 | ixgbe_init_device_features(adapter); |
890 | | | 890 | |
891 | if (ixgbe_configure_interrupts(adapter)) { | | 891 | if (ixgbe_configure_interrupts(adapter)) { |
892 | error = ENXIO; | | 892 | error = ENXIO; |
893 | goto err_out; | | 893 | goto err_out; |
894 | } | | 894 | } |
895 | | | 895 | |
896 | /* Allocate multicast array memory. */ | | 896 | /* Allocate multicast array memory. */ |
897 | adapter->mta = malloc(sizeof(*adapter->mta) * | | 897 | adapter->mta = malloc(sizeof(*adapter->mta) * |
898 | MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); | | 898 | MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); |
899 | if (adapter->mta == NULL) { | | 899 | if (adapter->mta == NULL) { |
900 | aprint_error_dev(dev, "Cannot allocate multicast setup array\n"); | | 900 | aprint_error_dev(dev, "Cannot allocate multicast setup array\n"); |
901 | error = ENOMEM; | | 901 | error = ENOMEM; |
902 | goto err_out; | | 902 | goto err_out; |
903 | } | | 903 | } |
904 | | | 904 | |
905 | /* Enable WoL (if supported) */ | | 905 | /* Enable WoL (if supported) */ |
906 | ixgbe_check_wol_support(adapter); | | 906 | ixgbe_check_wol_support(adapter); |
907 | | | 907 | |
908 | /* Register for VLAN events */ | | 908 | /* Register for VLAN events */ |
909 | ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); | | 909 | ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); |
910 | | | 910 | |
911 | /* Verify adapter fan is still functional (if applicable) */ | | 911 | /* Verify adapter fan is still functional (if applicable) */ |
912 | if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { | | 912 | if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { |
913 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | | 913 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); |
914 | ixgbe_check_fan_failure(adapter, esdp, FALSE); | | 914 | ixgbe_check_fan_failure(adapter, esdp, FALSE); |
915 | } | | 915 | } |
916 | | | 916 | |
917 | /* Set an initial default flow control value */ | | 917 | /* Set an initial default flow control value */ |
918 | hw->fc.requested_mode = ixgbe_flow_control; | | 918 | hw->fc.requested_mode = ixgbe_flow_control; |
919 | | | 919 | |
920 | /* Sysctls for limiting the amount of work done in the taskqueues */ | | 920 | /* Sysctls for limiting the amount of work done in the taskqueues */ |
921 | ixgbe_set_sysctl_value(adapter, "rx_processing_limit", | | 921 | ixgbe_set_sysctl_value(adapter, "rx_processing_limit", |
922 | "max number of rx packets to process", | | 922 | "max number of rx packets to process", |
923 | &adapter->rx_process_limit, ixgbe_rx_process_limit); | | 923 | &adapter->rx_process_limit, ixgbe_rx_process_limit); |
924 | | | 924 | |
925 | ixgbe_set_sysctl_value(adapter, "tx_processing_limit", | | 925 | ixgbe_set_sysctl_value(adapter, "tx_processing_limit", |
926 | "max number of tx packets to process", | | 926 | "max number of tx packets to process", |
927 | &adapter->tx_process_limit, ixgbe_tx_process_limit); | | 927 | &adapter->tx_process_limit, ixgbe_tx_process_limit); |
928 | | | 928 | |
929 | /* Do descriptor calc and sanity checks */ | | 929 | /* Do descriptor calc and sanity checks */ |
930 | if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || | | 930 | if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || |
931 | ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { | | 931 | ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { |
932 | aprint_error_dev(dev, "TXD config issue, using default!\n"); | | 932 | aprint_error_dev(dev, "TXD config issue, using default!\n"); |
933 | adapter->num_tx_desc = DEFAULT_TXD; | | 933 | adapter->num_tx_desc = DEFAULT_TXD; |
934 | } else | | 934 | } else |
935 | adapter->num_tx_desc = ixgbe_txd; | | 935 | adapter->num_tx_desc = ixgbe_txd; |
936 | | | 936 | |
937 | if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || | | 937 | if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || |
938 | ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { | | 938 | ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { |
939 | aprint_error_dev(dev, "RXD config issue, using default!\n"); | | 939 | aprint_error_dev(dev, "RXD config issue, using default!\n"); |
940 | adapter->num_rx_desc = DEFAULT_RXD; | | 940 | adapter->num_rx_desc = DEFAULT_RXD; |
941 | } else | | 941 | } else |
942 | adapter->num_rx_desc = ixgbe_rxd; | | 942 | adapter->num_rx_desc = ixgbe_rxd; |
943 | | | 943 | |
944 | /* Allocate our TX/RX Queues */ | | 944 | /* Allocate our TX/RX Queues */ |
945 | if (ixgbe_allocate_queues(adapter)) { | | 945 | if (ixgbe_allocate_queues(adapter)) { |
946 | error = ENOMEM; | | 946 | error = ENOMEM; |
947 | goto err_out; | | 947 | goto err_out; |
948 | } | | 948 | } |
949 | | | 949 | |
950 | hw->phy.reset_if_overtemp = TRUE; | | 950 | hw->phy.reset_if_overtemp = TRUE; |
951 | error = ixgbe_reset_hw(hw); | | 951 | error = ixgbe_reset_hw(hw); |
952 | hw->phy.reset_if_overtemp = FALSE; | | 952 | hw->phy.reset_if_overtemp = FALSE; |
953 | if (error == IXGBE_ERR_SFP_NOT_PRESENT) { | | 953 | if (error == IXGBE_ERR_SFP_NOT_PRESENT) { |
954 | /* | | 954 | /* |
955 | * No optics in this port, set up | | 955 | * No optics in this port, set up |
956 | * so the timer routine will probe | | 956 | * so the timer routine will probe |
957 | * for later insertion. | | 957 | * for later insertion. |
958 | */ | | 958 | */ |
959 | adapter->sfp_probe = TRUE; | | 959 | adapter->sfp_probe = TRUE; |
960 | error = IXGBE_SUCCESS; | | 960 | error = IXGBE_SUCCESS; |
961 | } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { | | 961 | } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
962 | aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); | | 962 | aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); |
963 | error = EIO; | | 963 | error = EIO; |
964 | goto err_late; | | 964 | goto err_late; |
965 | } else if (error) { | | 965 | } else if (error) { |
966 | aprint_error_dev(dev, "Hardware initialization failed\n"); | | 966 | aprint_error_dev(dev, "Hardware initialization failed\n"); |
967 | error = EIO; | | 967 | error = EIO; |
968 | goto err_late; | | 968 | goto err_late; |
969 | } | | 969 | } |
970 | | | 970 | |
971 | /* Make sure we have a good EEPROM before we read from it */ | | 971 | /* Make sure we have a good EEPROM before we read from it */ |
972 | if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { | | 972 | if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { |
973 | aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n"); | | 973 | aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n"); |
974 | error = EIO; | | 974 | error = EIO; |
975 | goto err_late; | | 975 | goto err_late; |
976 | } | | 976 | } |
977 | | | 977 | |
978 | aprint_normal("%s:", device_xname(dev)); | | 978 | aprint_normal("%s:", device_xname(dev)); |
979 | /* NVM Image Version */ | | 979 | /* NVM Image Version */ |
980 | high = low = 0; | | 980 | high = low = 0; |
981 | switch (hw->mac.type) { | | 981 | switch (hw->mac.type) { |
982 | case ixgbe_mac_X540: | | 982 | case ixgbe_mac_X540: |
983 | case ixgbe_mac_X550EM_a: | | 983 | case ixgbe_mac_X550EM_a: |
984 | hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); | | 984 | hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); |
985 | if (nvmreg == 0xffff) | | 985 | if (nvmreg == 0xffff) |
986 | break; | | 986 | break; |
987 | high = (nvmreg >> 12) & 0x0f; | | 987 | high = (nvmreg >> 12) & 0x0f; |
988 | low = (nvmreg >> 4) & 0xff; | | 988 | low = (nvmreg >> 4) & 0xff; |
989 | id = nvmreg & 0x0f; | | 989 | id = nvmreg & 0x0f; |
990 | aprint_normal(" NVM Image Version %u.", high); | | 990 | aprint_normal(" NVM Image Version %u.", high); |
991 | if (hw->mac.type == ixgbe_mac_X540) | | 991 | if (hw->mac.type == ixgbe_mac_X540) |
992 | str = "%x"; | | 992 | str = "%x"; |
993 | else | | 993 | else |
994 | str = "%02x"; | | 994 | str = "%02x"; |
995 | aprint_normal(str, low); | | 995 | aprint_normal(str, low); |
996 | aprint_normal(" ID 0x%x,", id); | | 996 | aprint_normal(" ID 0x%x,", id); |
997 | break; | | 997 | break; |
998 | case ixgbe_mac_X550EM_x: | | 998 | case ixgbe_mac_X550EM_x: |
999 | case ixgbe_mac_X550: | | 999 | case ixgbe_mac_X550: |
1000 | hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); | | 1000 | hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); |
1001 | if (nvmreg == 0xffff) | | 1001 | if (nvmreg == 0xffff) |
1002 | break; | | 1002 | break; |
1003 | high = (nvmreg >> 12) & 0x0f; | | 1003 | high = (nvmreg >> 12) & 0x0f; |
1004 | low = nvmreg & 0xff; | | 1004 | low = nvmreg & 0xff; |
1005 | aprint_normal(" NVM Image Version %u.%02x,", high, low); | | 1005 | aprint_normal(" NVM Image Version %u.%02x,", high, low); |
1006 | break; | | 1006 | break; |
1007 | default: | | 1007 | default: |
1008 | break; | | 1008 | break; |
1009 | } | | 1009 | } |
1010 | hw->eeprom.nvm_image_ver_high = high; | | 1010 | hw->eeprom.nvm_image_ver_high = high; |
1011 | hw->eeprom.nvm_image_ver_low = low; | | 1011 | hw->eeprom.nvm_image_ver_low = low; |
1012 | | | 1012 | |
1013 | /* PHY firmware revision */ | | 1013 | /* PHY firmware revision */ |
1014 | switch (hw->mac.type) { | | 1014 | switch (hw->mac.type) { |
1015 | case ixgbe_mac_X540: | | 1015 | case ixgbe_mac_X540: |
1016 | case ixgbe_mac_X550: | | 1016 | case ixgbe_mac_X550: |
1017 | hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg); | | 1017 | hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg); |
1018 | if (nvmreg == 0xffff) | | 1018 | if (nvmreg == 0xffff) |
1019 | break; | | 1019 | break; |
1020 | high = (nvmreg >> 12) & 0x0f; | | 1020 | high = (nvmreg >> 12) & 0x0f; |
1021 | low = (nvmreg >> 4) & 0xff; | | 1021 | low = (nvmreg >> 4) & 0xff; |
1022 | id = nvmreg & 0x000f; | | 1022 | id = nvmreg & 0x000f; |
1023 | aprint_normal(" PHY FW Revision %u.", high); | | 1023 | aprint_normal(" PHY FW Revision %u.", high); |
1024 | if (hw->mac.type == ixgbe_mac_X540) | | 1024 | if (hw->mac.type == ixgbe_mac_X540) |
1025 | str = "%x"; | | 1025 | str = "%x"; |
1026 | else | | 1026 | else |
1027 | str = "%02x"; | | 1027 | str = "%02x"; |
1028 | aprint_normal(str, low); | | 1028 | aprint_normal(str, low); |
1029 | aprint_normal(" ID 0x%x,", id); | | 1029 | aprint_normal(" ID 0x%x,", id); |
1030 | break; | | 1030 | break; |
1031 | default: | | 1031 | default: |
1032 | break; | | 1032 | break; |
1033 | } | | 1033 | } |
1034 | | | 1034 | |
1035 | /* NVM Map version & OEM NVM Image version */ | | 1035 | /* NVM Map version & OEM NVM Image version */ |
1036 | switch (hw->mac.type) { | | 1036 | switch (hw->mac.type) { |
1037 | case ixgbe_mac_X550: | | 1037 | case ixgbe_mac_X550: |
1038 | case ixgbe_mac_X550EM_x: | | 1038 | case ixgbe_mac_X550EM_x: |
1039 | case ixgbe_mac_X550EM_a: | | 1039 | case ixgbe_mac_X550EM_a: |
1040 | hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); | | 1040 | hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); |
1041 | if (nvmreg != 0xffff) { | | 1041 | if (nvmreg != 0xffff) { |
1042 | high = (nvmreg >> 12) & 0x0f; | | 1042 | high = (nvmreg >> 12) & 0x0f; |
1043 | low = nvmreg & 0x00ff; | | 1043 | low = nvmreg & 0x00ff; |
1044 | aprint_normal(" NVM Map version %u.%02x,", high, low); | | 1044 | aprint_normal(" NVM Map version %u.%02x,", high, low); |
1045 | } | | 1045 | } |
1046 | hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg); | | 1046 | hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg); |
1047 | if (nvmreg != 0xffff) { | | 1047 | if (nvmreg != 0xffff) { |
1048 | high = (nvmreg >> 12) & 0x0f; | | 1048 | high = (nvmreg >> 12) & 0x0f; |
1049 | low = nvmreg & 0x00ff; | | 1049 | low = nvmreg & 0x00ff; |
1050 | aprint_verbose(" OEM NVM Image version %u.%02x,", high, | | 1050 | aprint_verbose(" OEM NVM Image version %u.%02x,", high, |
1051 | low); | | 1051 | low); |
1052 | } | | 1052 | } |
1053 | break; | | 1053 | break; |
1054 | default: | | 1054 | default: |
1055 | break; | | 1055 | break; |
1056 | } | | 1056 | } |
1057 | | | 1057 | |
1058 | /* Print the ETrackID */ | | 1058 | /* Print the ETrackID */ |
1059 | hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high); | | 1059 | hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high); |
1060 | hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low); | | 1060 | hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low); |
1061 | aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low); | | 1061 | aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low); |
1062 | | | 1062 | |
1063 | if (adapter->feat_en & IXGBE_FEATURE_MSIX) { | | 1063 | if (adapter->feat_en & IXGBE_FEATURE_MSIX) { |
1064 | error = ixgbe_allocate_msix(adapter, pa); | | 1064 | error = ixgbe_allocate_msix(adapter, pa); |
1065 | if (error) { | | 1065 | if (error) { |
1066 | /* Free allocated queue structures first */ | | 1066 | /* Free allocated queue structures first */ |
1067 | ixgbe_free_transmit_structures(adapter); | | 1067 | ixgbe_free_transmit_structures(adapter); |
1068 | ixgbe_free_receive_structures(adapter); | | 1068 | ixgbe_free_receive_structures(adapter); |
1069 | free(adapter->queues, M_DEVBUF); | | 1069 | free(adapter->queues, M_DEVBUF); |
1070 | | | 1070 | |
1071 | /* Fallback to legacy interrupt */ | | 1071 | /* Fallback to legacy interrupt */ |
1072 | adapter->feat_en &= ~IXGBE_FEATURE_MSIX; | | 1072 | adapter->feat_en &= ~IXGBE_FEATURE_MSIX; |
1073 | if (adapter->feat_cap & IXGBE_FEATURE_MSI) | | 1073 | if (adapter->feat_cap & IXGBE_FEATURE_MSI) |
1074 | adapter->feat_en |= IXGBE_FEATURE_MSI; | | 1074 | adapter->feat_en |= IXGBE_FEATURE_MSI; |
1075 | adapter->num_queues = 1; | | 1075 | adapter->num_queues = 1; |
1076 | | | 1076 | |
1077 | /* Allocate our TX/RX Queues again */ | | 1077 | /* Allocate our TX/RX Queues again */ |
1078 | if (ixgbe_allocate_queues(adapter)) { | | 1078 | if (ixgbe_allocate_queues(adapter)) { |
1079 | error = ENOMEM; | | 1079 | error = ENOMEM; |
1080 | goto err_out; | | 1080 | goto err_out; |
1081 | } | | 1081 | } |
1082 | } | | 1082 | } |
1083 | } | | 1083 | } |
1084 | /* Recovery mode */ | | 1084 | /* Recovery mode */ |
1085 | switch (adapter->hw.mac.type) { | | 1085 | switch (adapter->hw.mac.type) { |
1086 | case ixgbe_mac_X550: | | 1086 | case ixgbe_mac_X550: |
1087 | case ixgbe_mac_X550EM_x: | | 1087 | case ixgbe_mac_X550EM_x: |
1088 | case ixgbe_mac_X550EM_a: | | 1088 | case ixgbe_mac_X550EM_a: |
1089 | /* >= 2.00 */ | | 1089 | /* >= 2.00 */ |
1090 | if (hw->eeprom.nvm_image_ver_high >= 2) { | | 1090 | if (hw->eeprom.nvm_image_ver_high >= 2) { |
1091 | adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; | | 1091 | adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; |
1092 | adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE; | | 1092 | adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE; |
1093 | } | | 1093 | } |
1094 | break; | | 1094 | break; |
1095 | default: | | 1095 | default: |
1096 | break; | | 1096 | break; |
1097 | } | | 1097 | } |
1098 | | | 1098 | |
1099 | if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0) | | 1099 | if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0) |
1100 | error = ixgbe_allocate_legacy(adapter, pa); | | 1100 | error = ixgbe_allocate_legacy(adapter, pa); |
1101 | if (error) | | 1101 | if (error) |
1102 | goto err_late; | | 1102 | goto err_late; |
1103 | | | 1103 | |
1104 | /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */ | | 1104 | /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */ |
1105 | adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS, | | 1105 | adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINFT_FLAGS, |
1106 | ixgbe_handle_link, adapter); | | 1106 | ixgbe_handle_link, adapter); |
1107 | adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, | | 1107 | adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, |
1108 | ixgbe_handle_mod, adapter); | | 1108 | ixgbe_handle_mod, adapter); |
1109 | adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, | | 1109 | adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, |
1110 | ixgbe_handle_msf, adapter); | | 1110 | ixgbe_handle_msf, adapter); |
1111 | adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, | | 1111 | adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, |
1112 | ixgbe_handle_phy, adapter); | | 1112 | ixgbe_handle_phy, adapter); |
1113 | if (adapter->feat_en & IXGBE_FEATURE_FDIR) | | 1113 | if (adapter->feat_en & IXGBE_FEATURE_FDIR) |
1114 | adapter->fdir_si = | | 1114 | adapter->fdir_si = |
1115 | softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, | | 1115 | softint_establish(SOFTINT_NET | IXGBE_SOFTINFT_FLAGS, |
1116 | ixgbe_reinit_fdir, adapter); | | 1116 | ixgbe_reinit_fdir, adapter); |
1117 | if ((adapter->link_si == NULL) || (adapter->mod_si == NULL) | | 1117 | if ((adapter->link_si == NULL) || (adapter->mod_si == NULL) |
1118 | || (adapter->msf_si == NULL) || (adapter->phy_si == NULL) | | 1118 | || (adapter->msf_si == NULL) || (adapter->phy_si == NULL) |
1119 | || ((adapter->feat_en & IXGBE_FEATURE_FDIR) | | 1119 | || ((adapter->feat_en & IXGBE_FEATURE_FDIR) |
1120 | && (adapter->fdir_si == NULL))) { | | 1120 | && (adapter->fdir_si == NULL))) { |
1121 | aprint_error_dev(dev, | | 1121 | aprint_error_dev(dev, |
1122 | "could not establish software interrupts ()\n"); | | 1122 | "could not establish software interrupts ()\n"); |
1123 | goto err_out; | | 1123 | goto err_out; |
1124 | } | | 1124 | } |
1125 | | | 1125 | |
1126 | error = ixgbe_start_hw(hw); | | 1126 | error = ixgbe_start_hw(hw); |
1127 | switch (error) { | | 1127 | switch (error) { |
1128 | case IXGBE_ERR_EEPROM_VERSION: | | 1128 | case IXGBE_ERR_EEPROM_VERSION: |
1129 | aprint_error_dev(dev, "This device is a pre-production adapter/" | | 1129 | aprint_error_dev(dev, "This device is a pre-production adapter/" |
1130 | "LOM. Please be aware there may be issues associated " | | 1130 | "LOM. Please be aware there may be issues associated " |
1131 | "with your hardware.\nIf you are experiencing problems " | | 1131 | "with your hardware.\nIf you are experiencing problems " |
1132 | "please contact your Intel or hardware representative " | | 1132 | "please contact your Intel or hardware representative " |
1133 | "who provided you with this hardware.\n"); | | 1133 | "who provided you with this hardware.\n"); |
1134 | break; | | 1134 | break; |
1135 | case IXGBE_ERR_SFP_NOT_SUPPORTED: | | 1135 | case IXGBE_ERR_SFP_NOT_SUPPORTED: |
1136 | aprint_error_dev(dev, "Unsupported SFP+ Module\n"); | | 1136 | aprint_error_dev(dev, "Unsupported SFP+ Module\n"); |
1137 | error = EIO; | | 1137 | error = EIO; |
1138 | goto err_late; | | 1138 | goto err_late; |
1139 | case IXGBE_ERR_SFP_NOT_PRESENT: | | 1139 | case IXGBE_ERR_SFP_NOT_PRESENT: |
1140 | aprint_error_dev(dev, "No SFP+ Module found\n"); | | 1140 | aprint_error_dev(dev, "No SFP+ Module found\n"); |
1141 | /* falls thru */ | | 1141 | /* falls thru */ |
1142 | default: | | 1142 | default: |
1143 | break; | | 1143 | break; |
1144 | } | | 1144 | } |
1145 | | | 1145 | |
1146 | /* Setup OS specific network interface */ | | 1146 | /* Setup OS specific network interface */ |
1147 | if (ixgbe_setup_interface(dev, adapter) != 0) | | 1147 | if (ixgbe_setup_interface(dev, adapter) != 0) |
1148 | goto err_late; | | 1148 | goto err_late; |
1149 | | | 1149 | |
1150 | /* | | 1150 | /* |
1151 | * Print PHY ID only for copper PHY. On device which has SFP(+) cage | | 1151 | * Print PHY ID only for copper PHY. On device which has SFP(+) cage |
1152 | * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID. | | 1152 | * and a module is inserted, phy.id is not MII PHY id but SFF 8024 ID. |
1153 | */ | | 1153 | */ |
1154 | if (hw->phy.media_type == ixgbe_media_type_copper) { | | 1154 | if (hw->phy.media_type == ixgbe_media_type_copper) { |
1155 | uint16_t id1, id2; | | 1155 | uint16_t id1, id2; |
1156 | int oui, model, rev; | | 1156 | int oui, model, rev; |
1157 | const char *descr; | | 1157 | const char *descr; |
1158 | | | 1158 | |
1159 | id1 = hw->phy.id >> 16; | | 1159 | id1 = hw->phy.id >> 16; |
1160 | id2 = hw->phy.id & 0xffff; | | 1160 | id2 = hw->phy.id & 0xffff; |
1161 | oui = MII_OUI(id1, id2); | | 1161 | oui = MII_OUI(id1, id2); |
1162 | model = MII_MODEL(id2); | | 1162 | model = MII_MODEL(id2); |
1163 | rev = MII_REV(id2); | | 1163 | rev = MII_REV(id2); |
1164 | if ((descr = mii_get_descr(oui, model)) != NULL) | | 1164 | if ((descr = mii_get_descr(oui, model)) != NULL) |
1165 | aprint_normal_dev(dev, | | 1165 | aprint_normal_dev(dev, |
1166 | "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n", | | 1166 | "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n", |
1167 | descr, oui, model, rev); | | 1167 | descr, oui, model, rev); |
1168 | else | | 1168 | else |
1169 | aprint_normal_dev(dev, | | 1169 | aprint_normal_dev(dev, |
1170 | "PHY OUI 0x%06x, model 0x%04x, rev. %d\n", | | 1170 | "PHY OUI 0x%06x, model 0x%04x, rev. %d\n", |
1171 | oui, model, rev); | | 1171 | oui, model, rev); |
1172 | } | | 1172 | } |
1173 | | | 1173 | |
1174 | /* Enable the optics for 82599 SFP+ fiber */ | | 1174 | /* Enable the optics for 82599 SFP+ fiber */ |
1175 | ixgbe_enable_tx_laser(hw); | | 1175 | ixgbe_enable_tx_laser(hw); |
1176 | | | 1176 | |
1177 | /* Enable EEE power saving */ | | 1177 | /* Enable EEE power saving */ |
1178 | if (adapter->feat_cap & IXGBE_FEATURE_EEE) | | 1178 | if (adapter->feat_cap & IXGBE_FEATURE_EEE) |
1179 | hw->mac.ops.setup_eee(hw, | | 1179 | hw->mac.ops.setup_eee(hw, |
1180 | adapter->feat_en & IXGBE_FEATURE_EEE); | | 1180 | adapter->feat_en & IXGBE_FEATURE_EEE); |
1181 | | | 1181 | |
1182 | /* Enable power to the phy. */ | | 1182 | /* Enable power to the phy. */ |
1183 | ixgbe_set_phy_power(hw, TRUE); | | 1183 | ixgbe_set_phy_power(hw, TRUE); |
1184 | | | 1184 | |
1185 | /* Initialize statistics */ | | 1185 | /* Initialize statistics */ |
1186 | ixgbe_update_stats_counters(adapter); | | 1186 | ixgbe_update_stats_counters(adapter); |
1187 | | | 1187 | |
1188 | /* Check PCIE slot type/speed/width */ | | 1188 | /* Check PCIE slot type/speed/width */ |
1189 | ixgbe_get_slot_info(adapter); | | 1189 | ixgbe_get_slot_info(adapter); |
1190 | | | 1190 | |
1191 | /* | | 1191 | /* |
1192 | * Do time init and sysctl init here, but | | 1192 | * Do time init and sysctl init here, but |
1193 | * only on the first port of a bypass adapter. | | 1193 | * only on the first port of a bypass adapter. |
1194 | */ | | 1194 | */ |
1195 | ixgbe_bypass_init(adapter); | | 1195 | ixgbe_bypass_init(adapter); |
1196 | | | 1196 | |
1197 | /* Set an initial dmac value */ | | 1197 | /* Set an initial dmac value */ |
1198 | adapter->dmac = 0; | | 1198 | adapter->dmac = 0; |
1199 | /* Set initial advertised speeds (if applicable) */ | | 1199 | /* Set initial advertised speeds (if applicable) */ |
1200 | adapter->advertise = ixgbe_get_advertise(adapter); | | 1200 | adapter->advertise = ixgbe_get_advertise(adapter); |
1201 | | | 1201 | |
1202 | if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) | | 1202 | if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) |
1203 | ixgbe_define_iov_schemas(dev, &error); | | 1203 | ixgbe_define_iov_schemas(dev, &error); |
1204 | | | 1204 | |
1205 | /* Add sysctls */ | | 1205 | /* Add sysctls */ |
1206 | ixgbe_add_device_sysctls(adapter); | | 1206 | ixgbe_add_device_sysctls(adapter); |
1207 | ixgbe_add_hw_stats(adapter); | | 1207 | ixgbe_add_hw_stats(adapter); |
1208 | | | 1208 | |
1209 | /* For Netmap */ | | 1209 | /* For Netmap */ |
1210 | adapter->init_locked = ixgbe_init_locked; | | 1210 | adapter->init_locked = ixgbe_init_locked; |
1211 | adapter->stop_locked = ixgbe_stop; | | 1211 | adapter->stop_locked = ixgbe_stop; |
1212 | | | 1212 | |
1213 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) | | 1213 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) |
1214 | ixgbe_netmap_attach(adapter); | | 1214 | ixgbe_netmap_attach(adapter); |
1215 | | | 1215 | |
1216 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); | | 1216 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); |
1217 | aprint_verbose_dev(dev, "feature cap %s\n", buf); | | 1217 | aprint_verbose_dev(dev, "feature cap %s\n", buf); |
1218 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); | | 1218 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); |
1219 | aprint_verbose_dev(dev, "feature ena %s\n", buf); | | 1219 | aprint_verbose_dev(dev, "feature ena %s\n", buf); |
1220 | | | 1220 | |
1221 | if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume)) | | 1221 | if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume)) |
1222 | pmf_class_network_register(dev, adapter->ifp); | | 1222 | pmf_class_network_register(dev, adapter->ifp); |
1223 | else | | 1223 | else |
1224 | aprint_error_dev(dev, "couldn't establish power handler\n"); | | 1224 | aprint_error_dev(dev, "couldn't establish power handler\n"); |
1225 | | | 1225 | |
1226 | /* Init recovery mode timer and state variable */ | | 1226 | /* Init recovery mode timer and state variable */ |
1227 | if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { | | 1227 | if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { |
1228 | adapter->recovery_mode = 0; | | 1228 | adapter->recovery_mode = 0; |
1229 | | | 1229 | |
1230 | /* Set up the timer callout */ | | 1230 | /* Set up the timer callout */ |
1231 | callout_init(&adapter->recovery_mode_timer, | | 1231 | callout_init(&adapter->recovery_mode_timer, |
1232 | IXGBE_CALLOUT_FLAGS); | | 1232 | IXGBE_CALLOUT_FLAGS); |
1233 | | | 1233 | |
1234 | /* Start the task */ | | 1234 | /* Start the task */ |
1235 | callout_reset(&adapter->recovery_mode_timer, hz, | | 1235 | callout_reset(&adapter->recovery_mode_timer, hz, |
1236 | ixgbe_recovery_mode_timer, adapter); | | 1236 | ixgbe_recovery_mode_timer, adapter); |
1237 | } | | 1237 | } |
1238 | | | 1238 | |
1239 | INIT_DEBUGOUT("ixgbe_attach: end"); | | 1239 | INIT_DEBUGOUT("ixgbe_attach: end"); |
1240 | adapter->osdep.attached = true; | | 1240 | adapter->osdep.attached = true; |
1241 | | | 1241 | |
1242 | return; | | 1242 | return; |
1243 | | | 1243 | |
1244 | err_late: | | 1244 | err_late: |
1245 | ixgbe_free_transmit_structures(adapter); | | 1245 | ixgbe_free_transmit_structures(adapter); |
1246 | ixgbe_free_receive_structures(adapter); | | 1246 | ixgbe_free_receive_structures(adapter); |
1247 | free(adapter->queues, M_DEVBUF); | | 1247 | free(adapter->queues, M_DEVBUF); |
1248 | err_out: | | 1248 | err_out: |
1249 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); | | 1249 | ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); |
1250 | ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; | | 1250 | ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; |
1251 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); | | 1251 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); |
1252 | ixgbe_free_softint(adapter); | | 1252 | ixgbe_free_softint(adapter); |
1253 | ixgbe_free_pci_resources(adapter); | | 1253 | ixgbe_free_pci_resources(adapter); |
1254 | if (adapter->mta != NULL) | | 1254 | if (adapter->mta != NULL) |
1255 | free(adapter->mta, M_DEVBUF); | | 1255 | free(adapter->mta, M_DEVBUF); |
1256 | IXGBE_CORE_LOCK_DESTROY(adapter); | | 1256 | IXGBE_CORE_LOCK_DESTROY(adapter); |
1257 | | | 1257 | |
1258 | return; | | 1258 | return; |
1259 | } /* ixgbe_attach */ | | 1259 | } /* ixgbe_attach */ |
1260 | | | 1260 | |
1261 | /************************************************************************ | | 1261 | /************************************************************************ |
1262 | * ixgbe_check_wol_support | | 1262 | * ixgbe_check_wol_support |
1263 | * | | 1263 | * |
1264 | * Checks whether the adapter's ports are capable of | | 1264 | * Checks whether the adapter's ports are capable of |
1265 | * Wake On LAN by reading the adapter's NVM. | | 1265 | * Wake On LAN by reading the adapter's NVM. |
1266 | * | | 1266 | * |
1267 | * Sets each port's hw->wol_enabled value depending | | 1267 | * Sets each port's hw->wol_enabled value depending |
1268 | * on the value read here. | | 1268 | * on the value read here. |
1269 | ************************************************************************/ | | 1269 | ************************************************************************/ |
1270 | static void | | 1270 | static void |
1271 | ixgbe_check_wol_support(struct adapter *adapter) | | 1271 | ixgbe_check_wol_support(struct adapter *adapter) |
1272 | { | | 1272 | { |
1273 | struct ixgbe_hw *hw = &adapter->hw; | | 1273 | struct ixgbe_hw *hw = &adapter->hw; |
1274 | u16 dev_caps = 0; | | 1274 | u16 dev_caps = 0; |
1275 | | | 1275 | |
1276 | /* Find out WoL support for port */ | | 1276 | /* Find out WoL support for port */ |
1277 | adapter->wol_support = hw->wol_enabled = 0; | | 1277 | adapter->wol_support = hw->wol_enabled = 0; |
1278 | ixgbe_get_device_caps(hw, &dev_caps); | | 1278 | ixgbe_get_device_caps(hw, &dev_caps); |
1279 | if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || | | 1279 | if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || |
1280 | ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && | | 1280 | ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && |
1281 | hw->bus.func == 0)) | | 1281 | hw->bus.func == 0)) |
1282 | adapter->wol_support = hw->wol_enabled = 1; | | 1282 | adapter->wol_support = hw->wol_enabled = 1; |
1283 | | | 1283 | |
1284 | /* Save initial wake up filter configuration */ | | 1284 | /* Save initial wake up filter configuration */ |
1285 | adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); | | 1285 | adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); |
1286 | | | 1286 | |
1287 | return; | | 1287 | return; |
1288 | } /* ixgbe_check_wol_support */ | | 1288 | } /* ixgbe_check_wol_support */ |
1289 | | | 1289 | |
1290 | /************************************************************************ | | 1290 | /************************************************************************ |
1291 | * ixgbe_setup_interface | | 1291 | * ixgbe_setup_interface |
1292 | * | | 1292 | * |
1293 | * Setup networking device structure and register an interface. | | 1293 | * Setup networking device structure and register an interface. |
1294 | ************************************************************************/ | | 1294 | ************************************************************************/ |
1295 | static int | | 1295 | static int |
1296 | ixgbe_setup_interface(device_t dev, struct adapter *adapter) | | 1296 | ixgbe_setup_interface(device_t dev, struct adapter *adapter) |
1297 | { | | 1297 | { |
1298 | struct ethercom *ec = &adapter->osdep.ec; | | 1298 | struct ethercom *ec = &adapter->osdep.ec; |
1299 | struct ifnet *ifp; | | 1299 | struct ifnet *ifp; |
1300 | int rv; | | 1300 | int rv; |
1301 | | | 1301 | |
1302 | INIT_DEBUGOUT("ixgbe_setup_interface: begin"); | | 1302 | INIT_DEBUGOUT("ixgbe_setup_interface: begin"); |
1303 | | | 1303 | |
1304 | ifp = adapter->ifp = &ec->ec_if; | | 1304 | ifp = adapter->ifp = &ec->ec_if; |
1305 | strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); | | 1305 | strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); |
1306 | ifp->if_baudrate = IF_Gbps(10); | | 1306 | ifp->if_baudrate = IF_Gbps(10); |
1307 | ifp->if_init = ixgbe_init; | | 1307 | ifp->if_init = ixgbe_init; |
1308 | ifp->if_stop = ixgbe_ifstop; | | 1308 | ifp->if_stop = ixgbe_ifstop; |
1309 | ifp->if_softc = adapter; | | 1309 | ifp->if_softc = adapter; |
1310 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 1310 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
1311 | #ifdef IXGBE_MPSAFE | | 1311 | #ifdef IXGBE_MPSAFE |
1312 | ifp->if_extflags = IFEF_MPSAFE; | | 1312 | ifp->if_extflags = IFEF_MPSAFE; |
1313 | #endif | | 1313 | #endif |
1314 | ifp->if_ioctl = ixgbe_ioctl; | | 1314 | ifp->if_ioctl = ixgbe_ioctl; |
1315 | #if __FreeBSD_version >= 1100045 | | 1315 | #if __FreeBSD_version >= 1100045 |
1316 | /* TSO parameters */ | | 1316 | /* TSO parameters */ |
1317 | ifp->if_hw_tsomax = 65518; | | 1317 | ifp->if_hw_tsomax = 65518; |
1318 | ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; | | 1318 | ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; |
1319 | ifp->if_hw_tsomaxsegsize = 2048; | | 1319 | ifp->if_hw_tsomaxsegsize = 2048; |
1320 | #endif | | 1320 | #endif |
1321 | if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { | | 1321 | if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { |
1322 | #if 0 | | 1322 | #if 0 |
1323 | ixgbe_start_locked = ixgbe_legacy_start_locked; | | 1323 | ixgbe_start_locked = ixgbe_legacy_start_locked; |
1324 | #endif | | 1324 | #endif |
1325 | } else { | | 1325 | } else { |
1326 | ifp->if_transmit = ixgbe_mq_start; | | 1326 | ifp->if_transmit = ixgbe_mq_start; |
1327 | #if 0 | | 1327 | #if 0 |
1328 | ixgbe_start_locked = ixgbe_mq_start_locked; | | 1328 | ixgbe_start_locked = ixgbe_mq_start_locked; |
1329 | #endif | | 1329 | #endif |
1330 | } | | 1330 | } |
1331 | ifp->if_start = ixgbe_legacy_start; | | 1331 | ifp->if_start = ixgbe_legacy_start; |
1332 | IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); | | 1332 | IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); |
1333 | IFQ_SET_READY(&ifp->if_snd); | | 1333 | IFQ_SET_READY(&ifp->if_snd); |
1334 | | | 1334 | |
1335 | rv = if_initialize(ifp); | | 1335 | rv = if_initialize(ifp); |
1336 | if (rv != 0) { | | 1336 | if (rv != 0) { |
1337 | aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); | | 1337 | aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); |
1338 | return rv; | | 1338 | return rv; |
1339 | } | | 1339 | } |
1340 | adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); | | 1340 | adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); |
1341 | ether_ifattach(ifp, adapter->hw.mac.addr); | | 1341 | ether_ifattach(ifp, adapter->hw.mac.addr); |
1342 | /* | | 1342 | /* |
1343 | * We use per TX queue softint, so if_deferred_start_init() isn't | | 1343 | * We use per TX queue softint, so if_deferred_start_init() isn't |
1344 | * used. | | 1344 | * used. |
1345 | */ | | 1345 | */ |
1346 | ether_set_ifflags_cb(ec, ixgbe_ifflags_cb); | | 1346 | ether_set_ifflags_cb(ec, ixgbe_ifflags_cb); |
1347 | | | 1347 | |
1348 | adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; | | 1348 | adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; |
1349 | | | 1349 | |
1350 | /* | | 1350 | /* |
1351 | * Tell the upper layer(s) we support long frames. | | 1351 | * Tell the upper layer(s) we support long frames. |
1352 | */ | | 1352 | */ |
1353 | ifp->if_hdrlen = sizeof(struct ether_vlan_header); | | 1353 | ifp->if_hdrlen = sizeof(struct ether_vlan_header); |
1354 | | | 1354 | |
1355 | /* Set capability flags */ | | 1355 | /* Set capability flags */ |
1356 | ifp->if_capabilities |= IFCAP_RXCSUM | | 1356 | ifp->if_capabilities |= IFCAP_RXCSUM |
1357 | | IFCAP_TXCSUM | | 1357 | | IFCAP_TXCSUM |
1358 | | IFCAP_TSOv4 | | 1358 | | IFCAP_TSOv4 |
1359 | | IFCAP_TSOv6; | | 1359 | | IFCAP_TSOv6; |
1360 | ifp->if_capenable = 0; | | 1360 | ifp->if_capenable = 0; |
1361 | | | 1361 | |
1362 | ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | | 1362 | ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING |
1363 | | ETHERCAP_VLAN_HWCSUM | | 1363 | | ETHERCAP_VLAN_HWCSUM |
1364 | | ETHERCAP_JUMBO_MTU | | 1364 | | ETHERCAP_JUMBO_MTU |
1365 | | ETHERCAP_VLAN_MTU; | | 1365 | | ETHERCAP_VLAN_MTU; |
1366 | | | 1366 | |
1367 | /* Enable the above capabilities by default */ | | 1367 | /* Enable the above capabilities by default */ |
1368 | ec->ec_capenable = ec->ec_capabilities; | | 1368 | ec->ec_capenable = ec->ec_capabilities; |
1369 | | | 1369 | |
1370 | /* | | 1370 | /* |
1371 | * Don't turn this on by default, if vlans are | | 1371 | * Don't turn this on by default, if vlans are |
1372 | * created on another pseudo device (eg. lagg) | | 1372 | * created on another pseudo device (eg. lagg) |
1373 | * then vlan events are not passed thru, breaking | | 1373 | * then vlan events are not passed thru, breaking |
1374 | * operation, but with HW FILTER off it works. If | | 1374 | * operation, but with HW FILTER off it works. If |
1375 | * using vlans directly on the ixgbe driver you can | | 1375 | * using vlans directly on the ixgbe driver you can |
1376 | * enable this and get full hardware tag filtering. | | 1376 | * enable this and get full hardware tag filtering. |
1377 | */ | | 1377 | */ |
1378 | ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; | | 1378 | ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; |
1379 | | | 1379 | |
1380 | /* | | 1380 | /* |
1381 | * Specify the media types supported by this adapter and register | | 1381 | * Specify the media types supported by this adapter and register |
1382 | * callbacks to update media and link information | | 1382 | * callbacks to update media and link information |
1383 | */ | | 1383 | */ |
1384 | ec->ec_ifmedia = &adapter->media; | | 1384 | ec->ec_ifmedia = &adapter->media; |
1385 | ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, | | 1385 | ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, |
1386 | ixgbe_media_status); | | 1386 | ixgbe_media_status); |
1387 | | | 1387 | |
1388 | adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); | | 1388 | adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); |
1389 | ixgbe_add_media_types(adapter); | | 1389 | ixgbe_add_media_types(adapter); |
1390 | | | 1390 | |
1391 | /* Set autoselect media by default */ | | 1391 | /* Set autoselect media by default */ |
1392 | ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); | | 1392 | ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); |
1393 | | | 1393 | |
1394 | if_register(ifp); | | 1394 | if_register(ifp); |
1395 | | | 1395 | |
1396 | return (0); | | 1396 | return (0); |
1397 | } /* ixgbe_setup_interface */ | | 1397 | } /* ixgbe_setup_interface */ |
1398 | | | 1398 | |
1399 | /************************************************************************ | | 1399 | /************************************************************************ |
1400 | * ixgbe_add_media_types | | 1400 | * ixgbe_add_media_types |
1401 | ************************************************************************/ | | 1401 | ************************************************************************/ |
1402 | static void | | 1402 | static void |
1403 | ixgbe_add_media_types(struct adapter *adapter) | | 1403 | ixgbe_add_media_types(struct adapter *adapter) |
1404 | { | | 1404 | { |
1405 | struct ixgbe_hw *hw = &adapter->hw; | | 1405 | struct ixgbe_hw *hw = &adapter->hw; |
1406 | device_t dev = adapter->dev; | | 1406 | device_t dev = adapter->dev; |
1407 | u64 layer; | | 1407 | u64 layer; |
1408 | | | 1408 | |
1409 | layer = adapter->phy_layer; | | 1409 | layer = adapter->phy_layer; |
1410 | | | 1410 | |
1411 | #define ADD(mm, dd) \ | | 1411 | #define ADD(mm, dd) \ |
1412 | ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL); | | 1412 | ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL); |
1413 | | | 1413 | |
1414 | ADD(IFM_NONE, 0); | | 1414 | ADD(IFM_NONE, 0); |
1415 | | | 1415 | |
1416 | /* Media types with matching NetBSD media defines */ | | 1416 | /* Media types with matching NetBSD media defines */ |
1417 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { | | 1417 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { |
1418 | ADD(IFM_10G_T | IFM_FDX, 0); | | 1418 | ADD(IFM_10G_T | IFM_FDX, 0); |
1419 | } | | 1419 | } |
1420 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { | | 1420 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { |
1421 | ADD(IFM_1000_T | IFM_FDX, 0); | | 1421 | ADD(IFM_1000_T | IFM_FDX, 0); |
1422 | } | | 1422 | } |
1423 | if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) { | | 1423 | if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) { |
1424 | ADD(IFM_100_TX | IFM_FDX, 0); | | 1424 | ADD(IFM_100_TX | IFM_FDX, 0); |
1425 | } | | 1425 | } |
1426 | if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) { | | 1426 | if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) { |
1427 | ADD(IFM_10_T | IFM_FDX, 0); | | 1427 | ADD(IFM_10_T | IFM_FDX, 0); |
1428 | } | | 1428 | } |
1429 | | | 1429 | |
1430 | if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || | | 1430 | if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || |
1431 | layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) { | | 1431 | layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) { |
1432 | ADD(IFM_10G_TWINAX | IFM_FDX, 0); | | 1432 | ADD(IFM_10G_TWINAX | IFM_FDX, 0); |
1433 | } | | 1433 | } |
1434 | | | 1434 | |
1435 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { | | 1435 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { |
1436 | ADD(IFM_10G_LR | IFM_FDX, 0); | | 1436 | ADD(IFM_10G_LR | IFM_FDX, 0); |
1437 | if (hw->phy.multispeed_fiber) { | | 1437 | if (hw->phy.multispeed_fiber) { |
1438 | ADD(IFM_1000_LX | IFM_FDX, 0); | | 1438 | ADD(IFM_1000_LX | IFM_FDX, 0); |
1439 | } | | 1439 | } |
1440 | } | | 1440 | } |
1441 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { | | 1441 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { |
1442 | ADD(IFM_10G_SR | IFM_FDX, 0); | | 1442 | ADD(IFM_10G_SR | IFM_FDX, 0); |
1443 | if (hw->phy.multispeed_fiber) { | | 1443 | if (hw->phy.multispeed_fiber) { |
1444 | ADD(IFM_1000_SX | IFM_FDX, 0); | | 1444 | ADD(IFM_1000_SX | IFM_FDX, 0); |
1445 | } | | 1445 | } |
1446 | } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { | | 1446 | } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { |
1447 | ADD(IFM_1000_SX | IFM_FDX, 0); | | 1447 | ADD(IFM_1000_SX | IFM_FDX, 0); |
1448 | } | | 1448 | } |
1449 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) { | | 1449 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) { |
1450 | ADD(IFM_10G_CX4 | IFM_FDX, 0); | | 1450 | ADD(IFM_10G_CX4 | IFM_FDX, 0); |
1451 | } | | 1451 | } |
1452 | | | 1452 | |
1453 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { | | 1453 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { |
1454 | ADD(IFM_10G_KR | IFM_FDX, 0); | | 1454 | ADD(IFM_10G_KR | IFM_FDX, 0); |
1455 | } | | 1455 | } |
1456 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { | | 1456 | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { |
1457 | ADD(IFM_10G_KX4 | IFM_FDX, 0); | | 1457 | ADD(IFM_10G_KX4 | IFM_FDX, 0); |
1458 | } | | 1458 | } |
1459 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { | | 1459 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { |
1460 | ADD(IFM_1000_KX | IFM_FDX, 0); | | 1460 | ADD(IFM_1000_KX | IFM_FDX, 0); |
1461 | } | | 1461 | } |
1462 | if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { | | 1462 | if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { |
1463 | ADD(IFM_2500_KX | IFM_FDX, 0); | | 1463 | ADD(IFM_2500_KX | IFM_FDX, 0); |
1464 | } | | 1464 | } |
1465 | if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) { | | 1465 | if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) { |
1466 | ADD(IFM_2500_T | IFM_FDX, 0); | | 1466 | ADD(IFM_2500_T | IFM_FDX, 0); |
1467 | } | | 1467 | } |
1468 | if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) { | | 1468 | if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) { |
1469 | ADD(IFM_5000_T | IFM_FDX, 0); | | 1469 | ADD(IFM_5000_T | IFM_FDX, 0); |
1470 | } | | 1470 | } |
1471 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) | | 1471 | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) |
1472 | device_printf(dev, "Media supported: 1000baseBX\n"); | | 1472 | device_printf(dev, "Media supported: 1000baseBX\n"); |
1473 | /* XXX no ifmedia_set? */ | | 1473 | /* XXX no ifmedia_set? */ |
1474 | | | 1474 | |
1475 | ADD(IFM_AUTO, 0); | | 1475 | ADD(IFM_AUTO, 0); |
1476 | | | 1476 | |
1477 | #undef ADD | | 1477 | #undef ADD |
1478 | } /* ixgbe_add_media_types */ | | 1478 | } /* ixgbe_add_media_types */ |
1479 | | | 1479 | |
1480 | /************************************************************************ | | 1480 | /************************************************************************ |
1481 | * ixgbe_is_sfp | | 1481 | * ixgbe_is_sfp |
1482 | ************************************************************************/ | | 1482 | ************************************************************************/ |
1483 | static inline bool | | 1483 | static inline bool |
1484 | ixgbe_is_sfp(struct ixgbe_hw *hw) | | 1484 | ixgbe_is_sfp(struct ixgbe_hw *hw) |
1485 | { | | 1485 | { |
1486 | switch (hw->mac.type) { | | 1486 | switch (hw->mac.type) { |
1487 | case ixgbe_mac_82598EB: | | 1487 | case ixgbe_mac_82598EB: |
1488 | if (hw->phy.type == ixgbe_phy_nl) | | 1488 | if (hw->phy.type == ixgbe_phy_nl) |
1489 | return (TRUE); | | 1489 | return (TRUE); |
1490 | return (FALSE); | | 1490 | return (FALSE); |
1491 | case ixgbe_mac_82599EB: | | 1491 | case ixgbe_mac_82599EB: |
| | | 1492 | case ixgbe_mac_X550EM_x: |
| | | 1493 | case ixgbe_mac_X550EM_a: |
1492 | switch (hw->mac.ops.get_media_type(hw)) { | | 1494 | switch (hw->mac.ops.get_media_type(hw)) { |
1493 | case ixgbe_media_type_fiber: | | 1495 | case ixgbe_media_type_fiber: |
1494 | case ixgbe_media_type_fiber_qsfp: | | 1496 | case ixgbe_media_type_fiber_qsfp: |
1495 | return (TRUE); | | 1497 | return (TRUE); |
1496 | default: | | 1498 | default: |
1497 | return (FALSE); | | 1499 | return (FALSE); |
1498 | } | | 1500 | } |
1499 | case ixgbe_mac_X550EM_x: | | | |
1500 | case ixgbe_mac_X550EM_a: | | | |
1501 | if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) | | | |
1502 | return (TRUE); | | | |
1503 | return (FALSE); | | | |
1504 | default: | | 1501 | default: |
1505 | return (FALSE); | | 1502 | return (FALSE); |
1506 | } | | 1503 | } |
1507 | } /* ixgbe_is_sfp */ | | 1504 | } /* ixgbe_is_sfp */ |
1508 | | | 1505 | |
1509 | /************************************************************************ | | 1506 | /************************************************************************ |
1510 | * ixgbe_config_link | | 1507 | * ixgbe_config_link |
1511 | ************************************************************************/ | | 1508 | ************************************************************************/ |
1512 | static void | | 1509 | static void |
1513 | ixgbe_config_link(struct adapter *adapter) | | 1510 | ixgbe_config_link(struct adapter *adapter) |
1514 | { | | 1511 | { |
1515 | struct ixgbe_hw *hw = &adapter->hw; | | 1512 | struct ixgbe_hw *hw = &adapter->hw; |
1516 | u32 autoneg, err = 0; | | 1513 | u32 autoneg, err = 0; |
1517 | bool sfp, negotiate = false; | | 1514 | bool sfp, negotiate = false; |
1518 | | | 1515 | |
1519 | sfp = ixgbe_is_sfp(hw); | | 1516 | sfp = ixgbe_is_sfp(hw); |
1520 | | | 1517 | |
1521 | if (sfp) { | | 1518 | if (sfp) { |
1522 | if (hw->phy.multispeed_fiber) { | | 1519 | if (hw->phy.multispeed_fiber) { |
1523 | ixgbe_enable_tx_laser(hw); | | 1520 | ixgbe_enable_tx_laser(hw); |
1524 | kpreempt_disable(); | | 1521 | kpreempt_disable(); |
1525 | softint_schedule(adapter->msf_si); | | 1522 | softint_schedule(adapter->msf_si); |
1526 | kpreempt_enable(); | | 1523 | kpreempt_enable(); |
1527 | } | | 1524 | } |
1528 | kpreempt_disable(); | | 1525 | kpreempt_disable(); |
1529 | softint_schedule(adapter->mod_si); | | 1526 | softint_schedule(adapter->mod_si); |
1530 | kpreempt_enable(); | | 1527 | kpreempt_enable(); |
1531 | } else { | | 1528 | } else { |
1532 | struct ifmedia *ifm = &adapter->media; | | 1529 | struct ifmedia *ifm = &adapter->media; |
1533 | | | 1530 | |
1534 | if (hw->mac.ops.check_link) | | 1531 | if (hw->mac.ops.check_link) |
1535 | err = ixgbe_check_link(hw, &adapter->link_speed, | | 1532 | err = ixgbe_check_link(hw, &adapter->link_speed, |
1536 | &adapter->link_up, FALSE); | | 1533 | &adapter->link_up, FALSE); |
1537 | if (err) | | 1534 | if (err) |
1538 | return; | | 1535 | return; |
1539 | | | 1536 | |
1540 | /* | | 1537 | /* |
1541 | * Check if it's the first call. If it's the first call, | | 1538 | * Check if it's the first call. If it's the first call, |
1542 | * get value for auto negotiation. | | 1539 | * get value for auto negotiation. |
1543 | */ | | 1540 | */ |
1544 | autoneg = hw->phy.autoneg_advertised; | | 1541 | autoneg = hw->phy.autoneg_advertised; |
1545 | if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE) | | 1542 | if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE) |
1546 | && ((!autoneg) && (hw->mac.ops.get_link_capabilities))) | | 1543 | && ((!autoneg) && (hw->mac.ops.get_link_capabilities))) |
1547 | err = hw->mac.ops.get_link_capabilities(hw, &autoneg, | | 1544 | err = hw->mac.ops.get_link_capabilities(hw, &autoneg, |
1548 | &negotiate); | | 1545 | &negotiate); |
1549 | if (err) | | 1546 | if (err) |
1550 | return; | | 1547 | return; |
1551 | if (hw->mac.ops.setup_link) | | 1548 | if (hw->mac.ops.setup_link) |
1552 | err = hw->mac.ops.setup_link(hw, autoneg, | | 1549 | err = hw->mac.ops.setup_link(hw, autoneg, |
1553 | adapter->link_up); | | 1550 | adapter->link_up); |
1554 | } | | 1551 | } |
1555 | | | 1552 | |
1556 | } /* ixgbe_config_link */ | | 1553 | } /* ixgbe_config_link */ |
1557 | | | 1554 | |
1558 | /************************************************************************ | | 1555 | /************************************************************************ |
1559 | * ixgbe_update_stats_counters - Update board statistics counters. | | 1556 | * ixgbe_update_stats_counters - Update board statistics counters. |
1560 | ************************************************************************/ | | 1557 | ************************************************************************/ |
1561 | static void | | 1558 | static void |
1562 | ixgbe_update_stats_counters(struct adapter *adapter) | | 1559 | ixgbe_update_stats_counters(struct adapter *adapter) |
1563 | { | | 1560 | { |
1564 | struct ifnet *ifp = adapter->ifp; | | 1561 | struct ifnet *ifp = adapter->ifp; |
1565 | struct ixgbe_hw *hw = &adapter->hw; | | 1562 | struct ixgbe_hw *hw = &adapter->hw; |
1566 | struct ixgbe_hw_stats *stats = &adapter->stats.pf; | | 1563 | struct ixgbe_hw_stats *stats = &adapter->stats.pf; |
1567 | u32 missed_rx = 0, bprc, lxon, lxoff, total; | | 1564 | u32 missed_rx = 0, bprc, lxon, lxoff, total; |
1568 | u64 total_missed_rx = 0; | | 1565 | u64 total_missed_rx = 0; |
1569 | uint64_t crcerrs, rlec; | | 1566 | uint64_t crcerrs, rlec; |
1570 | unsigned int queue_counters; | | 1567 | unsigned int queue_counters; |
1571 | int i; | | 1568 | int i; |
1572 | | | 1569 | |
1573 | crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS); | | 1570 | crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
1574 | stats->crcerrs.ev_count += crcerrs; | | 1571 | stats->crcerrs.ev_count += crcerrs; |
1575 | stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC); | | 1572 | stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC); |
1576 | stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC); | | 1573 | stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC); |
1577 | stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC); | | 1574 | stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC); |
1578 | if (hw->mac.type == ixgbe_mac_X550) | | 1575 | if (hw->mac.type == ixgbe_mac_X550) |
1579 | stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC); | | 1576 | stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC); |
1580 | | | 1577 | |
1581 | /* 16 registers exist */ | | 1578 | /* 16 registers exist */ |
1582 | queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues); | | 1579 | queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues); |
1583 | for (i = 0; i < queue_counters; i++) { | | 1580 | for (i = 0; i < queue_counters; i++) { |
1584 | stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); | | 1581 | stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); |
1585 | stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); | | 1582 | stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); |
1586 | if (hw->mac.type >= ixgbe_mac_82599EB) { | | 1583 | if (hw->mac.type >= ixgbe_mac_82599EB) { |
1587 | stats->qprdc[i].ev_count | | 1584 | stats->qprdc[i].ev_count |
1588 | += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | | 1585 | += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
1589 | } | | 1586 | } |
1590 | } | | 1587 | } |
1591 | | | 1588 | |
1592 | /* 8 registers exist */ | | 1589 | /* 8 registers exist */ |
1593 | for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { | | 1590 | for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { |
1594 | uint32_t mp; | | 1591 | uint32_t mp; |
1595 | | | 1592 | |
1596 | /* MPC */ | | 1593 | /* MPC */ |
1597 | mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); | | 1594 | mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); |
1598 | /* global total per queue */ | | 1595 | /* global total per queue */ |
1599 | stats->mpc[i].ev_count += mp; | | 1596 | stats->mpc[i].ev_count += mp; |
1600 | /* running comprehensive total for stats display */ | | 1597 | /* running comprehensive total for stats display */ |
1601 | total_missed_rx += mp; | | 1598 | total_missed_rx += mp; |
1602 | | | 1599 | |
1603 | if (hw->mac.type == ixgbe_mac_82598EB) | | 1600 | if (hw->mac.type == ixgbe_mac_82598EB) |
1604 | stats->rnbc[i].ev_count | | 1601 | stats->rnbc[i].ev_count |
1605 | += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); | | 1602 | += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); |
1606 | | | 1603 | |
1607 | stats->pxontxc[i].ev_count | | 1604 | stats->pxontxc[i].ev_count |
1608 | += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); | | 1605 | += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); |
1609 | stats->pxofftxc[i].ev_count | | 1606 | stats->pxofftxc[i].ev_count |
1610 | += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); | | 1607 | += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); |
1611 | if (hw->mac.type >= ixgbe_mac_82599EB) { | | 1608 | if (hw->mac.type >= ixgbe_mac_82599EB) { |
1612 | stats->pxonrxc[i].ev_count | | 1609 | stats->pxonrxc[i].ev_count |
1613 | += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); | | 1610 | += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); |
1614 | stats->pxoffrxc[i].ev_count | | 1611 | stats->pxoffrxc[i].ev_count |
1615 | += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); | | 1612 | += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); |
1616 | stats->pxon2offc[i].ev_count | | 1613 | stats->pxon2offc[i].ev_count |
1617 | += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); | | 1614 | += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); |
1618 | } else { | | 1615 | } else { |
1619 | stats->pxonrxc[i].ev_count | | 1616 | stats->pxonrxc[i].ev_count |
1620 | += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); | | 1617 | += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); |
1621 | stats->pxoffrxc[i].ev_count | | 1618 | stats->pxoffrxc[i].ev_count |
1622 | += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); | | 1619 | += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); |
1623 | } | | 1620 | } |
1624 | } | | 1621 | } |
1625 | stats->mpctotal.ev_count += total_missed_rx; | | 1622 | stats->mpctotal.ev_count += total_missed_rx; |
1626 | | | 1623 | |
1627 | /* Document says M[LR]FC are valid when link is up and 10Gbps */ | | 1624 | /* Document says M[LR]FC are valid when link is up and 10Gbps */ |
1628 | if ((adapter->link_active == LINK_STATE_UP) | | 1625 | if ((adapter->link_active == LINK_STATE_UP) |
1629 | && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) { | | 1626 | && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) { |
1630 | stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC); | | 1627 | stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC); |
1631 | stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC); | | 1628 | stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC); |
1632 | } | | 1629 | } |
1633 | rlec = IXGBE_READ_REG(hw, IXGBE_RLEC); | | 1630 | rlec = IXGBE_READ_REG(hw, IXGBE_RLEC); |
1634 | stats->rlec.ev_count += rlec; | | 1631 | stats->rlec.ev_count += rlec; |
1635 | | | 1632 | |
1636 | /* Hardware workaround, gprc counts missed packets */ | | 1633 | /* Hardware workaround, gprc counts missed packets */ |
1637 | stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx; | | 1634 | stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx; |
1638 | | | 1635 | |
1639 | lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); | | 1636 | lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); |
1640 | stats->lxontxc.ev_count += lxon; | | 1637 | stats->lxontxc.ev_count += lxon; |
1641 | lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); | | 1638 | lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); |
1642 | stats->lxofftxc.ev_count += lxoff; | | 1639 | stats->lxofftxc.ev_count += lxoff; |
1643 | total = lxon + lxoff; | | 1640 | total = lxon + lxoff; |
1644 | | | 1641 | |
1645 | if (hw->mac.type != ixgbe_mac_82598EB) { | | 1642 | if (hw->mac.type != ixgbe_mac_82598EB) { |
1646 | stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) + | | 1643 | stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) + |
1647 | ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); | | 1644 | ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); |
1648 | stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) + | | 1645 | stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) + |
1649 | ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN; | | 1646 | ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN; |
1650 | stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) + | | 1647 | stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) + |
1651 | ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); | | 1648 | ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); |
1652 | stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); | | 1649 | stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); |
1653 | stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); | | 1650 | stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); |
1654 | } else { | | 1651 | } else { |
1655 | stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC); | | 1652 | stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC); |
1656 | stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); | | 1653 | stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); |
1657 | /* 82598 only has a counter in the high register */ | | 1654 | /* 82598 only has a counter in the high register */ |
1658 | stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH); | | 1655 | stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH); |
1659 | stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN; | | 1656 | stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN; |
1660 | stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH); | | 1657 | stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH); |
1661 | } | | 1658 | } |
1662 | | | 1659 | |
1663 | /* | | 1660 | /* |
1664 | * Workaround: mprc hardware is incorrectly counting | | 1661 | * Workaround: mprc hardware is incorrectly counting |
1665 | * broadcasts, so for now we subtract those. | | 1662 | * broadcasts, so for now we subtract those. |
1666 | */ | | 1663 | */ |
1667 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); | | 1664 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); |
1668 | stats->bprc.ev_count += bprc; | | 1665 | stats->bprc.ev_count += bprc; |
1669 | stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) | | 1666 | stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) |
1670 | - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0); | | 1667 | - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0); |
1671 | | | 1668 | |
1672 | stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64); | | 1669 | stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64); |
1673 | stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127); | | 1670 | stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127); |
1674 | stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255); | | 1671 | stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255); |
1675 | stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511); | | 1672 | stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511); |
1676 | stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023); | | 1673 | stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023); |
1677 | stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522); | | 1674 | stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522); |
1678 | | | 1675 | |
1679 | stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total; | | 1676 | stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total; |
1680 | stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total; | | 1677 | stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total; |
1681 | stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total; | | 1678 | stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total; |
1682 | | | 1679 | |
1683 | stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC); | | 1680 | stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC); |
1684 | stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC); | | 1681 | stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC); |
1685 | stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC); | | 1682 | stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC); |
1686 | stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC); | | 1683 | stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC); |
1687 | stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC); | | 1684 | stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC); |
1688 | stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC); | | 1685 | stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC); |
1689 | stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC); | | 1686 | stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC); |
1690 | stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR); | | 1687 | stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR); |
1691 | stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT); | | 1688 | stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT); |
1692 | stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127); | | 1689 | stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127); |
1693 | stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255); | | 1690 | stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255); |
1694 | stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511); | | 1691 | stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511); |
1695 | stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023); | | 1692 | stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023); |
1696 | stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522); | | 1693 | stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522); |
1697 | stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC); | | 1694 | stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC); |
1698 | stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC); | | 1695 | stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC); |
1699 | stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC); | | 1696 | stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC); |
1700 | stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST); | | 1697 | stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST); |
1701 | /* Only read FCOE on 82599 */ | | 1698 | /* Only read FCOE on 82599 */ |
1702 | if (hw->mac.type != ixgbe_mac_82598EB) { | | 1699 | if (hw->mac.type != ixgbe_mac_82598EB) { |
1703 | stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | | 1700 | stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); |
1704 | stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); | | 1701 | stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); |
1705 | stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); | | 1702 | stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); |
1706 | stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | | 1703 | stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); |
1707 | stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | | 1704 | stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); |
1708 | } | | 1705 | } |
1709 | | | 1706 | |
1710 | /* Fill out the OS statistics structure */ | | 1707 | /* Fill out the OS statistics structure */ |
1711 | /* | | 1708 | /* |
1712 | * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with | | 1709 | * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with |
1713 | * adapter->stats counters. It's required to make ifconfig -z | | 1710 | * adapter->stats counters. It's required to make ifconfig -z |
1714 | * (SOICZIFDATA) work. | | 1711 | * (SOICZIFDATA) work. |
1715 | */ | | 1712 | */ |
1716 | ifp->if_collisions = 0; | | 1713 | ifp->if_collisions = 0; |
1717 | | | 1714 | |
1718 | /* Rx Errors */ | | 1715 | /* Rx Errors */ |
1719 | ifp->if_iqdrops += total_missed_rx; | | 1716 | ifp->if_iqdrops += total_missed_rx; |
1720 | ifp->if_ierrors += crcerrs + rlec; | | 1717 | ifp->if_ierrors += crcerrs + rlec; |
1721 | } /* ixgbe_update_stats_counters */ | | 1718 | } /* ixgbe_update_stats_counters */ |
1722 | | | 1719 | |
1723 | /************************************************************************ | | 1720 | /************************************************************************ |
1724 | * ixgbe_add_hw_stats | | 1721 | * ixgbe_add_hw_stats |
1725 | * | | 1722 | * |
1726 | * Add sysctl variables, one per statistic, to the system. | | 1723 | * Add sysctl variables, one per statistic, to the system. |
1727 | ************************************************************************/ | | 1724 | ************************************************************************/ |
1728 | static void | | 1725 | static void |
1729 | ixgbe_add_hw_stats(struct adapter *adapter) | | 1726 | ixgbe_add_hw_stats(struct adapter *adapter) |
1730 | { | | 1727 | { |
1731 | device_t dev = adapter->dev; | | 1728 | device_t dev = adapter->dev; |
1732 | const struct sysctlnode *rnode, *cnode; | | 1729 | const struct sysctlnode *rnode, *cnode; |
1733 | struct sysctllog **log = &adapter->sysctllog; | | 1730 | struct sysctllog **log = &adapter->sysctllog; |
1734 | struct tx_ring *txr = adapter->tx_rings; | | 1731 | struct tx_ring *txr = adapter->tx_rings; |
1735 | struct rx_ring *rxr = adapter->rx_rings; | | 1732 | struct rx_ring *rxr = adapter->rx_rings; |
1736 | struct ixgbe_hw *hw = &adapter->hw; | | 1733 | struct ixgbe_hw *hw = &adapter->hw; |
1737 | struct ixgbe_hw_stats *stats = &adapter->stats.pf; | | 1734 | struct ixgbe_hw_stats *stats = &adapter->stats.pf; |
1738 | const char *xname = device_xname(dev); | | 1735 | const char *xname = device_xname(dev); |
1739 | int i; | | 1736 | int i; |
1740 | | | 1737 | |
1741 | /* Driver Statistics */ | | 1738 | /* Driver Statistics */ |
1742 | evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, | | 1739 | evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, |
1743 | NULL, xname, "Driver tx dma soft fail EFBIG"); | | 1740 | NULL, xname, "Driver tx dma soft fail EFBIG"); |
1744 | evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, | | 1741 | evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, |
1745 | NULL, xname, "m_defrag() failed"); | | 1742 | NULL, xname, "m_defrag() failed"); |
1746 | evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, | | 1743 | evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, |
1747 | NULL, xname, "Driver tx dma hard fail EFBIG"); | | 1744 | NULL, xname, "Driver tx dma hard fail EFBIG"); |
1748 | evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, | | 1745 | evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, |
1749 | NULL, xname, "Driver tx dma hard fail EINVAL"); | | 1746 | NULL, xname, "Driver tx dma hard fail EINVAL"); |
1750 | evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, | | 1747 | evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, |
1751 | NULL, xname, "Driver tx dma hard fail other"); | | 1748 | NULL, xname, "Driver tx dma hard fail other"); |
1752 | evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, | | 1749 | evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, |
1753 | NULL, xname, "Driver tx dma soft fail EAGAIN"); | | 1750 | NULL, xname, "Driver tx dma soft fail EAGAIN"); |
1754 | evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, | | 1751 | evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, |
1755 | NULL, xname, "Driver tx dma soft fail ENOMEM"); | | 1752 | NULL, xname, "Driver tx dma soft fail ENOMEM"); |
1756 | evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, | | 1753 | evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, |
1757 | NULL, xname, "Watchdog timeouts"); | | 1754 | NULL, xname, "Watchdog timeouts"); |
1758 | evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, | | 1755 | evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, |
1759 | NULL, xname, "TSO errors"); | | 1756 | NULL, xname, "TSO errors"); |
1760 | evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR, | | 1757 | evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR, |
1761 | NULL, xname, "Link MSI-X IRQ Handled"); | | 1758 | NULL, xname, "Link MSI-X IRQ Handled"); |
1762 | evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR, | | 1759 | evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR, |
1763 | NULL, xname, "Link softint"); | | 1760 | NULL, xname, "Link softint"); |
1764 | evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR, | | 1761 | evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR, |
1765 | NULL, xname, "module softint"); | | 1762 | NULL, xname, "module softint"); |
1766 | evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR, | | 1763 | evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR, |
1767 | NULL, xname, "multimode softint"); | | 1764 | NULL, xname, "multimode softint"); |
1768 | evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR, | | 1765 | evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR, |
1769 | NULL, xname, "external PHY softint"); | | 1766 | NULL, xname, "external PHY softint"); |
1770 | | | 1767 | |
1771 | /* Max number of traffic class is 8 */ | | 1768 | /* Max number of traffic class is 8 */ |
1772 | KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8); | | 1769 | KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8); |
1773 | for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { | | 1770 | for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { |
1774 | snprintf(adapter->tcs[i].evnamebuf, | | 1771 | snprintf(adapter->tcs[i].evnamebuf, |
1775 | sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", | | 1772 | sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", |
1776 | xname, i); | | 1773 | xname, i); |
1777 | if (i < __arraycount(stats->mpc)) { | | 1774 | if (i < __arraycount(stats->mpc)) { |
1778 | evcnt_attach_dynamic(&stats->mpc[i], | | 1775 | evcnt_attach_dynamic(&stats->mpc[i], |
1779 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, | | 1776 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, |
1780 | "RX Missed Packet Count"); | | 1777 | "RX Missed Packet Count"); |
1781 | if (hw->mac.type == ixgbe_mac_82598EB) | | 1778 | if (hw->mac.type == ixgbe_mac_82598EB) |
1782 | evcnt_attach_dynamic(&stats->rnbc[i], | | 1779 | evcnt_attach_dynamic(&stats->rnbc[i], |
1783 | EVCNT_TYPE_MISC, NULL, | | 1780 | EVCNT_TYPE_MISC, NULL, |
1784 | adapter->tcs[i].evnamebuf, | | 1781 | adapter->tcs[i].evnamebuf, |
1785 | "Receive No Buffers"); | | 1782 | "Receive No Buffers"); |
1786 | } | | 1783 | } |
1787 | if (i < __arraycount(stats->pxontxc)) { | | 1784 | if (i < __arraycount(stats->pxontxc)) { |
1788 | evcnt_attach_dynamic(&stats->pxontxc[i], | | 1785 | evcnt_attach_dynamic(&stats->pxontxc[i], |
1789 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, | | 1786 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, |
1790 | "pxontxc"); | | 1787 | "pxontxc"); |
1791 | evcnt_attach_dynamic(&stats->pxonrxc[i], | | 1788 | evcnt_attach_dynamic(&stats->pxonrxc[i], |
1792 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, | | 1789 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, |
1793 | "pxonrxc"); | | 1790 | "pxonrxc"); |
1794 | evcnt_attach_dynamic(&stats->pxofftxc[i], | | 1791 | evcnt_attach_dynamic(&stats->pxofftxc[i], |
1795 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, | | 1792 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, |
1796 | "pxofftxc"); | | 1793 | "pxofftxc"); |
1797 | evcnt_attach_dynamic(&stats->pxoffrxc[i], | | 1794 | evcnt_attach_dynamic(&stats->pxoffrxc[i], |
1798 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, | | 1795 | EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, |
1799 | "pxoffrxc"); | | 1796 | "pxoffrxc"); |
1800 | if (hw->mac.type >= ixgbe_mac_82599EB) | | 1797 | if (hw->mac.type >= ixgbe_mac_82599EB) |
1801 | evcnt_attach_dynamic(&stats->pxon2offc[i], | | 1798 | evcnt_attach_dynamic(&stats->pxon2offc[i], |
1802 | EVCNT_TYPE_MISC, NULL, | | 1799 | EVCNT_TYPE_MISC, NULL, |
1803 | adapter->tcs[i].evnamebuf, | | 1800 | adapter->tcs[i].evnamebuf, |
1804 | "pxon2offc"); | | 1801 | "pxon2offc"); |
1805 | } | | 1802 | } |
1806 | } | | 1803 | } |
1807 | | | 1804 | |
1808 | for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { | | 1805 | for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { |
1809 | #ifdef LRO | | 1806 | #ifdef LRO |
1810 | struct lro_ctrl *lro = &rxr->lro; | | 1807 | struct lro_ctrl *lro = &rxr->lro; |
1811 | #endif /* LRO */ | | 1808 | #endif /* LRO */ |
1812 | | | 1809 | |
1813 | snprintf(adapter->queues[i].evnamebuf, | | 1810 | snprintf(adapter->queues[i].evnamebuf, |
1814 | sizeof(adapter->queues[i].evnamebuf), "%s q%d", | | 1811 | sizeof(adapter->queues[i].evnamebuf), "%s q%d", |
1815 | xname, i); | | 1812 | xname, i); |
1816 | snprintf(adapter->queues[i].namebuf, | | 1813 | snprintf(adapter->queues[i].namebuf, |
1817 | sizeof(adapter->queues[i].namebuf), "q%d", i); | | 1814 | sizeof(adapter->queues[i].namebuf), "q%d", i); |
1818 | | | 1815 | |
1819 | if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { | | 1816 | if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { |
1820 | aprint_error_dev(dev, "could not create sysctl root\n"); | | 1817 | aprint_error_dev(dev, "could not create sysctl root\n"); |
1821 | break; | | 1818 | break; |
1822 | } | | 1819 | } |
1823 | | | 1820 | |
1824 | if (sysctl_createv(log, 0, &rnode, &rnode, | | 1821 | if (sysctl_createv(log, 0, &rnode, &rnode, |
1825 | 0, CTLTYPE_NODE, | | 1822 | 0, CTLTYPE_NODE, |
1826 | adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), | | 1823 | adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), |
1827 | NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) | | 1824 | NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) |
1828 | break; | | 1825 | break; |
1829 | | | 1826 | |
1830 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 1827 | if (sysctl_createv(log, 0, &rnode, &cnode, |
1831 | CTLFLAG_READWRITE, CTLTYPE_INT, | | 1828 | CTLFLAG_READWRITE, CTLTYPE_INT, |
1832 | "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), | | 1829 | "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), |
1833 | ixgbe_sysctl_interrupt_rate_handler, 0, | | 1830 | ixgbe_sysctl_interrupt_rate_handler, 0, |
1834 | (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) | | 1831 | (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) |
1835 | break; | | 1832 | break; |
1836 | | | 1833 | |
1837 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 1834 | if (sysctl_createv(log, 0, &rnode, &cnode, |
1838 | CTLFLAG_READONLY, CTLTYPE_INT, | | 1835 | CTLFLAG_READONLY, CTLTYPE_INT, |
1839 | "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), | | 1836 | "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), |
1840 | ixgbe_sysctl_tdh_handler, 0, (void *)txr, | | 1837 | ixgbe_sysctl_tdh_handler, 0, (void *)txr, |
1841 | 0, CTL_CREATE, CTL_EOL) != 0) | | 1838 | 0, CTL_CREATE, CTL_EOL) != 0) |
1842 | break; | | 1839 | break; |
1843 | | | 1840 | |
1844 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 1841 | if (sysctl_createv(log, 0, &rnode, &cnode, |
1845 | CTLFLAG_READONLY, CTLTYPE_INT, | | 1842 | CTLFLAG_READONLY, CTLTYPE_INT, |
1846 | "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), | | 1843 | "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), |
1847 | ixgbe_sysctl_tdt_handler, 0, (void *)txr, | | 1844 | ixgbe_sysctl_tdt_handler, 0, (void *)txr, |
1848 | 0, CTL_CREATE, CTL_EOL) != 0) | | 1845 | 0, CTL_CREATE, CTL_EOL) != 0) |
1849 | break; | | 1846 | break; |
1850 | | | 1847 | |
1851 | evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, | | 1848 | evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, |
1852 | NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); | | 1849 | NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); |
1853 | evcnt_attach_dynamic(&adapter->queues[i].handleq, | | 1850 | evcnt_attach_dynamic(&adapter->queues[i].handleq, |
1854 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, | | 1851 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, |
1855 | "Handled queue in softint"); | | 1852 | "Handled queue in softint"); |
1856 | evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, | | 1853 | evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, |
1857 | NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); | | 1854 | NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); |
1858 | evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, | | 1855 | evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, |
1859 | NULL, adapter->queues[i].evnamebuf, "TSO"); | | 1856 | NULL, adapter->queues[i].evnamebuf, "TSO"); |
1860 | evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, | | 1857 | evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, |
1861 | NULL, adapter->queues[i].evnamebuf, | | 1858 | NULL, adapter->queues[i].evnamebuf, |
1862 | "Queue No Descriptor Available"); | | 1859 | "Queue No Descriptor Available"); |
1863 | evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, | | 1860 | evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, |
1864 | NULL, adapter->queues[i].evnamebuf, | | 1861 | NULL, adapter->queues[i].evnamebuf, |
1865 | "Queue Packets Transmitted"); | | 1862 | "Queue Packets Transmitted"); |
1866 | #ifndef IXGBE_LEGACY_TX | | 1863 | #ifndef IXGBE_LEGACY_TX |
1867 | evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, | | 1864 | evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, |
1868 | NULL, adapter->queues[i].evnamebuf, | | 1865 | NULL, adapter->queues[i].evnamebuf, |
1869 | "Packets dropped in pcq"); | | 1866 | "Packets dropped in pcq"); |
1870 | #endif | | 1867 | #endif |
1871 | | | 1868 | |
1872 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 1869 | if (sysctl_createv(log, 0, &rnode, &cnode, |
1873 | CTLFLAG_READONLY, | | 1870 | CTLFLAG_READONLY, |
1874 | CTLTYPE_INT, | | 1871 | CTLTYPE_INT, |
1875 | "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"), | | 1872 | "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"), |
1876 | ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0, | | 1873 | ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0, |
1877 | CTL_CREATE, CTL_EOL) != 0) | | 1874 | CTL_CREATE, CTL_EOL) != 0) |
1878 | break; | | 1875 | break; |
1879 | | | 1876 | |
1880 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 1877 | if (sysctl_createv(log, 0, &rnode, &cnode, |
1881 | CTLFLAG_READONLY, | | 1878 | CTLFLAG_READONLY, |
1882 | CTLTYPE_INT, | | 1879 | CTLTYPE_INT, |
1883 | "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"), | | 1880 | "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"), |
1884 | ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0, | | 1881 | ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0, |
1885 | CTL_CREATE, CTL_EOL) != 0) | | 1882 | CTL_CREATE, CTL_EOL) != 0) |
1886 | break; | | 1883 | break; |
1887 | | | 1884 | |
1888 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 1885 | if (sysctl_createv(log, 0, &rnode, &cnode, |
1889 | CTLFLAG_READONLY, | | 1886 | CTLFLAG_READONLY, |
1890 | CTLTYPE_INT, | | 1887 | CTLTYPE_INT, |
1891 | "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"), | | 1888 | "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"), |
1892 | ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0, | | 1889 | ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0, |
1893 | CTL_CREATE, CTL_EOL) != 0) | | 1890 | CTL_CREATE, CTL_EOL) != 0) |
1894 | break; | | 1891 | break; |
1895 | | | 1892 | |
1896 | if (i < __arraycount(stats->qprc)) { | | 1893 | if (i < __arraycount(stats->qprc)) { |
1897 | evcnt_attach_dynamic(&stats->qprc[i], | | 1894 | evcnt_attach_dynamic(&stats->qprc[i], |
1898 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, | | 1895 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, |
1899 | "qprc"); | | 1896 | "qprc"); |
1900 | evcnt_attach_dynamic(&stats->qptc[i], | | 1897 | evcnt_attach_dynamic(&stats->qptc[i], |
1901 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, | | 1898 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, |
1902 | "qptc"); | | 1899 | "qptc"); |
1903 | evcnt_attach_dynamic(&stats->qbrc[i], | | 1900 | evcnt_attach_dynamic(&stats->qbrc[i], |
1904 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, | | 1901 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, |
1905 | "qbrc"); | | 1902 | "qbrc"); |
1906 | evcnt_attach_dynamic(&stats->qbtc[i], | | 1903 | evcnt_attach_dynamic(&stats->qbtc[i], |
1907 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, | | 1904 | EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, |
1908 | "qbtc"); | | 1905 | "qbtc"); |
1909 | if (hw->mac.type >= ixgbe_mac_82599EB) | | 1906 | if (hw->mac.type >= ixgbe_mac_82599EB) |
1910 | evcnt_attach_dynamic(&stats->qprdc[i], | | 1907 | evcnt_attach_dynamic(&stats->qprdc[i], |
1911 | EVCNT_TYPE_MISC, NULL, | | 1908 | EVCNT_TYPE_MISC, NULL, |
1912 | adapter->queues[i].evnamebuf, "qprdc"); | | 1909 | adapter->queues[i].evnamebuf, "qprdc"); |
1913 | } | | 1910 | } |
1914 | | | 1911 | |
1915 | evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, | | 1912 | evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, |
1916 | NULL, adapter->queues[i].evnamebuf, "Queue Packets Received"); | | 1913 | NULL, adapter->queues[i].evnamebuf, "Queue Packets Received"); |
1917 | evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, | | 1914 | evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, |
1918 | NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received"); | | 1915 | NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received"); |
1919 | evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, | | 1916 | evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, |
1920 | NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); | | 1917 | NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); |
1921 | evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC, | | 1918 | evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC, |
1922 | NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf"); | | 1919 | NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf"); |
1923 | evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, | | 1920 | evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, |
1924 | NULL, adapter->queues[i].evnamebuf, "Rx discarded"); | | 1921 | NULL, adapter->queues[i].evnamebuf, "Rx discarded"); |
1925 | #ifdef LRO | | 1922 | #ifdef LRO |
1926 | SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", | | 1923 | SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", |
1927 | CTLFLAG_RD, &lro->lro_queued, 0, | | 1924 | CTLFLAG_RD, &lro->lro_queued, 0, |
1928 | "LRO Queued"); | | 1925 | "LRO Queued"); |
1929 | SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", | | 1926 | SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", |
1930 | CTLFLAG_RD, &lro->lro_flushed, 0, | | 1927 | CTLFLAG_RD, &lro->lro_flushed, 0, |
1931 | "LRO Flushed"); | | 1928 | "LRO Flushed"); |
1932 | #endif /* LRO */ | | 1929 | #endif /* LRO */ |
1933 | } | | 1930 | } |
1934 | | | 1931 | |
1935 | /* MAC stats get their own sub node */ | | 1932 | /* MAC stats get their own sub node */ |
1936 | | | 1933 | |
1937 | snprintf(stats->namebuf, | | 1934 | snprintf(stats->namebuf, |
1938 | sizeof(stats->namebuf), "%s MAC Statistics", xname); | | 1935 | sizeof(stats->namebuf), "%s MAC Statistics", xname); |
1939 | | | 1936 | |
1940 | evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, | | 1937 | evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, |
1941 | stats->namebuf, "rx csum offload - IP"); | | 1938 | stats->namebuf, "rx csum offload - IP"); |
1942 | evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, | | 1939 | evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, |
1943 | stats->namebuf, "rx csum offload - L4"); | | 1940 | stats->namebuf, "rx csum offload - L4"); |
1944 | evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, | | 1941 | evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, |
1945 | stats->namebuf, "rx csum offload - IP bad"); | | 1942 | stats->namebuf, "rx csum offload - IP bad"); |
1946 | evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, | | 1943 | evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, |
1947 | stats->namebuf, "rx csum offload - L4 bad"); | | 1944 | stats->namebuf, "rx csum offload - L4 bad"); |
1948 | evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL, | | 1945 | evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL, |
1949 | stats->namebuf, "Interrupt conditions zero"); | | 1946 | stats->namebuf, "Interrupt conditions zero"); |
1950 | evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL, | | 1947 | evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL, |
1951 | stats->namebuf, "Legacy interrupts"); | | 1948 | stats->namebuf, "Legacy interrupts"); |
1952 | | | 1949 | |
1953 | evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL, | | 1950 | evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL, |
1954 | stats->namebuf, "CRC Errors"); | | 1951 | stats->namebuf, "CRC Errors"); |
1955 | evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL, | | 1952 | evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL, |
1956 | stats->namebuf, "Illegal Byte Errors"); | | 1953 | stats->namebuf, "Illegal Byte Errors"); |
1957 | evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL, | | 1954 | evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL, |
1958 | stats->namebuf, "Byte Errors"); | | 1955 | stats->namebuf, "Byte Errors"); |
1959 | evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL, | | 1956 | evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL, |
1960 | stats->namebuf, "MAC Short Packets Discarded"); | | 1957 | stats->namebuf, "MAC Short Packets Discarded"); |
1961 | if (hw->mac.type >= ixgbe_mac_X550) | | 1958 | if (hw->mac.type >= ixgbe_mac_X550) |
1962 | evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL, | | 1959 | evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL, |
1963 | stats->namebuf, "Bad SFD"); | | 1960 | stats->namebuf, "Bad SFD"); |
1964 | evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL, | | 1961 | evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL, |
1965 | stats->namebuf, "Total Packets Missed"); | | 1962 | stats->namebuf, "Total Packets Missed"); |
1966 | evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL, | | 1963 | evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL, |
1967 | stats->namebuf, "MAC Local Faults"); | | 1964 | stats->namebuf, "MAC Local Faults"); |
1968 | evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL, | | 1965 | evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL, |
1969 | stats->namebuf, "MAC Remote Faults"); | | 1966 | stats->namebuf, "MAC Remote Faults"); |
1970 | evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL, | | 1967 | evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL, |
1971 | stats->namebuf, "Receive Length Errors"); | | 1968 | stats->namebuf, "Receive Length Errors"); |
1972 | evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL, | | 1969 | evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL, |
1973 | stats->namebuf, "Link XON Transmitted"); | | 1970 | stats->namebuf, "Link XON Transmitted"); |
1974 | evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL, | | 1971 | evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL, |
1975 | stats->namebuf, "Link XON Received"); | | 1972 | stats->namebuf, "Link XON Received"); |
1976 | evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL, | | 1973 | evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL, |
1977 | stats->namebuf, "Link XOFF Transmitted"); | | 1974 | stats->namebuf, "Link XOFF Transmitted"); |
1978 | evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL, | | 1975 | evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL, |
1979 | stats->namebuf, "Link XOFF Received"); | | 1976 | stats->namebuf, "Link XOFF Received"); |
1980 | | | 1977 | |
1981 | /* Packet Reception Stats */ | | 1978 | /* Packet Reception Stats */ |
1982 | evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL, | | 1979 | evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL, |
1983 | stats->namebuf, "Total Octets Received"); | | 1980 | stats->namebuf, "Total Octets Received"); |
1984 | evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL, | | 1981 | evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL, |
1985 | stats->namebuf, "Good Octets Received"); | | 1982 | stats->namebuf, "Good Octets Received"); |
1986 | evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL, | | 1983 | evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL, |
1987 | stats->namebuf, "Total Packets Received"); | | 1984 | stats->namebuf, "Total Packets Received"); |
1988 | evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL, | | 1985 | evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL, |
1989 | stats->namebuf, "Good Packets Received"); | | 1986 | stats->namebuf, "Good Packets Received"); |
1990 | evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL, | | 1987 | evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL, |
1991 | stats->namebuf, "Multicast Packets Received"); | | 1988 | stats->namebuf, "Multicast Packets Received"); |
1992 | evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL, | | 1989 | evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL, |
1993 | stats->namebuf, "Broadcast Packets Received"); | | 1990 | stats->namebuf, "Broadcast Packets Received"); |
1994 | evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL, | | 1991 | evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL, |
1995 | stats->namebuf, "64 byte frames received "); | | 1992 | stats->namebuf, "64 byte frames received "); |
1996 | evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL, | | 1993 | evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL, |
1997 | stats->namebuf, "65-127 byte frames received"); | | 1994 | stats->namebuf, "65-127 byte frames received"); |
1998 | evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL, | | 1995 | evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL, |
1999 | stats->namebuf, "128-255 byte frames received"); | | 1996 | stats->namebuf, "128-255 byte frames received"); |
2000 | evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL, | | 1997 | evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL, |
2001 | stats->namebuf, "256-511 byte frames received"); | | 1998 | stats->namebuf, "256-511 byte frames received"); |
2002 | evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL, | | 1999 | evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL, |
2003 | stats->namebuf, "512-1023 byte frames received"); | | 2000 | stats->namebuf, "512-1023 byte frames received"); |
2004 | evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL, | | 2001 | evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL, |
2005 | stats->namebuf, "1023-1522 byte frames received"); | | 2002 | stats->namebuf, "1023-1522 byte frames received"); |
2006 | evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL, | | 2003 | evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL, |
2007 | stats->namebuf, "Receive Undersized"); | | 2004 | stats->namebuf, "Receive Undersized"); |
2008 | evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL, | | 2005 | evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL, |
2009 | stats->namebuf, "Fragmented Packets Received "); | | 2006 | stats->namebuf, "Fragmented Packets Received "); |
2010 | evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL, | | 2007 | evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL, |
2011 | stats->namebuf, "Oversized Packets Received"); | | 2008 | stats->namebuf, "Oversized Packets Received"); |
2012 | evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL, | | 2009 | evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL, |
2013 | stats->namebuf, "Received Jabber"); | | 2010 | stats->namebuf, "Received Jabber"); |
2014 | evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL, | | 2011 | evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL, |
2015 | stats->namebuf, "Management Packets Received"); | | 2012 | stats->namebuf, "Management Packets Received"); |
2016 | evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL, | | 2013 | evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL, |
2017 | stats->namebuf, "Management Packets Dropped"); | | 2014 | stats->namebuf, "Management Packets Dropped"); |
2018 | evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL, | | 2015 | evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL, |
2019 | stats->namebuf, "Checksum Errors"); | | 2016 | stats->namebuf, "Checksum Errors"); |
2020 | | | 2017 | |
2021 | /* Packet Transmission Stats */ | | 2018 | /* Packet Transmission Stats */ |
2022 | evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL, | | 2019 | evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL, |
2023 | stats->namebuf, "Good Octets Transmitted"); | | 2020 | stats->namebuf, "Good Octets Transmitted"); |
2024 | evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL, | | 2021 | evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL, |
2025 | stats->namebuf, "Total Packets Transmitted"); | | 2022 | stats->namebuf, "Total Packets Transmitted"); |
2026 | evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL, | | 2023 | evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL, |
2027 | stats->namebuf, "Good Packets Transmitted"); | | 2024 | stats->namebuf, "Good Packets Transmitted"); |
2028 | evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL, | | 2025 | evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL, |
2029 | stats->namebuf, "Broadcast Packets Transmitted"); | | 2026 | stats->namebuf, "Broadcast Packets Transmitted"); |
2030 | evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL, | | 2027 | evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL, |
2031 | stats->namebuf, "Multicast Packets Transmitted"); | | 2028 | stats->namebuf, "Multicast Packets Transmitted"); |
2032 | evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL, | | 2029 | evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL, |
2033 | stats->namebuf, "Management Packets Transmitted"); | | 2030 | stats->namebuf, "Management Packets Transmitted"); |
2034 | evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL, | | 2031 | evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL, |
2035 | stats->namebuf, "64 byte frames transmitted "); | | 2032 | stats->namebuf, "64 byte frames transmitted "); |
2036 | evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL, | | 2033 | evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL, |
2037 | stats->namebuf, "65-127 byte frames transmitted"); | | 2034 | stats->namebuf, "65-127 byte frames transmitted"); |
2038 | evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL, | | 2035 | evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL, |
2039 | stats->namebuf, "128-255 byte frames transmitted"); | | 2036 | stats->namebuf, "128-255 byte frames transmitted"); |
2040 | evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL, | | 2037 | evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL, |
2041 | stats->namebuf, "256-511 byte frames transmitted"); | | 2038 | stats->namebuf, "256-511 byte frames transmitted"); |
2042 | evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL, | | 2039 | evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL, |
2043 | stats->namebuf, "512-1023 byte frames transmitted"); | | 2040 | stats->namebuf, "512-1023 byte frames transmitted"); |
2044 | evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL, | | 2041 | evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL, |
2045 | stats->namebuf, "1024-1522 byte frames transmitted"); | | 2042 | stats->namebuf, "1024-1522 byte frames transmitted"); |
2046 | } /* ixgbe_add_hw_stats */ | | 2043 | } /* ixgbe_add_hw_stats */ |
2047 | | | 2044 | |
2048 | static void | | 2045 | static void |
2049 | ixgbe_clear_evcnt(struct adapter *adapter) | | 2046 | ixgbe_clear_evcnt(struct adapter *adapter) |
2050 | { | | 2047 | { |
2051 | struct tx_ring *txr = adapter->tx_rings; | | 2048 | struct tx_ring *txr = adapter->tx_rings; |
2052 | struct rx_ring *rxr = adapter->rx_rings; | | 2049 | struct rx_ring *rxr = adapter->rx_rings; |
2053 | struct ixgbe_hw *hw = &adapter->hw; | | 2050 | struct ixgbe_hw *hw = &adapter->hw; |
2054 | struct ixgbe_hw_stats *stats = &adapter->stats.pf; | | 2051 | struct ixgbe_hw_stats *stats = &adapter->stats.pf; |
2055 | int i; | | 2052 | int i; |
2056 | | | 2053 | |
2057 | adapter->efbig_tx_dma_setup.ev_count = 0; | | 2054 | adapter->efbig_tx_dma_setup.ev_count = 0; |
2058 | adapter->mbuf_defrag_failed.ev_count = 0; | | 2055 | adapter->mbuf_defrag_failed.ev_count = 0; |
2059 | adapter->efbig2_tx_dma_setup.ev_count = 0; | | 2056 | adapter->efbig2_tx_dma_setup.ev_count = 0; |
2060 | adapter->einval_tx_dma_setup.ev_count = 0; | | 2057 | adapter->einval_tx_dma_setup.ev_count = 0; |
2061 | adapter->other_tx_dma_setup.ev_count = 0; | | 2058 | adapter->other_tx_dma_setup.ev_count = 0; |
2062 | adapter->eagain_tx_dma_setup.ev_count = 0; | | 2059 | adapter->eagain_tx_dma_setup.ev_count = 0; |
2063 | adapter->enomem_tx_dma_setup.ev_count = 0; | | 2060 | adapter->enomem_tx_dma_setup.ev_count = 0; |
2064 | adapter->tso_err.ev_count = 0; | | 2061 | adapter->tso_err.ev_count = 0; |
2065 | adapter->watchdog_events.ev_count = 0; | | 2062 | adapter->watchdog_events.ev_count = 0; |
2066 | adapter->link_irq.ev_count = 0; | | 2063 | adapter->link_irq.ev_count = 0; |
2067 | adapter->link_sicount.ev_count = 0; | | 2064 | adapter->link_sicount.ev_count = 0; |
2068 | adapter->mod_sicount.ev_count = 0; | | 2065 | adapter->mod_sicount.ev_count = 0; |
2069 | adapter->msf_sicount.ev_count = 0; | | 2066 | adapter->msf_sicount.ev_count = 0; |
2070 | adapter->phy_sicount.ev_count = 0; | | 2067 | adapter->phy_sicount.ev_count = 0; |
2071 | | | 2068 | |
2072 | for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { | | 2069 | for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { |
2073 | if (i < __arraycount(stats->mpc)) { | | 2070 | if (i < __arraycount(stats->mpc)) { |
2074 | stats->mpc[i].ev_count = 0; | | 2071 | stats->mpc[i].ev_count = 0; |
2075 | if (hw->mac.type == ixgbe_mac_82598EB) | | 2072 | if (hw->mac.type == ixgbe_mac_82598EB) |
2076 | stats->rnbc[i].ev_count = 0; | | 2073 | stats->rnbc[i].ev_count = 0; |
2077 | } | | 2074 | } |
2078 | if (i < __arraycount(stats->pxontxc)) { | | 2075 | if (i < __arraycount(stats->pxontxc)) { |
2079 | stats->pxontxc[i].ev_count = 0; | | 2076 | stats->pxontxc[i].ev_count = 0; |
2080 | stats->pxonrxc[i].ev_count = 0; | | 2077 | stats->pxonrxc[i].ev_count = 0; |
2081 | stats->pxofftxc[i].ev_count = 0; | | 2078 | stats->pxofftxc[i].ev_count = 0; |
2082 | stats->pxoffrxc[i].ev_count = 0; | | 2079 | stats->pxoffrxc[i].ev_count = 0; |
2083 | if (hw->mac.type >= ixgbe_mac_82599EB) | | 2080 | if (hw->mac.type >= ixgbe_mac_82599EB) |
2084 | stats->pxon2offc[i].ev_count = 0; | | 2081 | stats->pxon2offc[i].ev_count = 0; |
2085 | } | | 2082 | } |
2086 | } | | 2083 | } |
2087 | | | 2084 | |
2088 | txr = adapter->tx_rings; | | 2085 | txr = adapter->tx_rings; |
2089 | for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { | | 2086 | for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { |
2090 | adapter->queues[i].irqs.ev_count = 0; | | 2087 | adapter->queues[i].irqs.ev_count = 0; |
2091 | adapter->queues[i].handleq.ev_count = 0; | | 2088 | adapter->queues[i].handleq.ev_count = 0; |
2092 | adapter->queues[i].req.ev_count = 0; | | 2089 | adapter->queues[i].req.ev_count = 0; |
2093 | txr->no_desc_avail.ev_count = 0; | | 2090 | txr->no_desc_avail.ev_count = 0; |
2094 | txr->total_packets.ev_count = 0; | | 2091 | txr->total_packets.ev_count = 0; |
2095 | txr->tso_tx.ev_count = 0; | | 2092 | txr->tso_tx.ev_count = 0; |
2096 | #ifndef IXGBE_LEGACY_TX | | 2093 | #ifndef IXGBE_LEGACY_TX |
2097 | txr->pcq_drops.ev_count = 0; | | 2094 | txr->pcq_drops.ev_count = 0; |
2098 | #endif | | 2095 | #endif |
2099 | txr->q_efbig_tx_dma_setup = 0; | | 2096 | txr->q_efbig_tx_dma_setup = 0; |
2100 | txr->q_mbuf_defrag_failed = 0; | | 2097 | txr->q_mbuf_defrag_failed = 0; |
2101 | txr->q_efbig2_tx_dma_setup = 0; | | 2098 | txr->q_efbig2_tx_dma_setup = 0; |
2102 | txr->q_einval_tx_dma_setup = 0; | | 2099 | txr->q_einval_tx_dma_setup = 0; |
2103 | txr->q_other_tx_dma_setup = 0; | | 2100 | txr->q_other_tx_dma_setup = 0; |
2104 | txr->q_eagain_tx_dma_setup = 0; | | 2101 | txr->q_eagain_tx_dma_setup = 0; |
2105 | txr->q_enomem_tx_dma_setup = 0; | | 2102 | txr->q_enomem_tx_dma_setup = 0; |
2106 | txr->q_tso_err = 0; | | 2103 | txr->q_tso_err = 0; |
2107 | | | 2104 | |
2108 | if (i < __arraycount(stats->qprc)) { | | 2105 | if (i < __arraycount(stats->qprc)) { |
2109 | stats->qprc[i].ev_count = 0; | | 2106 | stats->qprc[i].ev_count = 0; |
2110 | stats->qptc[i].ev_count = 0; | | 2107 | stats->qptc[i].ev_count = 0; |
2111 | stats->qbrc[i].ev_count = 0; | | 2108 | stats->qbrc[i].ev_count = 0; |
2112 | stats->qbtc[i].ev_count = 0; | | 2109 | stats->qbtc[i].ev_count = 0; |
2113 | if (hw->mac.type >= ixgbe_mac_82599EB) | | 2110 | if (hw->mac.type >= ixgbe_mac_82599EB) |
2114 | stats->qprdc[i].ev_count = 0; | | 2111 | stats->qprdc[i].ev_count = 0; |
2115 | } | | 2112 | } |
2116 | | | 2113 | |
2117 | rxr->rx_packets.ev_count = 0; | | 2114 | rxr->rx_packets.ev_count = 0; |
2118 | rxr->rx_bytes.ev_count = 0; | | 2115 | rxr->rx_bytes.ev_count = 0; |
2119 | rxr->rx_copies.ev_count = 0; | | 2116 | rxr->rx_copies.ev_count = 0; |
2120 | rxr->no_jmbuf.ev_count = 0; | | 2117 | rxr->no_jmbuf.ev_count = 0; |
2121 | rxr->rx_discarded.ev_count = 0; | | 2118 | rxr->rx_discarded.ev_count = 0; |
2122 | } | | 2119 | } |
2123 | stats->ipcs.ev_count = 0; | | 2120 | stats->ipcs.ev_count = 0; |
2124 | stats->l4cs.ev_count = 0; | | 2121 | stats->l4cs.ev_count = 0; |
2125 | stats->ipcs_bad.ev_count = 0; | | 2122 | stats->ipcs_bad.ev_count = 0; |
2126 | stats->l4cs_bad.ev_count = 0; | | 2123 | stats->l4cs_bad.ev_count = 0; |
2127 | stats->intzero.ev_count = 0; | | 2124 | stats->intzero.ev_count = 0; |
2128 | stats->legint.ev_count = 0; | | 2125 | stats->legint.ev_count = 0; |
2129 | stats->crcerrs.ev_count = 0; | | 2126 | stats->crcerrs.ev_count = 0; |
2130 | stats->illerrc.ev_count = 0; | | 2127 | stats->illerrc.ev_count = 0; |
2131 | stats->errbc.ev_count = 0; | | 2128 | stats->errbc.ev_count = 0; |
2132 | stats->mspdc.ev_count = 0; | | 2129 | stats->mspdc.ev_count = 0; |
2133 | stats->mbsdc.ev_count = 0; | | 2130 | stats->mbsdc.ev_count = 0; |
2134 | stats->mpctotal.ev_count = 0; | | 2131 | stats->mpctotal.ev_count = 0; |
2135 | stats->mlfc.ev_count = 0; | | 2132 | stats->mlfc.ev_count = 0; |
2136 | stats->mrfc.ev_count = 0; | | 2133 | stats->mrfc.ev_count = 0; |
2137 | stats->rlec.ev_count = 0; | | 2134 | stats->rlec.ev_count = 0; |
2138 | stats->lxontxc.ev_count = 0; | | 2135 | stats->lxontxc.ev_count = 0; |
2139 | stats->lxonrxc.ev_count = 0; | | 2136 | stats->lxonrxc.ev_count = 0; |
2140 | stats->lxofftxc.ev_count = 0; | | 2137 | stats->lxofftxc.ev_count = 0; |
2141 | stats->lxoffrxc.ev_count = 0; | | 2138 | stats->lxoffrxc.ev_count = 0; |
2142 | | | 2139 | |
2143 | /* Packet Reception Stats */ | | 2140 | /* Packet Reception Stats */ |
2144 | stats->tor.ev_count = 0; | | 2141 | stats->tor.ev_count = 0; |
2145 | stats->gorc.ev_count = 0; | | 2142 | stats->gorc.ev_count = 0; |
2146 | stats->tpr.ev_count = 0; | | 2143 | stats->tpr.ev_count = 0; |
2147 | stats->gprc.ev_count = 0; | | 2144 | stats->gprc.ev_count = 0; |
2148 | stats->mprc.ev_count = 0; | | 2145 | stats->mprc.ev_count = 0; |
2149 | stats->bprc.ev_count = 0; | | 2146 | stats->bprc.ev_count = 0; |
2150 | stats->prc64.ev_count = 0; | | 2147 | stats->prc64.ev_count = 0; |
2151 | stats->prc127.ev_count = 0; | | 2148 | stats->prc127.ev_count = 0; |
2152 | stats->prc255.ev_count = 0; | | 2149 | stats->prc255.ev_count = 0; |
2153 | stats->prc511.ev_count = 0; | | 2150 | stats->prc511.ev_count = 0; |
2154 | stats->prc1023.ev_count = 0; | | 2151 | stats->prc1023.ev_count = 0; |
2155 | stats->prc1522.ev_count = 0; | | 2152 | stats->prc1522.ev_count = 0; |
2156 | stats->ruc.ev_count = 0; | | 2153 | stats->ruc.ev_count = 0; |
2157 | stats->rfc.ev_count = 0; | | 2154 | stats->rfc.ev_count = 0; |
2158 | stats->roc.ev_count = 0; | | 2155 | stats->roc.ev_count = 0; |
2159 | stats->rjc.ev_count = 0; | | 2156 | stats->rjc.ev_count = 0; |
2160 | stats->mngprc.ev_count = 0; | | 2157 | stats->mngprc.ev_count = 0; |
2161 | stats->mngpdc.ev_count = 0; | | 2158 | stats->mngpdc.ev_count = 0; |
2162 | stats->xec.ev_count = 0; | | 2159 | stats->xec.ev_count = 0; |
2163 | | | 2160 | |
2164 | /* Packet Transmission Stats */ | | 2161 | /* Packet Transmission Stats */ |
2165 | stats->gotc.ev_count = 0; | | 2162 | stats->gotc.ev_count = 0; |
2166 | stats->tpt.ev_count = 0; | | 2163 | stats->tpt.ev_count = 0; |
2167 | stats->gptc.ev_count = 0; | | 2164 | stats->gptc.ev_count = 0; |
2168 | stats->bptc.ev_count = 0; | | 2165 | stats->bptc.ev_count = 0; |
2169 | stats->mptc.ev_count = 0; | | 2166 | stats->mptc.ev_count = 0; |
2170 | stats->mngptc.ev_count = 0; | | 2167 | stats->mngptc.ev_count = 0; |
2171 | stats->ptc64.ev_count = 0; | | 2168 | stats->ptc64.ev_count = 0; |
2172 | stats->ptc127.ev_count = 0; | | 2169 | stats->ptc127.ev_count = 0; |
2173 | stats->ptc255.ev_count = 0; | | 2170 | stats->ptc255.ev_count = 0; |
2174 | stats->ptc511.ev_count = 0; | | 2171 | stats->ptc511.ev_count = 0; |
2175 | stats->ptc1023.ev_count = 0; | | 2172 | stats->ptc1023.ev_count = 0; |
2176 | stats->ptc1522.ev_count = 0; | | 2173 | stats->ptc1522.ev_count = 0; |
2177 | } | | 2174 | } |
2178 | | | 2175 | |
2179 | /************************************************************************ | | 2176 | /************************************************************************ |
2180 | * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function | | 2177 | * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function |
2181 | * | | 2178 | * |
2182 | * Retrieves the TDH value from the hardware | | 2179 | * Retrieves the TDH value from the hardware |
2183 | ************************************************************************/ | | 2180 | ************************************************************************/ |
2184 | static int | | 2181 | static int |
2185 | ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS) | | 2182 | ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS) |
2186 | { | | 2183 | { |
2187 | struct sysctlnode node = *rnode; | | 2184 | struct sysctlnode node = *rnode; |
2188 | struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; | | 2185 | struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; |
2189 | struct adapter *adapter; | | 2186 | struct adapter *adapter; |
2190 | uint32_t val; | | 2187 | uint32_t val; |
2191 | | | 2188 | |
2192 | if (!txr) | | 2189 | if (!txr) |
2193 | return (0); | | 2190 | return (0); |
2194 | | | 2191 | |
2195 | adapter = txr->adapter; | | 2192 | adapter = txr->adapter; |
2196 | if (ixgbe_fw_recovery_mode_swflag(adapter)) | | 2193 | if (ixgbe_fw_recovery_mode_swflag(adapter)) |
2197 | return (EPERM); | | 2194 | return (EPERM); |
2198 | | | 2195 | |
2199 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)); | | 2196 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)); |
2200 | node.sysctl_data = &val; | | 2197 | node.sysctl_data = &val; |
2201 | return sysctl_lookup(SYSCTLFN_CALL(&node)); | | 2198 | return sysctl_lookup(SYSCTLFN_CALL(&node)); |
2202 | } /* ixgbe_sysctl_tdh_handler */ | | 2199 | } /* ixgbe_sysctl_tdh_handler */ |
2203 | | | 2200 | |
2204 | /************************************************************************ | | 2201 | /************************************************************************ |
2205 | * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function | | 2202 | * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function |
2206 | * | | 2203 | * |
2207 | * Retrieves the TDT value from the hardware | | 2204 | * Retrieves the TDT value from the hardware |
2208 | ************************************************************************/ | | 2205 | ************************************************************************/ |
2209 | static int | | 2206 | static int |
2210 | ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS) | | 2207 | ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS) |
2211 | { | | 2208 | { |
2212 | struct sysctlnode node = *rnode; | | 2209 | struct sysctlnode node = *rnode; |
2213 | struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; | | 2210 | struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; |
2214 | struct adapter *adapter; | | 2211 | struct adapter *adapter; |
2215 | uint32_t val; | | 2212 | uint32_t val; |
2216 | | | 2213 | |
2217 | if (!txr) | | 2214 | if (!txr) |
2218 | return (0); | | 2215 | return (0); |
2219 | | | 2216 | |
2220 | adapter = txr->adapter; | | 2217 | adapter = txr->adapter; |
2221 | if (ixgbe_fw_recovery_mode_swflag(adapter)) | | 2218 | if (ixgbe_fw_recovery_mode_swflag(adapter)) |
2222 | return (EPERM); | | 2219 | return (EPERM); |
2223 | | | 2220 | |
2224 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)); | | 2221 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)); |
2225 | node.sysctl_data = &val; | | 2222 | node.sysctl_data = &val; |
2226 | return sysctl_lookup(SYSCTLFN_CALL(&node)); | | 2223 | return sysctl_lookup(SYSCTLFN_CALL(&node)); |
2227 | } /* ixgbe_sysctl_tdt_handler */ | | 2224 | } /* ixgbe_sysctl_tdt_handler */ |
2228 | | | 2225 | |
2229 | /************************************************************************ | | 2226 | /************************************************************************ |
2230 | * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check | | 2227 | * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check |
2231 | * handler function | | 2228 | * handler function |
2232 | * | | 2229 | * |
2233 | * Retrieves the next_to_check value | | 2230 | * Retrieves the next_to_check value |
2234 | ************************************************************************/ | | 2231 | ************************************************************************/ |
2235 | static int | | 2232 | static int |
2236 | ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS) | | 2233 | ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS) |
2237 | { | | 2234 | { |
2238 | struct sysctlnode node = *rnode; | | 2235 | struct sysctlnode node = *rnode; |
2239 | struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; | | 2236 | struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; |
2240 | struct adapter *adapter; | | 2237 | struct adapter *adapter; |
2241 | uint32_t val; | | 2238 | uint32_t val; |
2242 | | | 2239 | |
2243 | if (!rxr) | | 2240 | if (!rxr) |
2244 | return (0); | | 2241 | return (0); |
2245 | | | 2242 | |
2246 | adapter = rxr->adapter; | | 2243 | adapter = rxr->adapter; |
2247 | if (ixgbe_fw_recovery_mode_swflag(adapter)) | | 2244 | if (ixgbe_fw_recovery_mode_swflag(adapter)) |
2248 | return (EPERM); | | 2245 | return (EPERM); |
2249 | | | 2246 | |
2250 | val = rxr->next_to_check; | | 2247 | val = rxr->next_to_check; |
2251 | node.sysctl_data = &val; | | 2248 | node.sysctl_data = &val; |
2252 | return sysctl_lookup(SYSCTLFN_CALL(&node)); | | 2249 | return sysctl_lookup(SYSCTLFN_CALL(&node)); |
2253 | } /* ixgbe_sysctl_next_to_check_handler */ | | 2250 | } /* ixgbe_sysctl_next_to_check_handler */ |
2254 | | | 2251 | |
2255 | /************************************************************************ | | 2252 | /************************************************************************ |
2256 | * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function | | 2253 | * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function |
2257 | * | | 2254 | * |
2258 | * Retrieves the RDH value from the hardware | | 2255 | * Retrieves the RDH value from the hardware |
2259 | ************************************************************************/ | | 2256 | ************************************************************************/ |
2260 | static int | | 2257 | static int |
2261 | ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS) | | 2258 | ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS) |
2262 | { | | 2259 | { |
2263 | struct sysctlnode node = *rnode; | | 2260 | struct sysctlnode node = *rnode; |
2264 | struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; | | 2261 | struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; |
2265 | struct adapter *adapter; | | 2262 | struct adapter *adapter; |
2266 | uint32_t val; | | 2263 | uint32_t val; |
2267 | | | 2264 | |
2268 | if (!rxr) | | 2265 | if (!rxr) |
2269 | return (0); | | 2266 | return (0); |
2270 | | | 2267 | |
2271 | adapter = rxr->adapter; | | 2268 | adapter = rxr->adapter; |
2272 | if (ixgbe_fw_recovery_mode_swflag(adapter)) | | 2269 | if (ixgbe_fw_recovery_mode_swflag(adapter)) |
2273 | return (EPERM); | | 2270 | return (EPERM); |
2274 | | | 2271 | |
2275 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me)); | | 2272 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me)); |
2276 | node.sysctl_data = &val; | | 2273 | node.sysctl_data = &val; |
2277 | return sysctl_lookup(SYSCTLFN_CALL(&node)); | | 2274 | return sysctl_lookup(SYSCTLFN_CALL(&node)); |
2278 | } /* ixgbe_sysctl_rdh_handler */ | | 2275 | } /* ixgbe_sysctl_rdh_handler */ |
2279 | | | 2276 | |
2280 | /************************************************************************ | | 2277 | /************************************************************************ |
2281 | * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function | | 2278 | * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function |
2282 | * | | 2279 | * |
2283 | * Retrieves the RDT value from the hardware | | 2280 | * Retrieves the RDT value from the hardware |
2284 | ************************************************************************/ | | 2281 | ************************************************************************/ |
2285 | static int | | 2282 | static int |
2286 | ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS) | | 2283 | ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS) |
2287 | { | | 2284 | { |
2288 | struct sysctlnode node = *rnode; | | 2285 | struct sysctlnode node = *rnode; |
2289 | struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; | | 2286 | struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; |
2290 | struct adapter *adapter; | | 2287 | struct adapter *adapter; |
2291 | uint32_t val; | | 2288 | uint32_t val; |
2292 | | | 2289 | |
2293 | if (!rxr) | | 2290 | if (!rxr) |
2294 | return (0); | | 2291 | return (0); |
2295 | | | 2292 | |
2296 | adapter = rxr->adapter; | | 2293 | adapter = rxr->adapter; |
2297 | if (ixgbe_fw_recovery_mode_swflag(adapter)) | | 2294 | if (ixgbe_fw_recovery_mode_swflag(adapter)) |
2298 | return (EPERM); | | 2295 | return (EPERM); |
2299 | | | 2296 | |
2300 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me)); | | 2297 | val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me)); |
2301 | node.sysctl_data = &val; | | 2298 | node.sysctl_data = &val; |
2302 | return sysctl_lookup(SYSCTLFN_CALL(&node)); | | 2299 | return sysctl_lookup(SYSCTLFN_CALL(&node)); |
2303 | } /* ixgbe_sysctl_rdt_handler */ | | 2300 | } /* ixgbe_sysctl_rdt_handler */ |
2304 | | | 2301 | |
2305 | static int | | 2302 | static int |
2306 | ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) | | 2303 | ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) |
2307 | { | | 2304 | { |
2308 | struct ifnet *ifp = &ec->ec_if; | | 2305 | struct ifnet *ifp = &ec->ec_if; |
2309 | struct adapter *adapter = ifp->if_softc; | | 2306 | struct adapter *adapter = ifp->if_softc; |
2310 | int rv; | | 2307 | int rv; |
2311 | | | 2308 | |
2312 | if (set) | | 2309 | if (set) |
2313 | rv = ixgbe_register_vlan(ifp->if_softc, ifp, vid); | | 2310 | rv = ixgbe_register_vlan(adapter, vid); |
2314 | else | | 2311 | else |
2315 | rv = ixgbe_unregister_vlan(ifp->if_softc, ifp, vid); | | 2312 | rv = ixgbe_unregister_vlan(adapter, vid); |
2316 | | | 2313 | |
2317 | if (rv != 0) | | 2314 | if (rv != 0) |
2318 | return rv; | | 2315 | return rv; |
2319 | | | 2316 | |
2320 | /* | | 2317 | /* |
2321 | * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0 | | 2318 | * Control VLAN HW tagging when ec_nvlan is changed from 1 to 0 |
2322 | * or 0 to 1. | | 2319 | * or 0 to 1. |
2323 | */ | | 2320 | */ |
2324 | if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0))) | | 2321 | if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0))) |
2325 | ixgbe_setup_vlan_hw_tagging(adapter); | | 2322 | ixgbe_setup_vlan_hw_tagging(adapter); |
2326 | | | 2323 | |
2327 | return rv; | | 2324 | return rv; |
2328 | } | | 2325 | } |
2329 | | | 2326 | |
2330 | /************************************************************************ | | 2327 | /************************************************************************ |
2331 | * ixgbe_register_vlan | | 2328 | * ixgbe_register_vlan |
2332 | * | | 2329 | * |
2333 | * Run via vlan config EVENT, it enables us to use the | | 2330 | * Run via vlan config EVENT, it enables us to use the |
2334 | * HW Filter table since we can get the vlan id. This | | 2331 | * HW Filter table since we can get the vlan id. This |
2335 | * just creates the entry in the soft version of the | | 2332 | * just creates the entry in the soft version of the |
2336 | * VFTA, init will repopulate the real table. | | 2333 | * VFTA, init will repopulate the real table. |
2337 | ************************************************************************/ | | 2334 | ************************************************************************/ |
2338 | static int | | 2335 | static int |
2339 | ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) | | 2336 | ixgbe_register_vlan(struct adapter *adapter, u16 vtag) |
2340 | { | | 2337 | { |
2341 | struct adapter *adapter = ifp->if_softc; | | | |
2342 | u16 index, bit; | | 2338 | u16 index, bit; |
2343 | int error; | | 2339 | int error; |
2344 | | | 2340 | |
2345 | if (ifp->if_softc != arg) /* Not our event */ | | | |
2346 | return EINVAL; | | | |
2347 | | | | |
2348 | if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | | 2341 | if ((vtag == 0) || (vtag > 4095)) /* Invalid */ |
2349 | return EINVAL; | | 2342 | return EINVAL; |
2350 | | | 2343 | |
2351 | IXGBE_CORE_LOCK(adapter); | | 2344 | IXGBE_CORE_LOCK(adapter); |
2352 | index = (vtag >> 5) & 0x7F; | | 2345 | index = (vtag >> 5) & 0x7F; |
2353 | bit = vtag & 0x1F; | | 2346 | bit = vtag & 0x1F; |
2354 | adapter->shadow_vfta[index] |= ((u32)1 << bit); | | 2347 | adapter->shadow_vfta[index] |= ((u32)1 << bit); |
2355 | error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true, | | 2348 | error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true, |
2356 | true); | | 2349 | true); |
2357 | IXGBE_CORE_UNLOCK(adapter); | | 2350 | IXGBE_CORE_UNLOCK(adapter); |
2358 | if (error != 0) | | 2351 | if (error != 0) |
2359 | error = EACCES; | | 2352 | error = EACCES; |
2360 | | | 2353 | |
2361 | return error; | | 2354 | return error; |
2362 | } /* ixgbe_register_vlan */ | | 2355 | } /* ixgbe_register_vlan */ |
2363 | | | 2356 | |
2364 | /************************************************************************ | | 2357 | /************************************************************************ |
2365 | * ixgbe_unregister_vlan | | 2358 | * ixgbe_unregister_vlan |
2366 | * | | 2359 | * |
2367 | * Run via vlan unconfig EVENT, remove our entry in the soft vfta. | | 2360 | * Run via vlan unconfig EVENT, remove our entry in the soft vfta. |
2368 | ************************************************************************/ | | 2361 | ************************************************************************/ |
2369 | static int | | 2362 | static int |
2370 | ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) | | 2363 | ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag) |
2371 | { | | 2364 | { |
2372 | struct adapter *adapter = ifp->if_softc; | | | |
2373 | u16 index, bit; | | 2365 | u16 index, bit; |
2374 | int error; | | 2366 | int error; |
2375 | | | 2367 | |
2376 | if (ifp->if_softc != arg) | | | |
2377 | return EINVAL; | | | |
2378 | | | | |
2379 | if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | | 2368 | if ((vtag == 0) || (vtag > 4095)) /* Invalid */ |
2380 | return EINVAL; | | 2369 | return EINVAL; |
2381 | | | 2370 | |
2382 | IXGBE_CORE_LOCK(adapter); | | 2371 | IXGBE_CORE_LOCK(adapter); |
2383 | index = (vtag >> 5) & 0x7F; | | 2372 | index = (vtag >> 5) & 0x7F; |
2384 | bit = vtag & 0x1F; | | 2373 | bit = vtag & 0x1F; |
2385 | adapter->shadow_vfta[index] &= ~((u32)1 << bit); | | 2374 | adapter->shadow_vfta[index] &= ~((u32)1 << bit); |
2386 | error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false, | | 2375 | error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false, |
2387 | true); | | 2376 | true); |
2388 | IXGBE_CORE_UNLOCK(adapter); | | 2377 | IXGBE_CORE_UNLOCK(adapter); |
2389 | if (error != 0) | | 2378 | if (error != 0) |
2390 | error = EACCES; | | 2379 | error = EACCES; |
2391 | | | 2380 | |
2392 | return error; | | 2381 | return error; |
2393 | } /* ixgbe_unregister_vlan */ | | 2382 | } /* ixgbe_unregister_vlan */ |
2394 | | | 2383 | |
2395 | static void | | 2384 | static void |
2396 | ixgbe_setup_vlan_hw_tagging(struct adapter *adapter) | | 2385 | ixgbe_setup_vlan_hw_tagging(struct adapter *adapter) |
2397 | { | | 2386 | { |
2398 | struct ethercom *ec = &adapter->osdep.ec; | | 2387 | struct ethercom *ec = &adapter->osdep.ec; |
2399 | struct ixgbe_hw *hw = &adapter->hw; | | 2388 | struct ixgbe_hw *hw = &adapter->hw; |
2400 | struct rx_ring *rxr; | | 2389 | struct rx_ring *rxr; |
2401 | u32 ctrl; | | 2390 | u32 ctrl; |
2402 | int i; | | 2391 | int i; |
2403 | bool hwtagging; | | 2392 | bool hwtagging; |
2404 | | | 2393 | |
2405 | /* Enable HW tagging only if any vlan is attached */ | | 2394 | /* Enable HW tagging only if any vlan is attached */ |
2406 | hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) | | 2395 | hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) |
2407 | && VLAN_ATTACHED(ec); | | 2396 | && VLAN_ATTACHED(ec); |
2408 | | | 2397 | |
2409 | /* Setup the queues for vlans */ | | 2398 | /* Setup the queues for vlans */ |
2410 | for (i = 0; i < adapter->num_queues; i++) { | | 2399 | for (i = 0; i < adapter->num_queues; i++) { |
2411 | rxr = &adapter->rx_rings[i]; | | 2400 | rxr = &adapter->rx_rings[i]; |
2412 | /* | | 2401 | /* |
2413 | * On 82599 and later, the VLAN enable is per/queue in RXDCTL. | | 2402 | * On 82599 and later, the VLAN enable is per/queue in RXDCTL. |
2414 | */ | | 2403 | */ |
2415 | if (hw->mac.type != ixgbe_mac_82598EB) { | | 2404 | if (hw->mac.type != ixgbe_mac_82598EB) { |
2416 | ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); | | 2405 | ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); |
2417 | if (hwtagging) | | 2406 | if (hwtagging) |
2418 | ctrl |= IXGBE_RXDCTL_VME; | | 2407 | ctrl |= IXGBE_RXDCTL_VME; |
2419 | else | | 2408 | else |
2420 | ctrl &= ~IXGBE_RXDCTL_VME; | | 2409 | ctrl &= ~IXGBE_RXDCTL_VME; |
2421 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); | | 2410 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); |
2422 | } | | 2411 | } |
2423 | rxr->vtag_strip = hwtagging ? TRUE : FALSE; | | 2412 | rxr->vtag_strip = hwtagging ? TRUE : FALSE; |
2424 | } | | 2413 | } |
2425 | | | 2414 | |
2426 | /* VLAN hw tagging for 82598 */ | | 2415 | /* VLAN hw tagging for 82598 */ |
2427 | if (hw->mac.type == ixgbe_mac_82598EB) { | | 2416 | if (hw->mac.type == ixgbe_mac_82598EB) { |
2428 | ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | | 2417 | ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
2429 | if (hwtagging) | | 2418 | if (hwtagging) |
2430 | ctrl |= IXGBE_VLNCTRL_VME; | | 2419 | ctrl |= IXGBE_VLNCTRL_VME; |
2431 | else | | 2420 | else |
2432 | ctrl &= ~IXGBE_VLNCTRL_VME; | | 2421 | ctrl &= ~IXGBE_VLNCTRL_VME; |
2433 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); | | 2422 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); |
2434 | } | | 2423 | } |
2435 | } /* ixgbe_setup_vlan_hw_tagging */ | | 2424 | } /* ixgbe_setup_vlan_hw_tagging */ |
2436 | | | 2425 | |
2437 | static void | | 2426 | static void |
2438 | ixgbe_setup_vlan_hw_support(struct adapter *adapter) | | 2427 | ixgbe_setup_vlan_hw_support(struct adapter *adapter) |
2439 | { | | 2428 | { |
2440 | struct ethercom *ec = &adapter->osdep.ec; | | 2429 | struct ethercom *ec = &adapter->osdep.ec; |
2441 | struct ixgbe_hw *hw = &adapter->hw; | | 2430 | struct ixgbe_hw *hw = &adapter->hw; |
2442 | int i; | | 2431 | int i; |
2443 | u32 ctrl; | | 2432 | u32 ctrl; |
2444 | struct vlanid_list *vlanidp; | | 2433 | struct vlanid_list *vlanidp; |
2445 | | | 2434 | |
2446 | /* | | 2435 | /* |
2447 | * This function is called from both if_init and ifflags_cb() | | 2436 | * This function is called from both if_init and ifflags_cb() |
2448 | * on NetBSD. | | 2437 | * on NetBSD. |
2449 | */ | | 2438 | */ |
2450 | | | 2439 | |
2451 | /* | | 2440 | /* |
2452 | * Part 1: | | 2441 | * Part 1: |
2453 | * Setup VLAN HW tagging | | 2442 | * Setup VLAN HW tagging |
2454 | */ | | 2443 | */ |
2455 | ixgbe_setup_vlan_hw_tagging(adapter); | | 2444 | ixgbe_setup_vlan_hw_tagging(adapter); |
2456 | | | 2445 | |
2457 | /* | | 2446 | /* |
2458 | * Part 2: | | 2447 | * Part 2: |
2459 | * Setup VLAN HW filter | | 2448 | * Setup VLAN HW filter |
2460 | */ | | 2449 | */ |
2461 | /* Cleanup shadow_vfta */ | | 2450 | /* Cleanup shadow_vfta */ |
2462 | for (i = 0; i < IXGBE_VFTA_SIZE; i++) | | 2451 | for (i = 0; i < IXGBE_VFTA_SIZE; i++) |
2463 | adapter->shadow_vfta[i] = 0; | | 2452 | adapter->shadow_vfta[i] = 0; |
2464 | /* Generate shadow_vfta from ec_vids */ | | 2453 | /* Generate shadow_vfta from ec_vids */ |
2465 | ETHER_LOCK(ec); | | 2454 | ETHER_LOCK(ec); |
2466 | SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { | | 2455 | SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { |
2467 | uint32_t idx; | | 2456 | uint32_t idx; |
2468 | | | 2457 | |
2469 | idx = vlanidp->vid / 32; | | 2458 | idx = vlanidp->vid / 32; |
2470 | KASSERT(idx < IXGBE_VFTA_SIZE); | | 2459 | KASSERT(idx < IXGBE_VFTA_SIZE); |
2471 | adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); | | 2460 | adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); |
2472 | } | | 2461 | } |
2473 | ETHER_UNLOCK(ec); | | 2462 | ETHER_UNLOCK(ec); |
2474 | for (i = 0; i < IXGBE_VFTA_SIZE; i++) | | 2463 | for (i = 0; i < IXGBE_VFTA_SIZE; i++) |
2475 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]); | | 2464 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]); |
2476 | | | 2465 | |
2477 | ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | | 2466 | ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); |
2478 | /* Enable the Filter Table if enabled */ | | 2467 | /* Enable the Filter Table if enabled */ |
2479 | if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) | | 2468 | if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER) |
2480 | ctrl |= IXGBE_VLNCTRL_VFE; | | 2469 | ctrl |= IXGBE_VLNCTRL_VFE; |
2481 | else | | 2470 | else |
2482 | ctrl &= ~IXGBE_VLNCTRL_VFE; | | 2471 | ctrl &= ~IXGBE_VLNCTRL_VFE; |
2483 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); | | 2472 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); |
2484 | } /* ixgbe_setup_vlan_hw_support */ | | 2473 | } /* ixgbe_setup_vlan_hw_support */ |
2485 | | | 2474 | |
2486 | /************************************************************************ | | 2475 | /************************************************************************ |
2487 | * ixgbe_get_slot_info | | 2476 | * ixgbe_get_slot_info |
2488 | * | | 2477 | * |
2489 | * Get the width and transaction speed of | | 2478 | * Get the width and transaction speed of |
2490 | * the slot this adapter is plugged into. | | 2479 | * the slot this adapter is plugged into. |
2491 | ************************************************************************/ | | 2480 | ************************************************************************/ |
2492 | static void | | 2481 | static void |
2493 | ixgbe_get_slot_info(struct adapter *adapter) | | 2482 | ixgbe_get_slot_info(struct adapter *adapter) |
2494 | { | | 2483 | { |
2495 | device_t dev = adapter->dev; | | 2484 | device_t dev = adapter->dev; |
2496 | struct ixgbe_hw *hw = &adapter->hw; | | 2485 | struct ixgbe_hw *hw = &adapter->hw; |
2497 | u32 offset; | | 2486 | u32 offset; |
2498 | u16 link; | | 2487 | u16 link; |
2499 | int bus_info_valid = TRUE; | | 2488 | int bus_info_valid = TRUE; |
2500 | | | 2489 | |
2501 | /* Some devices are behind an internal bridge */ | | 2490 | /* Some devices are behind an internal bridge */ |
2502 | switch (hw->device_id) { | | 2491 | switch (hw->device_id) { |
2503 | case IXGBE_DEV_ID_82599_SFP_SF_QP: | | 2492 | case IXGBE_DEV_ID_82599_SFP_SF_QP: |
2504 | case IXGBE_DEV_ID_82599_QSFP_SF_QP: | | 2493 | case IXGBE_DEV_ID_82599_QSFP_SF_QP: |
2505 | goto get_parent_info; | | 2494 | goto get_parent_info; |
2506 | default: | | 2495 | default: |
2507 | break; | | 2496 | break; |
2508 | } | | 2497 | } |
2509 | | | 2498 | |
2510 | ixgbe_get_bus_info(hw); | | 2499 | ixgbe_get_bus_info(hw); |
2511 | | | 2500 | |
2512 | /* | | 2501 | /* |
2513 | * Some devices don't use PCI-E, but there is no need | | 2502 | * Some devices don't use PCI-E, but there is no need |
2514 | * to display "Unknown" for bus speed and width. | | 2503 | * to display "Unknown" for bus speed and width. |
2515 | */ | | 2504 | */ |
2516 | switch (hw->mac.type) { | | 2505 | switch (hw->mac.type) { |
2517 | case ixgbe_mac_X550EM_x: | | 2506 | case ixgbe_mac_X550EM_x: |
2518 | case ixgbe_mac_X550EM_a: | | 2507 | case ixgbe_mac_X550EM_a: |
2519 | return; | | 2508 | return; |
2520 | default: | | 2509 | default: |
2521 | goto display; | | 2510 | goto display; |
2522 | } | | 2511 | } |
2523 | | | 2512 | |
2524 | get_parent_info: | | 2513 | get_parent_info: |
2525 | /* | | 2514 | /* |
2526 | * For the Quad port adapter we need to parse back | | 2515 | * For the Quad port adapter we need to parse back |
2527 | * up the PCI tree to find the speed of the expansion | | 2516 | * up the PCI tree to find the speed of the expansion |
2528 | * slot into which this adapter is plugged. A bit more work. | | 2517 | * slot into which this adapter is plugged. A bit more work. |
2529 | */ | | 2518 | */ |
2530 | dev = device_parent(device_parent(dev)); | | 2519 | dev = device_parent(device_parent(dev)); |
2531 | #if 0 | | 2520 | #if 0 |
2532 | #ifdef IXGBE_DEBUG | | 2521 | #ifdef IXGBE_DEBUG |
2533 | device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), | | 2522 | device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), |
2534 | pci_get_slot(dev), pci_get_function(dev)); | | 2523 | pci_get_slot(dev), pci_get_function(dev)); |
2535 | #endif | | 2524 | #endif |
2536 | dev = device_parent(device_parent(dev)); | | 2525 | dev = device_parent(device_parent(dev)); |
2537 | #ifdef IXGBE_DEBUG | | 2526 | #ifdef IXGBE_DEBUG |
2538 | device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), | | 2527 | device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), |
2539 | pci_get_slot(dev), pci_get_function(dev)); | | 2528 | pci_get_slot(dev), pci_get_function(dev)); |
2540 | #endif | | 2529 | #endif |
2541 | #endif | | 2530 | #endif |
2542 | /* Now get the PCI Express Capabilities offset */ | | 2531 | /* Now get the PCI Express Capabilities offset */ |
2543 | if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag, | | 2532 | if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag, |
2544 | PCI_CAP_PCIEXPRESS, &offset, NULL)) { | | 2533 | PCI_CAP_PCIEXPRESS, &offset, NULL)) { |
2545 | /* | | 2534 | /* |
2546 | * Hmm...can't get PCI-Express capabilities. | | 2535 | * Hmm...can't get PCI-Express capabilities. |
2547 | * Falling back to default method. | | 2536 | * Falling back to default method. |
2548 | */ | | 2537 | */ |
2549 | bus_info_valid = FALSE; | | 2538 | bus_info_valid = FALSE; |
2550 | ixgbe_get_bus_info(hw); | | 2539 | ixgbe_get_bus_info(hw); |
2551 | goto display; | | 2540 | goto display; |
2552 | } | | 2541 | } |
2553 | /* ...and read the Link Status Register */ | | 2542 | /* ...and read the Link Status Register */ |
2554 | link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag, | | 2543 | link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag, |
2555 | offset + PCIE_LCSR) >> 16; | | 2544 | offset + PCIE_LCSR) >> 16; |
2556 | ixgbe_set_pci_config_data_generic(hw, link); | | 2545 | ixgbe_set_pci_config_data_generic(hw, link); |
2557 | | | 2546 | |
2558 | display: | | 2547 | display: |
2559 | device_printf(dev, "PCI Express Bus: Speed %s Width %s\n", | | 2548 | device_printf(dev, "PCI Express Bus: Speed %s Width %s\n", |
2560 | ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : | | 2549 | ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : |
2561 | (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : | | 2550 | (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : |
2562 | (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : | | 2551 | (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : |
2563 | "Unknown"), | | 2552 | "Unknown"), |
2564 | ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" : | | 2553 | ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" : |
2565 | (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" : | | 2554 | (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" : |
2566 | (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" : | | 2555 | (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" : |
2567 | "Unknown")); | | 2556 | "Unknown")); |
2568 | | | 2557 | |
2569 | if (bus_info_valid) { | | 2558 | if (bus_info_valid) { |
2570 | if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && | | 2559 | if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && |
2571 | ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && | | 2560 | ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && |
2572 | (hw->bus.speed == ixgbe_bus_speed_2500))) { | | 2561 | (hw->bus.speed == ixgbe_bus_speed_2500))) { |
2573 | device_printf(dev, "PCI-Express bandwidth available" | | 2562 | device_printf(dev, "PCI-Express bandwidth available" |
2574 | " for this card\n is not sufficient for" | | 2563 | " for this card\n is not sufficient for" |
2575 | " optimal performance.\n"); | | 2564 | " optimal performance.\n"); |
2576 | device_printf(dev, "For optimal performance a x8 " | | 2565 | device_printf(dev, "For optimal performance a x8 " |
2577 | "PCIE, or x4 PCIE Gen2 slot is required.\n"); | | 2566 | "PCIE, or x4 PCIE Gen2 slot is required.\n"); |
2578 | } | | 2567 | } |
2579 | if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && | | 2568 | if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && |
2580 | ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && | | 2569 | ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && |
2581 | (hw->bus.speed < ixgbe_bus_speed_8000))) { | | 2570 | (hw->bus.speed < ixgbe_bus_speed_8000))) { |
2582 | device_printf(dev, "PCI-Express bandwidth available" | | 2571 | device_printf(dev, "PCI-Express bandwidth available" |
2583 | " for this card\n is not sufficient for" | | 2572 | " for this card\n is not sufficient for" |
2584 | " optimal performance.\n"); | | 2573 | " optimal performance.\n"); |
2585 | device_printf(dev, "For optimal performance a x8 " | | 2574 | device_printf(dev, "For optimal performance a x8 " |
2586 | "PCIE Gen3 slot is required.\n"); | | 2575 | "PCIE Gen3 slot is required.\n"); |
2587 | } | | 2576 | } |
2588 | } else | | 2577 | } else |
2589 | device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); | | 2578 | device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n"); |
2590 | | | 2579 | |
2591 | return; | | 2580 | return; |
2592 | } /* ixgbe_get_slot_info */ | | 2581 | } /* ixgbe_get_slot_info */ |
2593 | | | 2582 | |
2594 | /************************************************************************ | | 2583 | /************************************************************************ |
2595 | * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets | | 2584 | * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets |
2596 | ************************************************************************/ | | 2585 | ************************************************************************/ |
2597 | static inline void | | 2586 | static inline void |
2598 | ixgbe_enable_queue(struct adapter *adapter, u32 vector) | | 2587 | ixgbe_enable_queue(struct adapter *adapter, u32 vector) |
2599 | { | | 2588 | { |
2600 | struct ixgbe_hw *hw = &adapter->hw; | | 2589 | struct ixgbe_hw *hw = &adapter->hw; |
2601 | struct ix_queue *que = &adapter->queues[vector]; | | 2590 | struct ix_queue *que = &adapter->queues[vector]; |
2602 | u64 queue = 1ULL << vector; | | 2591 | u64 queue = 1ULL << vector; |
2603 | u32 mask; | | 2592 | u32 mask; |
2604 | | | 2593 | |
2605 | mutex_enter(&que->dc_mtx); | | 2594 | mutex_enter(&que->dc_mtx); |
2606 | if (que->disabled_count > 0 && --que->disabled_count > 0) | | 2595 | if (que->disabled_count > 0 && --que->disabled_count > 0) |
2607 | goto out; | | 2596 | goto out; |
2608 | | | 2597 | |
2609 | if (hw->mac.type == ixgbe_mac_82598EB) { | | 2598 | if (hw->mac.type == ixgbe_mac_82598EB) { |
2610 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); | | 2599 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); |
2611 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); | | 2600 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); |
2612 | } else { | | 2601 | } else { |
2613 | mask = (queue & 0xFFFFFFFF); | | 2602 | mask = (queue & 0xFFFFFFFF); |
2614 | if (mask) | | 2603 | if (mask) |
2615 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); | | 2604 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); |
2616 | mask = (queue >> 32); | | 2605 | mask = (queue >> 32); |
2617 | if (mask) | | 2606 | if (mask) |
2618 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); | | 2607 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); |
2619 | } | | 2608 | } |
2620 | out: | | 2609 | out: |
2621 | mutex_exit(&que->dc_mtx); | | 2610 | mutex_exit(&que->dc_mtx); |
2622 | } /* ixgbe_enable_queue */ | | 2611 | } /* ixgbe_enable_queue */ |
2623 | | | 2612 | |
2624 | /************************************************************************ | | 2613 | /************************************************************************ |
2625 | * ixgbe_disable_queue_internal | | 2614 | * ixgbe_disable_queue_internal |
2626 | ************************************************************************/ | | 2615 | ************************************************************************/ |
2627 | static inline void | | 2616 | static inline void |
2628 | ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok) | | 2617 | ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok) |
2629 | { | | 2618 | { |
2630 | struct ixgbe_hw *hw = &adapter->hw; | | 2619 | struct ixgbe_hw *hw = &adapter->hw; |
2631 | struct ix_queue *que = &adapter->queues[vector]; | | 2620 | struct ix_queue *que = &adapter->queues[vector]; |
2632 | u64 queue = 1ULL << vector; | | 2621 | u64 queue = 1ULL << vector; |
2633 | u32 mask; | | 2622 | u32 mask; |
2634 | | | 2623 | |
2635 | mutex_enter(&que->dc_mtx); | | 2624 | mutex_enter(&que->dc_mtx); |
2636 | | | 2625 | |
2637 | if (que->disabled_count > 0) { | | 2626 | if (que->disabled_count > 0) { |
2638 | if (nestok) | | 2627 | if (nestok) |
2639 | que->disabled_count++; | | 2628 | que->disabled_count++; |
2640 | goto out; | | 2629 | goto out; |
2641 | } | | 2630 | } |
2642 | que->disabled_count++; | | 2631 | que->disabled_count++; |
2643 | | | 2632 | |
2644 | if (hw->mac.type == ixgbe_mac_82598EB) { | | 2633 | if (hw->mac.type == ixgbe_mac_82598EB) { |
2645 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); | | 2634 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); |
2646 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); | | 2635 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); |
2647 | } else { | | 2636 | } else { |
2648 | mask = (queue & 0xFFFFFFFF); | | 2637 | mask = (queue & 0xFFFFFFFF); |
2649 | if (mask) | | 2638 | if (mask) |
2650 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); | | 2639 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); |
2651 | mask = (queue >> 32); | | 2640 | mask = (queue >> 32); |
2652 | if (mask) | | 2641 | if (mask) |
2653 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); | | 2642 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); |
2654 | } | | 2643 | } |
2655 | out: | | 2644 | out: |
2656 | mutex_exit(&que->dc_mtx); | | 2645 | mutex_exit(&que->dc_mtx); |
2657 | } /* ixgbe_disable_queue_internal */ | | 2646 | } /* ixgbe_disable_queue_internal */ |
2658 | | | 2647 | |
2659 | /************************************************************************ | | 2648 | /************************************************************************ |
2660 | * ixgbe_disable_queue | | 2649 | * ixgbe_disable_queue |
2661 | ************************************************************************/ | | 2650 | ************************************************************************/ |
2662 | static inline void | | 2651 | static inline void |
2663 | ixgbe_disable_queue(struct adapter *adapter, u32 vector) | | 2652 | ixgbe_disable_queue(struct adapter *adapter, u32 vector) |
2664 | { | | 2653 | { |
2665 | | | 2654 | |
2666 | ixgbe_disable_queue_internal(adapter, vector, true); | | 2655 | ixgbe_disable_queue_internal(adapter, vector, true); |
2667 | } /* ixgbe_disable_queue */ | | 2656 | } /* ixgbe_disable_queue */ |
2668 | | | 2657 | |
2669 | /************************************************************************ | | 2658 | /************************************************************************ |
2670 | * ixgbe_sched_handle_que - schedule deferred packet processing | | 2659 | * ixgbe_sched_handle_que - schedule deferred packet processing |
2671 | ************************************************************************/ | | 2660 | ************************************************************************/ |
2672 | static inline void | | 2661 | static inline void |
2673 | ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que) | | 2662 | ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que) |
2674 | { | | 2663 | { |
2675 | | | 2664 | |
2676 | if (que->txrx_use_workqueue) { | | 2665 | if (que->txrx_use_workqueue) { |
2677 | /* | | 2666 | /* |
2678 | * adapter->que_wq is bound to each CPU instead of | | 2667 | * adapter->que_wq is bound to each CPU instead of |
2679 | * each NIC queue to reduce workqueue kthread. As we | | 2668 | * each NIC queue to reduce workqueue kthread. As we |
2680 | * should consider about interrupt affinity in this | | 2669 | * should consider about interrupt affinity in this |
2681 | * function, the workqueue kthread must be WQ_PERCPU. | | 2670 | * function, the workqueue kthread must be WQ_PERCPU. |
2682 | * If create WQ_PERCPU workqueue kthread for each NIC | | 2671 | * If create WQ_PERCPU workqueue kthread for each NIC |
2683 | * queue, that number of created workqueue kthread is | | 2672 | * queue, that number of created workqueue kthread is |
2684 | * (number of used NIC queue) * (number of CPUs) = | | 2673 | * (number of used NIC queue) * (number of CPUs) = |
2685 | * (number of CPUs) ^ 2 most often. | | 2674 | * (number of CPUs) ^ 2 most often. |
2686 | * | | 2675 | * |
2687 | * The same NIC queue's interrupts are avoided by | | 2676 | * The same NIC queue's interrupts are avoided by |
2688 | * masking the queue's interrupt. And different | | 2677 | * masking the queue's interrupt. And different |
2689 | * NIC queue's interrupts use different struct work | | 2678 | * NIC queue's interrupts use different struct work |
2690 | * (que->wq_cookie). So, "enqueued flag" to avoid | | 2679 | * (que->wq_cookie). So, "enqueued flag" to avoid |
2691 | * twice workqueue_enqueue() is not required . | | 2680 | * twice workqueue_enqueue() is not required . |
2692 | */ | | 2681 | */ |
2693 | workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu()); | | 2682 | workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu()); |
2694 | } else { | | 2683 | } else { |
2695 | softint_schedule(que->que_si); | | 2684 | softint_schedule(que->que_si); |
2696 | } | | 2685 | } |
2697 | } | | 2686 | } |
2698 | | | 2687 | |
2699 | /************************************************************************ | | 2688 | /************************************************************************ |
2700 | * ixgbe_msix_que - MSI-X Queue Interrupt Service routine | | 2689 | * ixgbe_msix_que - MSI-X Queue Interrupt Service routine |
2701 | ************************************************************************/ | | 2690 | ************************************************************************/ |
2702 | static int | | 2691 | static int |
2703 | ixgbe_msix_que(void *arg) | | 2692 | ixgbe_msix_que(void *arg) |
2704 | { | | 2693 | { |
2705 | struct ix_queue *que = arg; | | 2694 | struct ix_queue *que = arg; |
2706 | struct adapter *adapter = que->adapter; | | 2695 | struct adapter *adapter = que->adapter; |
2707 | struct ifnet *ifp = adapter->ifp; | | 2696 | struct ifnet *ifp = adapter->ifp; |
2708 | struct tx_ring *txr = que->txr; | | 2697 | struct tx_ring *txr = que->txr; |
2709 | struct rx_ring *rxr = que->rxr; | | 2698 | struct rx_ring *rxr = que->rxr; |
2710 | bool more; | | 2699 | bool more; |
2711 | u32 newitr = 0; | | 2700 | u32 newitr = 0; |
2712 | | | 2701 | |
2713 | /* Protect against spurious interrupts */ | | 2702 | /* Protect against spurious interrupts */ |
2714 | if ((ifp->if_flags & IFF_RUNNING) == 0) | | 2703 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
2715 | return 0; | | 2704 | return 0; |
2716 | | | 2705 | |
2717 | ixgbe_disable_queue(adapter, que->msix); | | 2706 | ixgbe_disable_queue(adapter, que->msix); |
2718 | ++que->irqs.ev_count; | | 2707 | ++que->irqs.ev_count; |
2719 | | | 2708 | |
2720 | /* | | 2709 | /* |
2721 | * Don't change "que->txrx_use_workqueue" from this point to avoid | | 2710 | * Don't change "que->txrx_use_workqueue" from this point to avoid |
2722 | * flip-flopping softint/workqueue mode in one deferred processing. | | 2711 | * flip-flopping softint/workqueue mode in one deferred processing. |
2723 | */ | | 2712 | */ |
2724 | que->txrx_use_workqueue = adapter->txrx_use_workqueue; | | 2713 | que->txrx_use_workqueue = adapter->txrx_use_workqueue; |
2725 | | | 2714 | |
2726 | #ifdef __NetBSD__ | | 2715 | #ifdef __NetBSD__ |
2727 | /* Don't run ixgbe_rxeof in interrupt context */ | | 2716 | /* Don't run ixgbe_rxeof in interrupt context */ |
2728 | more = true; | | 2717 | more = true; |
2729 | #else | | 2718 | #else |
2730 | more = ixgbe_rxeof(que); | | 2719 | more = ixgbe_rxeof(que); |
2731 | #endif | | 2720 | #endif |
2732 | | | 2721 | |
2733 | IXGBE_TX_LOCK(txr); | | 2722 | IXGBE_TX_LOCK(txr); |
2734 | ixgbe_txeof(txr); | | 2723 | ixgbe_txeof(txr); |
2735 | IXGBE_TX_UNLOCK(txr); | | 2724 | IXGBE_TX_UNLOCK(txr); |
2736 | | | 2725 | |
2737 | /* Do AIM now? */ | | 2726 | /* Do AIM now? */ |
2738 | | | 2727 | |
2739 | if (adapter->enable_aim == false) | | 2728 | if (adapter->enable_aim == false) |
2740 | goto no_calc; | | 2729 | goto no_calc; |
2741 | /* | | 2730 | /* |
2742 | * Do Adaptive Interrupt Moderation: | | 2731 | * Do Adaptive Interrupt Moderation: |
2743 | * - Write out last calculated setting | | 2732 | * - Write out last calculated setting |
2744 | * - Calculate based on average size over | | 2733 | * - Calculate based on average size over |
2745 | * the last interval. | | 2734 | * the last interval. |
2746 | */ | | 2735 | */ |
2747 | if (que->eitr_setting) | | 2736 | if (que->eitr_setting) |
2748 | ixgbe_eitr_write(adapter, que->msix, que->eitr_setting); | | 2737 | ixgbe_eitr_write(adapter, que->msix, que->eitr_setting); |
2749 | | | 2738 | |
2750 | que->eitr_setting = 0; | | 2739 | que->eitr_setting = 0; |
2751 | | | 2740 | |
2752 | /* Idle, do nothing */ | | 2741 | /* Idle, do nothing */ |
2753 | if ((txr->bytes == 0) && (rxr->bytes == 0)) | | 2742 | if ((txr->bytes == 0) && (rxr->bytes == 0)) |
2754 | goto no_calc; | | 2743 | goto no_calc; |
2755 | | | 2744 | |
2756 | if ((txr->bytes) && (txr->packets)) | | 2745 | if ((txr->bytes) && (txr->packets)) |
2757 | newitr = txr->bytes/txr->packets; | | 2746 | newitr = txr->bytes/txr->packets; |
2758 | if ((rxr->bytes) && (rxr->packets)) | | 2747 | if ((rxr->bytes) && (rxr->packets)) |
2759 | newitr = uimax(newitr, (rxr->bytes / rxr->packets)); | | 2748 | newitr = uimax(newitr, (rxr->bytes / rxr->packets)); |
2760 | newitr += 24; /* account for hardware frame, crc */ | | 2749 | newitr += 24; /* account for hardware frame, crc */ |
2761 | | | 2750 | |
2762 | /* set an upper boundary */ | | 2751 | /* set an upper boundary */ |
2763 | newitr = uimin(newitr, 3000); | | 2752 | newitr = uimin(newitr, 3000); |
2764 | | | 2753 | |
2765 | /* Be nice to the mid range */ | | 2754 | /* Be nice to the mid range */ |
2766 | if ((newitr > 300) && (newitr < 1200)) | | 2755 | if ((newitr > 300) && (newitr < 1200)) |
2767 | newitr = (newitr / 3); | | 2756 | newitr = (newitr / 3); |
2768 | else | | 2757 | else |
2769 | newitr = (newitr / 2); | | 2758 | newitr = (newitr / 2); |
2770 | | | 2759 | |
2771 | /* | | 2760 | /* |
2772 | * When RSC is used, ITR interval must be larger than RSC_DELAY. | | 2761 | * When RSC is used, ITR interval must be larger than RSC_DELAY. |
2773 | * Currently, we use 2us for RSC_DELAY. The minimum value is always | | 2762 | * Currently, we use 2us for RSC_DELAY. The minimum value is always |
2774 | * greater than 2us on 100M (and 10M?(not documented)), but it's not | | 2763 | * greater than 2us on 100M (and 10M?(not documented)), but it's not |
2775 | * on 1G and higher. | | 2764 | * on 1G and higher. |
2776 | */ | | 2765 | */ |
2777 | if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) | | 2766 | if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) |
2778 | && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { | | 2767 | && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { |
2779 | if (newitr < IXGBE_MIN_RSC_EITR_10G1G) | | 2768 | if (newitr < IXGBE_MIN_RSC_EITR_10G1G) |
2780 | newitr = IXGBE_MIN_RSC_EITR_10G1G; | | 2769 | newitr = IXGBE_MIN_RSC_EITR_10G1G; |
2781 | } | | 2770 | } |
2782 | | | 2771 | |
2783 | /* save for next interrupt */ | | 2772 | /* save for next interrupt */ |
2784 | que->eitr_setting = newitr; | | 2773 | que->eitr_setting = newitr; |
2785 | | | 2774 | |
2786 | /* Reset state */ | | 2775 | /* Reset state */ |
2787 | txr->bytes = 0; | | 2776 | txr->bytes = 0; |
2788 | txr->packets = 0; | | 2777 | txr->packets = 0; |
2789 | rxr->bytes = 0; | | 2778 | rxr->bytes = 0; |
2790 | rxr->packets = 0; | | 2779 | rxr->packets = 0; |
2791 | | | 2780 | |
2792 | no_calc: | | 2781 | no_calc: |
2793 | if (more) | | 2782 | if (more) |
2794 | ixgbe_sched_handle_que(adapter, que); | | 2783 | ixgbe_sched_handle_que(adapter, que); |
2795 | else | | 2784 | else |
2796 | ixgbe_enable_queue(adapter, que->msix); | | 2785 | ixgbe_enable_queue(adapter, que->msix); |
2797 | | | 2786 | |
2798 | return 1; | | 2787 | return 1; |
2799 | } /* ixgbe_msix_que */ | | 2788 | } /* ixgbe_msix_que */ |
2800 | | | 2789 | |
2801 | /************************************************************************ | | 2790 | /************************************************************************ |
2802 | * ixgbe_media_status - Media Ioctl callback | | 2791 | * ixgbe_media_status - Media Ioctl callback |
2803 | * | | 2792 | * |
2804 | * Called whenever the user queries the status of | | 2793 | * Called whenever the user queries the status of |
2805 | * the interface using ifconfig. | | 2794 | * the interface using ifconfig. |
2806 | ************************************************************************/ | | 2795 | ************************************************************************/ |
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	/* Refresh the cached link state before reporting it. */
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report only IFM_NONE and stop. */
	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_active |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	/*
	 * Translate the (physical layer, link speed) pair into an
	 * ifmedia subtype.  Copper BASE-T layers first.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	/* SFP+ direct-attach copper (twinax). */
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	/* Long-reach fiber; a 1G link on an LR module reports 1000BASE-LX. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* LRM fiber, same 1G fallback as LR. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	/* Short-reach fiber (SR at 10G, SX at 1G). */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	/* CX4 copper. */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 *
	 * NOTE: the KX4/KX test below is deliberately the "else" of the
	 * KR test — at most one of the two switches runs.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	/* Derive if_baudrate from the resolved media word. */
	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */
2948 | | | 2937 | |
2949 | /************************************************************************ | | 2938 | /************************************************************************ |
2950 | * ixgbe_media_change - Media Ioctl callback | | 2939 | * ixgbe_media_change - Media Ioctl callback |
2951 | * | | 2940 | * |
2952 | * Called when the user changes speed/duplex using | | 2941 | * Called when the user changes speed/duplex using |
 * media/mediaopt option with ifconfig.
2954 | ************************************************************************/ | | 2943 | ************************************************************************/ |
2955 | static int | | 2944 | static int |
2956 | ixgbe_media_change(struct ifnet *ifp) | | 2945 | ixgbe_media_change(struct ifnet *ifp) |
2957 | { | | 2946 | { |
2958 | struct adapter *adapter = ifp->if_softc; | | 2947 | struct adapter *adapter = ifp->if_softc; |
2959 | struct ifmedia *ifm = &adapter->media; | | 2948 | struct ifmedia *ifm = &adapter->media; |
2960 | struct ixgbe_hw *hw = &adapter->hw; | | 2949 | struct ixgbe_hw *hw = &adapter->hw; |
2961 | ixgbe_link_speed speed = 0; | | 2950 | ixgbe_link_speed speed = 0; |
2962 | ixgbe_link_speed link_caps = 0; | | 2951 | ixgbe_link_speed link_caps = 0; |
2963 | bool negotiate = false; | | 2952 | bool negotiate = false; |
2964 | s32 err = IXGBE_NOT_IMPLEMENTED; | | 2953 | s32 err = IXGBE_NOT_IMPLEMENTED; |
2965 | | | 2954 | |
2966 | INIT_DEBUGOUT("ixgbe_media_change: begin"); | | 2955 | INIT_DEBUGOUT("ixgbe_media_change: begin"); |
2967 | | | 2956 | |
2968 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) | | 2957 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
2969 | return (EINVAL); | | 2958 | return (EINVAL); |
2970 | | | 2959 | |
2971 | if (hw->phy.media_type == ixgbe_media_type_backplane) | | 2960 | if (hw->phy.media_type == ixgbe_media_type_backplane) |
2972 | return (EPERM); | | 2961 | return (EPERM); |
2973 | | | 2962 | |
2974 | IXGBE_CORE_LOCK(adapter); | | 2963 | IXGBE_CORE_LOCK(adapter); |
2975 | /* | | 2964 | /* |
2976 | * We don't actually need to check against the supported | | 2965 | * We don't actually need to check against the supported |
2977 | * media types of the adapter; ifmedia will take care of | | 2966 | * media types of the adapter; ifmedia will take care of |
2978 | * that for us. | | 2967 | * that for us. |
2979 | */ | | 2968 | */ |
2980 | switch (IFM_SUBTYPE(ifm->ifm_media)) { | | 2969 | switch (IFM_SUBTYPE(ifm->ifm_media)) { |
2981 | case IFM_AUTO: | | 2970 | case IFM_AUTO: |
2982 | err = hw->mac.ops.get_link_capabilities(hw, &link_caps, | | 2971 | err = hw->mac.ops.get_link_capabilities(hw, &link_caps, |
2983 | &negotiate); | | 2972 | &negotiate); |
2984 | if (err != IXGBE_SUCCESS) { | | 2973 | if (err != IXGBE_SUCCESS) { |
2985 | device_printf(adapter->dev, "Unable to determine " | | 2974 | device_printf(adapter->dev, "Unable to determine " |
2986 | "supported advertise speeds\n"); | | 2975 | "supported advertise speeds\n"); |
2987 | IXGBE_CORE_UNLOCK(adapter); | | 2976 | IXGBE_CORE_UNLOCK(adapter); |
2988 | return (ENODEV); | | 2977 | return (ENODEV); |
2989 | } | | 2978 | } |
2990 | speed |= link_caps; | | 2979 | speed |= link_caps; |
2991 | break; | | 2980 | break; |
2992 | case IFM_10G_T: | | 2981 | case IFM_10G_T: |
2993 | case IFM_10G_LRM: | | 2982 | case IFM_10G_LRM: |
2994 | case IFM_10G_LR: | | 2983 | case IFM_10G_LR: |
2995 | case IFM_10G_TWINAX: | | 2984 | case IFM_10G_TWINAX: |
2996 | case IFM_10G_SR: | | 2985 | case IFM_10G_SR: |
2997 | case IFM_10G_CX4: | | 2986 | case IFM_10G_CX4: |
2998 | case IFM_10G_KR: | | 2987 | case IFM_10G_KR: |
2999 | case IFM_10G_KX4: | | 2988 | case IFM_10G_KX4: |
3000 | speed |= IXGBE_LINK_SPEED_10GB_FULL; | | 2989 | speed |= IXGBE_LINK_SPEED_10GB_FULL; |
3001 | break; | | 2990 | break; |
3002 | case IFM_5000_T: | | 2991 | case IFM_5000_T: |
3003 | speed |= IXGBE_LINK_SPEED_5GB_FULL; | | 2992 | speed |= IXGBE_LINK_SPEED_5GB_FULL; |
3004 | break; | | 2993 | break; |
3005 | case IFM_2500_T: | | 2994 | case IFM_2500_T: |
3006 | case IFM_2500_KX: | | 2995 | case IFM_2500_KX: |
3007 | speed |= IXGBE_LINK_SPEED_2_5GB_FULL; | | 2996 | speed |= IXGBE_LINK_SPEED_2_5GB_FULL; |
3008 | break; | | 2997 | break; |
3009 | case IFM_1000_T: | | 2998 | case IFM_1000_T: |
3010 | case IFM_1000_LX: | | 2999 | case IFM_1000_LX: |
3011 | case IFM_1000_SX: | | 3000 | case IFM_1000_SX: |
3012 | case IFM_1000_KX: | | 3001 | case IFM_1000_KX: |
3013 | speed |= IXGBE_LINK_SPEED_1GB_FULL; | | 3002 | speed |= IXGBE_LINK_SPEED_1GB_FULL; |
3014 | break; | | 3003 | break; |
3015 | case IFM_100_TX: | | 3004 | case IFM_100_TX: |
3016 | speed |= IXGBE_LINK_SPEED_100_FULL; | | 3005 | speed |= IXGBE_LINK_SPEED_100_FULL; |
3017 | break; | | 3006 | break; |
3018 | case IFM_10_T: | | 3007 | case IFM_10_T: |
3019 | speed |= IXGBE_LINK_SPEED_10_FULL; | | 3008 | speed |= IXGBE_LINK_SPEED_10_FULL; |
3020 | break; | | 3009 | break; |
3021 | case IFM_NONE: | | 3010 | case IFM_NONE: |
3022 | break; | | 3011 | break; |
3023 | default: | | 3012 | default: |
3024 | goto invalid; | | 3013 | goto invalid; |
3025 | } | | 3014 | } |
3026 | | | 3015 | |
3027 | hw->mac.autotry_restart = TRUE; | | 3016 | hw->mac.autotry_restart = TRUE; |
3028 | hw->mac.ops.setup_link(hw, speed, TRUE); | | 3017 | hw->mac.ops.setup_link(hw, speed, TRUE); |
3029 | adapter->advertise = 0; | | 3018 | adapter->advertise = 0; |
3030 | if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) { | | 3019 | if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) { |
3031 | if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0) | | 3020 | if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0) |
3032 | adapter->advertise |= 1 << 2; | | 3021 | adapter->advertise |= 1 << 2; |
3033 | if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0) | | 3022 | if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0) |
3034 | adapter->advertise |= 1 << 1; | | 3023 | adapter->advertise |= 1 << 1; |
3035 | if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0) | | 3024 | if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0) |
3036 | adapter->advertise |= 1 << 0; | | 3025 | adapter->advertise |= 1 << 0; |
3037 | if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0) | | 3026 | if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0) |
3038 | adapter->advertise |= 1 << 3; | | 3027 | adapter->advertise |= 1 << 3; |
3039 | if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0) | | 3028 | if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0) |
3040 | adapter->advertise |= 1 << 4; | | 3029 | adapter->advertise |= 1 << 4; |
3041 | if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0) | | 3030 | if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0) |
3042 | adapter->advertise |= 1 << 5; | | 3031 | adapter->advertise |= 1 << 5; |
3043 | } | | 3032 | } |
3044 | | | 3033 | |
3045 | IXGBE_CORE_UNLOCK(adapter); | | 3034 | IXGBE_CORE_UNLOCK(adapter); |
3046 | return (0); | | 3035 | return (0); |
3047 | | | 3036 | |
3048 | invalid: | | 3037 | invalid: |
3049 | device_printf(adapter->dev, "Invalid media type!\n"); | | 3038 | device_printf(adapter->dev, "Invalid media type!\n"); |
3050 | IXGBE_CORE_UNLOCK(adapter); | | 3039 | IXGBE_CORE_UNLOCK(adapter); |
3051 | | | 3040 | |
3052 | return (EINVAL); | | 3041 | return (EINVAL); |
3053 | } /* ixgbe_media_change */ | | 3042 | } /* ixgbe_media_change */ |
3054 | | | 3043 | |
3055 | /************************************************************************ | | 3044 | /************************************************************************ |
3056 | * ixgbe_set_promisc | | 3045 | * ixgbe_set_promisc |
3057 | ************************************************************************/ | | 3046 | ************************************************************************/ |
3058 | static void | | 3047 | static void |
3059 | ixgbe_set_promisc(struct adapter *adapter) | | 3048 | ixgbe_set_promisc(struct adapter *adapter) |
3060 | { | | 3049 | { |
3061 | struct ifnet *ifp = adapter->ifp; | | 3050 | struct ifnet *ifp = adapter->ifp; |
3062 | int mcnt = 0; | | 3051 | int mcnt = 0; |
3063 | u32 rctl; | | 3052 | u32 rctl; |
3064 | struct ether_multi *enm; | | 3053 | struct ether_multi *enm; |
3065 | struct ether_multistep step; | | 3054 | struct ether_multistep step; |
3066 | struct ethercom *ec = &adapter->osdep.ec; | | 3055 | struct ethercom *ec = &adapter->osdep.ec; |
3067 | | | 3056 | |
3068 | KASSERT(mutex_owned(&adapter->core_mtx)); | | 3057 | KASSERT(mutex_owned(&adapter->core_mtx)); |
3069 | rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); | | 3058 | rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); |
3070 | rctl &= (~IXGBE_FCTRL_UPE); | | 3059 | rctl &= (~IXGBE_FCTRL_UPE); |
3071 | ETHER_LOCK(ec); | | 3060 | ETHER_LOCK(ec); |
3072 | if (ec->ec_flags & ETHER_F_ALLMULTI) | | 3061 | if (ec->ec_flags & ETHER_F_ALLMULTI) |
3073 | mcnt = MAX_NUM_MULTICAST_ADDRESSES; | | 3062 | mcnt = MAX_NUM_MULTICAST_ADDRESSES; |
3074 | else { | | 3063 | else { |
3075 | ETHER_FIRST_MULTI(step, ec, enm); | | 3064 | ETHER_FIRST_MULTI(step, ec, enm); |
3076 | while (enm != NULL) { | | 3065 | while (enm != NULL) { |
3077 | if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) | | 3066 | if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) |
3078 | break; | | 3067 | break; |
3079 | mcnt++; | | 3068 | mcnt++; |
3080 | ETHER_NEXT_MULTI(step, enm); | | 3069 | ETHER_NEXT_MULTI(step, enm); |
3081 | } | | 3070 | } |
3082 | } | | 3071 | } |
3083 | if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) | | 3072 | if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) |
3084 | rctl &= (~IXGBE_FCTRL_MPE); | | 3073 | rctl &= (~IXGBE_FCTRL_MPE); |
3085 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); | | 3074 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); |
3086 | | | 3075 | |
3087 | if (ifp->if_flags & IFF_PROMISC) { | | 3076 | if (ifp->if_flags & IFF_PROMISC) { |
3088 | rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | | 3077 | rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); |
3089 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); | | 3078 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); |
3090 | } else if (ec->ec_flags & ETHER_F_ALLMULTI) { | | 3079 | } else if (ec->ec_flags & ETHER_F_ALLMULTI) { |
3091 | rctl |= IXGBE_FCTRL_MPE; | | 3080 | rctl |= IXGBE_FCTRL_MPE; |
3092 | rctl &= ~IXGBE_FCTRL_UPE; | | 3081 | rctl &= ~IXGBE_FCTRL_UPE; |
3093 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); | | 3082 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); |
3094 | } | | 3083 | } |
3095 | ETHER_UNLOCK(ec); | | 3084 | ETHER_UNLOCK(ec); |
3096 | } /* ixgbe_set_promisc */ | | 3085 | } /* ixgbe_set_promisc */ |
3097 | | | 3086 | |
3098 | /************************************************************************ | | 3087 | /************************************************************************ |
3099 | * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) | | 3088 | * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) |
3100 | ************************************************************************/ | | 3089 | ************************************************************************/ |
3101 | static int | | 3090 | static int |
3102 | ixgbe_msix_link(void *arg) | | 3091 | ixgbe_msix_link(void *arg) |
3103 | { | | 3092 | { |
3104 | struct adapter *adapter = arg; | | 3093 | struct adapter *adapter = arg; |
3105 | struct ixgbe_hw *hw = &adapter->hw; | | 3094 | struct ixgbe_hw *hw = &adapter->hw; |
3106 | u32 eicr, eicr_mask; | | 3095 | u32 eicr, eicr_mask; |
3107 | s32 retval; | | 3096 | s32 retval; |
3108 | | | 3097 | |
3109 | ++adapter->link_irq.ev_count; | | 3098 | ++adapter->link_irq.ev_count; |
3110 | | | 3099 | |
3111 | /* Pause other interrupts */ | | 3100 | /* Pause other interrupts */ |
3112 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); | | 3101 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); |
3113 | | | 3102 | |
3114 | /* First get the cause */ | | 3103 | /* First get the cause */ |
3115 | /* | | 3104 | /* |
3116 | * The specifications of 82598, 82599, X540 and X550 say EICS register | | 3105 | * The specifications of 82598, 82599, X540 and X550 say EICS register |
3117 | * is write only. However, Linux says it is a workaround for silicon | | 3106 | * is write only. However, Linux says it is a workaround for silicon |
3118 | * errata to read EICS instead of EICR to get interrupt cause. It seems | | 3107 | * errata to read EICS instead of EICR to get interrupt cause. It seems |
3119 | * there is a problem about read clear mechanism for EICR register. | | 3108 | * there is a problem about read clear mechanism for EICR register. |
3120 | */ | | 3109 | */ |
3121 | eicr = IXGBE_READ_REG(hw, IXGBE_EICS); | | 3110 | eicr = IXGBE_READ_REG(hw, IXGBE_EICS); |
3122 | /* Be sure the queue bits are not cleared */ | | 3111 | /* Be sure the queue bits are not cleared */ |
3123 | eicr &= ~IXGBE_EICR_RTX_QUEUE; | | 3112 | eicr &= ~IXGBE_EICR_RTX_QUEUE; |
3124 | /* Clear interrupt with write */ | | 3113 | /* Clear interrupt with write */ |
3125 | IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); | | 3114 | IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); |
3126 | | | 3115 | |
| | | 3116 | if (ixgbe_is_sfp(hw)) { |
| | | 3117 | /* Pluggable optics-related interrupt */ |
| | | 3118 | if (hw->mac.type >= ixgbe_mac_X540) |
| | | 3119 | eicr_mask = IXGBE_EICR_GPI_SDP0_X540; |
| | | 3120 | else |
| | | 3121 | eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); |
| | | 3122 | |
| | | 3123 | /* |
| | | 3124 | * An interrupt might not arrive when a module is inserted. |
| | | 3125 | * When an link status change interrupt occurred and the driver |
| | | 3126 | * still regard SFP as unplugged, issue the module softint |
| | | 3127 | * and then issue LSC interrupt. |
| | | 3128 | */ |
| | | 3129 | if ((eicr & eicr_mask) |
| | | 3130 | || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present) |
| | | 3131 | && (eicr & IXGBE_EICR_LSC))) { |
| | | 3132 | IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); |
| | | 3133 | softint_schedule(adapter->mod_si); |
| | | 3134 | } |
| | | 3135 | |
| | | 3136 | if ((hw->mac.type == ixgbe_mac_82599EB) && |
| | | 3137 | (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { |
| | | 3138 | IXGBE_WRITE_REG(hw, IXGBE_EICR, |
| | | 3139 | IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); |
| | | 3140 | softint_schedule(adapter->msf_si); |
| | | 3141 | } |
| | | 3142 | } |
| | | 3143 | |
3127 | /* Link status change */ | | 3144 | /* Link status change */ |
3128 | if (eicr & IXGBE_EICR_LSC) { | | 3145 | if (eicr & IXGBE_EICR_LSC) { |
3129 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); | | 3146 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); |
3130 | softint_schedule(adapter->link_si); | | 3147 | softint_schedule(adapter->link_si); |
3131 | } | | 3148 | } |
3132 | | | 3149 | |
3133 | if (adapter->hw.mac.type != ixgbe_mac_82598EB) { | | 3150 | if (adapter->hw.mac.type != ixgbe_mac_82598EB) { |
3134 | if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && | | 3151 | if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && |
3135 | (eicr & IXGBE_EICR_FLOW_DIR)) { | | 3152 | (eicr & IXGBE_EICR_FLOW_DIR)) { |
3136 | /* This is probably overkill :) */ | | 3153 | /* This is probably overkill :) */ |
3137 | if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) | | 3154 | if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) |
3138 | return 1; | | 3155 | return 1; |
3139 | /* Disable the interrupt */ | | 3156 | /* Disable the interrupt */ |
3140 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); | | 3157 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); |
3141 | softint_schedule(adapter->fdir_si); | | 3158 | softint_schedule(adapter->fdir_si); |
3142 | } | | 3159 | } |
3143 | | | 3160 | |
3144 | if (eicr & IXGBE_EICR_ECC) { | | 3161 | if (eicr & IXGBE_EICR_ECC) { |
3145 | device_printf(adapter->dev, | | 3162 | device_printf(adapter->dev, |
3146 | "CRITICAL: ECC ERROR!! Please Reboot!!\n"); | | 3163 | "CRITICAL: ECC ERROR!! Please Reboot!!\n"); |
3147 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); | | 3164 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); |
3148 | } | | 3165 | } |
3149 | | | 3166 | |
3150 | /* Check for over temp condition */ | | 3167 | /* Check for over temp condition */ |
3151 | if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { | | 3168 | if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { |
3152 | switch (adapter->hw.mac.type) { | | 3169 | switch (adapter->hw.mac.type) { |
3153 | case ixgbe_mac_X550EM_a: | | 3170 | case ixgbe_mac_X550EM_a: |
3154 | if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) | | 3171 | if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) |
3155 | break; | | 3172 | break; |
3156 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, | | 3173 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, |
3157 | IXGBE_EICR_GPI_SDP0_X550EM_a); | | 3174 | IXGBE_EICR_GPI_SDP0_X550EM_a); |
3158 | IXGBE_WRITE_REG(hw, IXGBE_EICR, | | 3175 | IXGBE_WRITE_REG(hw, IXGBE_EICR, |
3159 | IXGBE_EICR_GPI_SDP0_X550EM_a); | | 3176 | IXGBE_EICR_GPI_SDP0_X550EM_a); |
3160 | retval = hw->phy.ops.check_overtemp(hw); | | 3177 | retval = hw->phy.ops.check_overtemp(hw); |
3161 | if (retval != IXGBE_ERR_OVERTEMP) | | 3178 | if (retval != IXGBE_ERR_OVERTEMP) |
3162 | break; | | 3179 | break; |
3163 | device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); | | 3180 | device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); |
3164 | device_printf(adapter->dev, "System shutdown required!\n"); | | 3181 | device_printf(adapter->dev, "System shutdown required!\n"); |
3165 | break; | | 3182 | break; |
3166 | default: | | 3183 | default: |
3167 | if (!(eicr & IXGBE_EICR_TS)) | | 3184 | if (!(eicr & IXGBE_EICR_TS)) |
3168 | break; | | 3185 | break; |
3169 | retval = hw->phy.ops.check_overtemp(hw); | | 3186 | retval = hw->phy.ops.check_overtemp(hw); |
3170 | if (retval != IXGBE_ERR_OVERTEMP) | | 3187 | if (retval != IXGBE_ERR_OVERTEMP) |
3171 | break; | | 3188 | break; |
3172 | device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); | | 3189 | device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); |
3173 | device_printf(adapter->dev, "System shutdown required!\n"); | | 3190 | device_printf(adapter->dev, "System shutdown required!\n"); |
3174 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); | | 3191 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); |
3175 | break; | | 3192 | break; |
3176 | } | | 3193 | } |
3177 | } | | 3194 | } |
3178 | | | 3195 | |
3179 | /* Check for VF message */ | | 3196 | /* Check for VF message */ |
3180 | if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && | | 3197 | if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && |
3181 | (eicr & IXGBE_EICR_MAILBOX)) | | 3198 | (eicr & IXGBE_EICR_MAILBOX)) |
3182 | softint_schedule(adapter->mbx_si); | | 3199 | softint_schedule(adapter->mbx_si); |
3183 | } | | 3200 | } |
3184 | | | 3201 | |
3185 | if (ixgbe_is_sfp(hw)) { | | | |
3186 | /* Pluggable optics-related interrupt */ | | | |
3187 | if (hw->mac.type >= ixgbe_mac_X540) | | | |
3188 | eicr_mask = IXGBE_EICR_GPI_SDP0_X540; | | | |
3189 | else | | | |
3190 | eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); | | | |
3191 | | | | |
3192 | if (eicr & eicr_mask) { | | | |
3193 | IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); | | | |
3194 | softint_schedule(adapter->mod_si); | | | |
3195 | } | | | |
3196 | | | | |
3197 | if ((hw->mac.type == ixgbe_mac_82599EB) && | | | |
3198 | (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { | | | |
3199 | IXGBE_WRITE_REG(hw, IXGBE_EICR, | | | |
3200 | IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); | | | |
3201 | softint_schedule(adapter->msf_si); | | | |
3202 | } | | | |
3203 | } | | | |
3204 | | | | |
3205 | /* Check for fan failure */ | | 3202 | /* Check for fan failure */ |
3206 | if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { | | 3203 | if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { |
3207 | ixgbe_check_fan_failure(adapter, eicr, TRUE); | | 3204 | ixgbe_check_fan_failure(adapter, eicr, TRUE); |
3208 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); | | 3205 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); |
3209 | } | | 3206 | } |
3210 | | | 3207 | |
3211 | /* External PHY interrupt */ | | 3208 | /* External PHY interrupt */ |
3212 | if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && | | 3209 | if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && |
3213 | (eicr & IXGBE_EICR_GPI_SDP0_X540)) { | | 3210 | (eicr & IXGBE_EICR_GPI_SDP0_X540)) { |
3214 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); | | 3211 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); |
3215 | softint_schedule(adapter->phy_si); | | 3212 | softint_schedule(adapter->phy_si); |
3216 | } | | 3213 | } |
3217 | | | 3214 | |
3218 | /* Re-enable other interrupts */ | | 3215 | /* Re-enable other interrupts */ |
3219 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); | | 3216 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); |
3220 | return 1; | | 3217 | return 1; |
3221 | } /* ixgbe_msix_link */ | | 3218 | } /* ixgbe_msix_link */ |
3222 | | | 3219 | |
3223 | static void | | 3220 | static void |
3224 | ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) | | 3221 | ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) |
3225 | { | | 3222 | { |
3226 | | | 3223 | |
3227 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | | 3224 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
3228 | itr |= itr << 16; | | 3225 | itr |= itr << 16; |
3229 | else | | 3226 | else |
3230 | itr |= IXGBE_EITR_CNT_WDIS; | | 3227 | itr |= IXGBE_EITR_CNT_WDIS; |
3231 | | | 3228 | |
3232 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr); | | 3229 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr); |
3233 | } | | 3230 | } |
3234 | | | 3231 | |
3235 | | | 3232 | |
/************************************************************************
 * ixgbe_sysctl_interrupt_rate_handler
 *
 *   Sysctl handler exposing the per-queue interrupt rate (interrupts
 *   per second).  Reads the current EITR interval for the queue's MSI-X
 *   vector, reports it as a rate, and on write converts the requested
 *   rate back into an EITR interval (0 = no limitation).
 ************************************************************************/
static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t reg, usec, rate;
	int error;

	if (que == NULL)
		return 0;

	adapter = que->adapter;
	/* No tuning while firmware recovery is in progress. */
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
	/* Interval lives in bits [11:3] of EITR. */
	usec = ((reg & 0x0FF8) >> 3);
	/* 500000 = 1e6us / 2us-per-interval-tick; 0 means unthrottled. */
	if (usec > 0)
		rate = 500000 / usec;
	else
		rate = 0;
	node.sysctl_data = &rate;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;
	reg &= ~0xfff;	/* default, no limitation */
	if (rate > 0 && rate < 500000) {
		if (rate < 1000)
			rate = 1000;
		/* 4000000/rate == (500000/rate) << 3, i.e. already in place */
		reg |= ((4000000/rate) & 0xff8);
		/*
		 * When RSC is used, ITR interval must be larger than
		 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
		 * The minimum value is always greater than 2us on 100M
		 * (and 10M?(not documented)), but it's not on 1G and higher.
		 */
		if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
		    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
			if ((adapter->num_queues > 1)
			    && (reg < IXGBE_MIN_RSC_EITR_10G1G))
				return EINVAL;
		}
		ixgbe_max_interrupt_rate = rate;
	} else
		ixgbe_max_interrupt_rate = 0;
	ixgbe_eitr_write(adapter, que->msix, reg);

	return (0);
} /* ixgbe_sysctl_interrupt_rate_handler */
3289 | | | 3286 | |
3290 | const struct sysctlnode * | | 3287 | const struct sysctlnode * |
3291 | ixgbe_sysctl_instance(struct adapter *adapter) | | 3288 | ixgbe_sysctl_instance(struct adapter *adapter) |
3292 | { | | 3289 | { |
3293 | const char *dvname; | | 3290 | const char *dvname; |
3294 | struct sysctllog **log; | | 3291 | struct sysctllog **log; |
3295 | int rc; | | 3292 | int rc; |
3296 | const struct sysctlnode *rnode; | | 3293 | const struct sysctlnode *rnode; |
3297 | | | 3294 | |
3298 | if (adapter->sysctltop != NULL) | | 3295 | if (adapter->sysctltop != NULL) |
3299 | return adapter->sysctltop; | | 3296 | return adapter->sysctltop; |
3300 | | | 3297 | |
3301 | log = &adapter->sysctllog; | | 3298 | log = &adapter->sysctllog; |
3302 | dvname = device_xname(adapter->dev); | | 3299 | dvname = device_xname(adapter->dev); |
3303 | | | 3300 | |
3304 | if ((rc = sysctl_createv(log, 0, NULL, &rnode, | | 3301 | if ((rc = sysctl_createv(log, 0, NULL, &rnode, |
3305 | 0, CTLTYPE_NODE, dvname, | | 3302 | 0, CTLTYPE_NODE, dvname, |
3306 | SYSCTL_DESCR("ixgbe information and settings"), | | 3303 | SYSCTL_DESCR("ixgbe information and settings"), |
3307 | NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) | | 3304 | NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) |
3308 | goto err; | | 3305 | goto err; |
3309 | | | 3306 | |
3310 | return rnode; | | 3307 | return rnode; |
3311 | err: | | 3308 | err: |
3312 | printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc); | | 3309 | printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc); |
3313 | return NULL; | | 3310 | return NULL; |
3314 | } | | 3311 | } |
3315 | | | 3312 | |
/************************************************************************
 * ixgbe_add_device_sysctls
 *
 *   Create the per-device sysctl tree: generic debug/queue info nodes,
 *   flow control, interrupt moderation, advertised speed, and nodes that
 *   are conditional on hardware capability (DMA coalescing, WoL,
 *   external PHY temperature, EEE).  Creation failures are logged but
 *   non-fatal.
 ************************************************************************/
static void
ixgbe_add_device_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct sysctllog **log;
	const struct sysctlnode *rnode, *cnode;

	log = &adapter->sysctllog;

	if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
		aprint_error_dev(dev, "could not create sysctl root\n");
		return;
	}

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Debug Info"),
	    ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
	    NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT,
	    "num_queues", SYSCTL_DESCR("Number of queues"),
	    NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Sysctls for all devices */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
	    ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/* Seed the per-device AIM setting from the module-wide default. */
	adapter->enable_aim = ixgbe_enable_aim;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
	    NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READWRITE, CTLTYPE_INT,
	    "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
	    ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	/*
	 * If each "que->txrx_use_workqueue" is changed in sysctl handler,
	 * it causes flip-flopping softint/workqueue mode in one deferred
	 * processing. Therefore, preempt_disable()/preempt_enable() are
	 * required in ixgbe_sched_handle_que() to avoid
	 * KASSERT(ixgbe_sched_handle_que()) in softint_schedule().
	 * I think changing "que->txrx_use_workqueue" in interrupt handler
	 * is lighter than doing preempt_disable()/preempt_enable() in every
	 * ixgbe_sched_handle_que().
	 */
	adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
	    NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

#ifdef IXGBE_DEBUG
	/* testing sysctls (for all devices) */
	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
	    ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");

	if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
	    CTLTYPE_STRING, "print_rss_config",
	    SYSCTL_DESCR("Prints RSS Configuration"),
	    ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
	    CTL_EOL) != 0)
		aprint_error_dev(dev, "could not create sysctl\n");
#endif
	/* for X550 series devices */
	if (hw->mac.type >= ixgbe_mac_X550)
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
		    ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	/* for WoL-capable devices */
	if (adapter->wol_support) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "wol_enable",
		    SYSCTL_DESCR("Enable/Disable Wake on LAN"),
		    ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "wufc",
		    SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
		    ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	/* for X552/X557-AT devices */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
		const struct sysctlnode *phy_node;

		/* Group the external-PHY sysctls under a "phy" subnode. */
		if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
		    "phy", SYSCTL_DESCR("External PHY sysctls"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
			aprint_error_dev(dev, "could not create sysctl\n");
			return;
		}

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "temp",
		    SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
		    ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

		if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
		    CTLTYPE_INT, "overtemp_occurred",
		    SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
		    ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}

	if ((hw->mac.type == ixgbe_mac_X550EM_a)
	    && (hw->phy.type == ixgbe_phy_fw))
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_BOOL, "force_10_100_autonego",
		    SYSCTL_DESCR("Force autonego on 10M and 100M"),
		    NULL, 0, &hw->phy.force_10_100_autonego, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");

	if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
		if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
		    CTLTYPE_INT, "eee_state",
		    SYSCTL_DESCR("EEE Power Save State"),
		    ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
		    CTL_EOL) != 0)
			aprint_error_dev(dev, "could not create sysctl\n");
	}
} /* ixgbe_add_device_sysctls */
3472 | | | 3469 | |
/************************************************************************
 * ixgbe_allocate_pci_resources
 *
 *   Map BAR0 (the device register window) and make sure PCI memory-space
 *   decoding is enabled.  Returns 0 on success, ENXIO if the BAR is of an
 *   unexpected type or cannot be mapped.
 ************************************************************************/
static int
ixgbe_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/*
		 * Register windows must not be mapped prefetchable even if
		 * the BAR advertises it, so strip the flag before mapping.
		 */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		     adapter->osdep.mem_size, flags,
		     &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 marks the BAR as unmapped for detach */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case BIOS or
		 * UEFI don't set it.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	return (0);
} /* ixgbe_allocate_pci_resources */
3522 | | | 3519 | |
/*
 * Tear down all software interrupt handlers and deferred-work queues:
 * per-queue TX/RX softints, the TX/RX workqueues, and the link, module,
 * MSF, PHY, flow-director, and mailbox softints.  Pointers that remain
 * reachable after this call are NULLed to make the teardown idempotent.
 */
static void
ixgbe_free_softint(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
		/* Per-ring TX softints exist only in the non-legacy TX path */
		if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
			if (txr->txr_si != NULL)
				softint_disestablish(txr->txr_si);
		}
		if (que->que_si != NULL)
			softint_disestablish(que->que_si);
	}
	if (adapter->txr_wq != NULL)
		workqueue_destroy(adapter->txr_wq);
	if (adapter->txr_wq_enqueued != NULL)
		percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
	if (adapter->que_wq != NULL)
		workqueue_destroy(adapter->que_wq);

	/* Drain the Link queue */
	if (adapter->link_si != NULL) {
		softint_disestablish(adapter->link_si);
		adapter->link_si = NULL;
	}
	if (adapter->mod_si != NULL) {
		softint_disestablish(adapter->mod_si);
		adapter->mod_si = NULL;
	}
	if (adapter->msf_si != NULL) {
		softint_disestablish(adapter->msf_si);
		adapter->msf_si = NULL;
	}
	if (adapter->phy_si != NULL) {
		softint_disestablish(adapter->phy_si);
		adapter->phy_si = NULL;
	}
	if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
		if (adapter->fdir_si != NULL) {
			softint_disestablish(adapter->fdir_si);
			adapter->fdir_si = NULL;
		}
	}
	/*
	 * NOTE(review): FDIR is gated on feat_en but SRIOV on feat_cap —
	 * presumably mbx_si is established whenever the capability exists;
	 * confirm against the allocation path before "fixing" this.
	 */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
		if (adapter->mbx_si != NULL) {
			softint_disestablish(adapter->mbx_si);
			adapter->mbx_si = NULL;
		}
	}
} /* ixgbe_free_softint */
3575 | | | 3572 | |
3576 | /************************************************************************ | | 3573 | /************************************************************************ |
3577 | * ixgbe_detach - Device removal routine | | 3574 | * ixgbe_detach - Device removal routine |
3578 | * | | 3575 | * |
3579 | * Called when the driver is being removed. | | 3576 | * Called when the driver is being removed. |
3580 | * Stops the adapter and deallocates all the resources | | 3577 | * Stops the adapter and deallocates all the resources |
3581 | * that were allocated for driver operation. | | 3578 | * that were allocated for driver operation. |
3582 | * | | 3579 | * |
3583 | * return 0 on success, positive on failure | | 3580 | * return 0 on success, positive on failure |
3584 | ************************************************************************/ | | 3581 | ************************************************************************/ |
static int
ixgbe_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct rx_ring *rxr = adapter->rx_rings;
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *stats = &adapter->stats.pf;
	u32 ctrl_ext;
	int i;

	INIT_DEBUGOUT("ixgbe_detach: begin");
	/* If attach never completed, there is nothing to undo. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Refuse to detach while SR-IOV virtual functions are active. */
	if (ixgbe_pci_iov_detach(dev) != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (EBUSY);
	}

	/*
	 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
	 * so it's not required to call ixgbe_stop() directly.
	 */
	IXGBE_CORE_LOCK(adapter);
	ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);
#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* VLANs configured and caller did not force the detach. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return (EBUSY);
	}
#endif

	pmf_device_deregister(dev);

	ether_ifdetach(adapter->ifp);

	/* Tear down the softints/workqueues established at attach time. */
	ixgbe_free_softint(adapter);

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Wait for outstanding callouts to finish before freeing anything. */
	callout_halt(&adapter->timer, NULL);
	if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
		callout_halt(&adapter->recovery_mode_timer, NULL);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixgbe_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	if_percpuq_destroy(adapter->ipq);

	/*
	 * Detach every event counter registered at attach time; the list
	 * below must mirror the attach-side evcnt_attach calls exactly.
	 */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->link_irq);
	evcnt_detach(&adapter->link_sicount);
	evcnt_detach(&adapter->mod_sicount);
	evcnt_detach(&adapter->msf_sicount);
	evcnt_detach(&adapter->phy_sicount);

	/*
	 * Per-traffic-class counters; bounded by the stats array sizes and
	 * conditional on MAC type, matching the attach-side registration.
	 */
	for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
		if (i < __arraycount(stats->mpc)) {
			evcnt_detach(&stats->mpc[i]);
			if (hw->mac.type == ixgbe_mac_82598EB)
				evcnt_detach(&stats->rnbc[i]);
		}
		if (i < __arraycount(stats->pxontxc)) {
			evcnt_detach(&stats->pxontxc[i]);
			evcnt_detach(&stats->pxonrxc[i]);
			evcnt_detach(&stats->pxofftxc[i]);
			evcnt_detach(&stats->pxoffrxc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->pxon2offc[i]);
		}
	}

	/* Per-queue counters: one set for each TX/RX ring pair. */
	txr = adapter->tx_rings;
	for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		if (i < __arraycount(stats->qprc)) {
			evcnt_detach(&stats->qprc[i]);
			evcnt_detach(&stats->qptc[i]);
			evcnt_detach(&stats->qbrc[i]);
			evcnt_detach(&stats->qbtc[i]);
			if (hw->mac.type >= ixgbe_mac_82599EB)
				evcnt_detach(&stats->qprdc[i]);
		}

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);
	evcnt_detach(&stats->intzero);
	evcnt_detach(&stats->legint);
	evcnt_detach(&stats->crcerrs);
	evcnt_detach(&stats->illerrc);
	evcnt_detach(&stats->errbc);
	evcnt_detach(&stats->mspdc);
	if (hw->mac.type >= ixgbe_mac_X550)
		evcnt_detach(&stats->mbsdc);
	evcnt_detach(&stats->mpctotal);
	evcnt_detach(&stats->mlfc);
	evcnt_detach(&stats->mrfc);
	evcnt_detach(&stats->rlec);
	evcnt_detach(&stats->lxontxc);
	evcnt_detach(&stats->lxonrxc);
	evcnt_detach(&stats->lxofftxc);
	evcnt_detach(&stats->lxoffrxc);

	/* Packet Reception Stats */
	evcnt_detach(&stats->tor);
	evcnt_detach(&stats->gorc);
	evcnt_detach(&stats->tpr);
	evcnt_detach(&stats->gprc);
	evcnt_detach(&stats->mprc);
	evcnt_detach(&stats->bprc);
	evcnt_detach(&stats->prc64);
	evcnt_detach(&stats->prc127);
	evcnt_detach(&stats->prc255);
	evcnt_detach(&stats->prc511);
	evcnt_detach(&stats->prc1023);
	evcnt_detach(&stats->prc1522);
	evcnt_detach(&stats->ruc);
	evcnt_detach(&stats->rfc);
	evcnt_detach(&stats->roc);
	evcnt_detach(&stats->rjc);
	evcnt_detach(&stats->mngprc);
	evcnt_detach(&stats->mngpdc);
	evcnt_detach(&stats->xec);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->gotc);
	evcnt_detach(&stats->tpt);
	evcnt_detach(&stats->gptc);
	evcnt_detach(&stats->bptc);
	evcnt_detach(&stats->mptc);
	evcnt_detach(&stats->mngptc);
	evcnt_detach(&stats->ptc64);
	evcnt_detach(&stats->ptc127);
	evcnt_detach(&stats->ptc255);
	evcnt_detach(&stats->ptc511);
	evcnt_detach(&stats->ptc1023);
	evcnt_detach(&stats->ptc1522);

	/* Finally release the ring structures and per-queue locks. */
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	for (i = 0; i < adapter->num_queues; i++) {
		struct ix_queue * que = &adapter->queues[i];
		mutex_destroy(&que->dc_mtx);
	}
	free(adapter->queues, M_DEVBUF);
	free(adapter->mta, M_DEVBUF);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixgbe_detach */
3777 | | | 3774 | |
3778 | /************************************************************************ | | 3775 | /************************************************************************ |
3779 | * ixgbe_setup_low_power_mode - LPLU/WoL preparation | | 3776 | * ixgbe_setup_low_power_mode - LPLU/WoL preparation |
3780 | * | | 3777 | * |
3781 | * Prepare the adapter/port for LPLU and/or WoL | | 3778 | * Prepare the adapter/port for LPLU and/or WoL |
3782 | ************************************************************************/ | | 3779 | ************************************************************************/ |
static int
ixgbe_setup_low_power_mode(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	s32 error = 0;

	/* Callers (detach/suspend paths) must hold the core lock. */
	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Limit power management flow to X550EM baseT */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    hw->phy.ops.enter_lplu) {
		/* X550EM baseT adapters need a special LPLU flow */
		/* reset_disable keeps ixgbe_stop() from resetting the PHY. */
		hw->phy.reset_disable = true;
		ixgbe_stop(adapter);
		error = hw->phy.ops.enter_lplu(hw);
		if (error)
			device_printf(dev,
			    "Error entering LPLU: %d\n", error);
		hw->phy.reset_disable = false;
	} else {
		/* Just stop for other adapters */
		ixgbe_stop(adapter);
	}

	if (!hw->wol_enabled) {
		/* No WoL: power the PHY down and clear all wakeup controls. */
		ixgbe_set_phy_power(hw, FALSE);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
	} else {
		/* Turn off support for APM wakeup. (Using ACPI instead) */
		IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
		    IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);

		/*
		 * Clear Wake Up Status register to prevent any previous wakeup
		 * events from waking us up immediately after we suspend.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);

		/*
		 * Program the Wakeup Filter Control register with user filter
		 * settings
		 */
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);

		/* Enable wakeups and power management in Wakeup Control */
		IXGBE_WRITE_REG(hw, IXGBE_WUC,
		    IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);

	}

	return error;
} /* ixgbe_setup_low_power_mode */
3837 | | | 3834 | |
3838 | /************************************************************************ | | 3835 | /************************************************************************ |
3839 | * ixgbe_shutdown - Shutdown entry point | | 3836 | * ixgbe_shutdown - Shutdown entry point |
3840 | ************************************************************************/ | | 3837 | ************************************************************************/ |
#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_private(dev);
	int error = 0;

	INIT_DEBUGOUT("ixgbe_shutdown: begin");

	/* Transition the adapter to its low-power/WoL state under the lock. */
	IXGBE_CORE_LOCK(adapter);
	error = ixgbe_setup_low_power_mode(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* 0 on success, positive errno-style value on failure. */
	return (error);
} /* ixgbe_shutdown */
#endif
3857 | | | 3854 | |
3858 | /************************************************************************ | | 3855 | /************************************************************************ |
3859 | * ixgbe_suspend | | 3856 | * ixgbe_suspend |
3860 | * | | 3857 | * |
3861 | * From D0 to D3 | | 3858 | * From D0 to D3 |
3862 | ************************************************************************/ | | 3859 | ************************************************************************/ |
3863 | static bool | | 3860 | static bool |
3864 | ixgbe_suspend(device_t dev, const pmf_qual_t *qual) | | 3861 | ixgbe_suspend(device_t dev, const pmf_qual_t *qual) |
3865 | { | | 3862 | { |
3866 | struct adapter *adapter = device_private(dev); | | 3863 | struct adapter *adapter = device_private(dev); |
3867 | int error = 0; | | 3864 | int error = 0; |
3868 | | | 3865 | |
3869 | INIT_DEBUGOUT("ixgbe_suspend: begin"); | | 3866 | INIT_DEBUGOUT("ixgbe_suspend: begin"); |
3870 | | | 3867 | |
3871 | IXGBE_CORE_LOCK(adapter); | | 3868 | IXGBE_CORE_LOCK(adapter); |
3872 | | | 3869 | |
3873 | error = ixgbe_setup_low_power_mode(adapter); | | 3870 | error = ixgbe_setup_low_power_mode(adapter); |
3874 | | | 3871 | |
3875 | IXGBE_CORE_UNLOCK(adapter); | | 3872 | IXGBE_CORE_UNLOCK(adapter); |
3876 | | | 3873 | |
3877 | return (error); | | 3874 | return (error); |
3878 | } /* ixgbe_suspend */ | | 3875 | } /* ixgbe_suspend */ |
3879 | | | 3876 | |
3880 | /************************************************************************ | | 3877 | /************************************************************************ |
3881 | * ixgbe_resume | | 3878 | * ixgbe_resume |
3882 | * | | 3879 | * |
3883 | * From D3 to D0 | | 3880 | * From D3 to D0 |
3884 | ************************************************************************/ | | 3881 | ************************************************************************/ |
3885 | static bool | | 3882 | static bool |
3886 | ixgbe_resume(device_t dev, const pmf_qual_t *qual) | | 3883 | ixgbe_resume(device_t dev, const pmf_qual_t *qual) |
3887 | { | | 3884 | { |
3888 | struct adapter *adapter = device_private(dev); | | 3885 | struct adapter *adapter = device_private(dev); |
3889 | struct ifnet *ifp = adapter->ifp; | | 3886 | struct ifnet *ifp = adapter->ifp; |
3890 | struct ixgbe_hw *hw = &adapter->hw; | | 3887 | struct ixgbe_hw *hw = &adapter->hw; |
3891 | u32 wus; | | 3888 | u32 wus; |
3892 | | | 3889 | |
3893 | INIT_DEBUGOUT("ixgbe_resume: begin"); | | 3890 | INIT_DEBUGOUT("ixgbe_resume: begin"); |
3894 | | | 3891 | |
3895 | IXGBE_CORE_LOCK(adapter); | | 3892 | IXGBE_CORE_LOCK(adapter); |
3896 | | | 3893 | |
3897 | /* Read & clear WUS register */ | | 3894 | /* Read & clear WUS register */ |
3898 | wus = IXGBE_READ_REG(hw, IXGBE_WUS); | | 3895 | wus = IXGBE_READ_REG(hw, IXGBE_WUS); |
3899 | if (wus) | | 3896 | if (wus) |
3900 | device_printf(dev, "Woken up by (WUS): %#010x\n", | | 3897 | device_printf(dev, "Woken up by (WUS): %#010x\n", |
3901 | IXGBE_READ_REG(hw, IXGBE_WUS)); | | 3898 | IXGBE_READ_REG(hw, IXGBE_WUS)); |
3902 | IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); | | 3899 | IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); |
3903 | /* And clear WUFC until next low-power transition */ | | 3900 | /* And clear WUFC until next low-power transition */ |
3904 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); | | 3901 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); |
3905 | | | 3902 | |
3906 | /* | | 3903 | /* |
3907 | * Required after D3->D0 transition; | | 3904 | * Required after D3->D0 transition; |
3908 | * will re-advertise all previous advertised speeds | | 3905 | * will re-advertise all previous advertised speeds |
3909 | */ | | 3906 | */ |
3910 | if (ifp->if_flags & IFF_UP) | | 3907 | if (ifp->if_flags & IFF_UP) |
3911 | ixgbe_init_locked(adapter); | | 3908 | ixgbe_init_locked(adapter); |
3912 | | | 3909 | |
3913 | IXGBE_CORE_UNLOCK(adapter); | | 3910 | IXGBE_CORE_UNLOCK(adapter); |
3914 | | | 3911 | |
3915 | return true; | | 3912 | return true; |
3916 | } /* ixgbe_resume */ | | 3913 | } /* ixgbe_resume */ |
3917 | | | 3914 | |
3918 | /* | | 3915 | /* |
3919 | * Set the various hardware offload abilities. | | 3916 | * Set the various hardware offload abilities. |
3920 | * | | 3917 | * |
3921 | * This takes the ifnet's if_capenable flags (e.g. set by the user using | | 3918 | * This takes the ifnet's if_capenable flags (e.g. set by the user using |
3922 | * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what | | 3919 | * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what |
3923 | * mbuf offload flags the driver will understand. | | 3920 | * mbuf offload flags the driver will understand. |
3924 | */ | | 3921 | */ |
static void
ixgbe_set_if_hwassist(struct adapter *adapter)
{
	/*
	 * XXX Intentionally empty on NetBSD.  Per the comment above, this
	 * is where if_capenable flags would be translated into if_hwassist
	 * offload flags; the NetBSD port has not implemented it.  Called
	 * from ixgbe_init_locked() on every (re)initialization.
	 */
}
3930 | | | 3927 | |
3931 | /************************************************************************ | | 3928 | /************************************************************************ |
3932 | * ixgbe_init_locked - Init entry point | | 3929 | * ixgbe_init_locked - Init entry point |
3933 | * | | 3930 | * |
3934 | * Used in two ways: It is used by the stack as an init | | 3931 | * Used in two ways: It is used by the stack as an init |
3935 | * entry point in network interface structure. It is also | | 3932 | * entry point in network interface structure. It is also |
3936 | * used by the driver as a hw/sw initialization routine to | | 3933 | * used by the driver as a hw/sw initialization routine to |
3937 | * get to a consistent state. | | 3934 | * get to a consistent state. |
3938 | * | | 3935 | * |
3939 | * return 0 on success, positive on failure | | 3936 | * return 0 on success, positive on failure |
3940 | ************************************************************************/ | | 3937 | ************************************************************************/ |
3941 | static void | | 3938 | static void |
3942 | ixgbe_init_locked(struct adapter *adapter) | | 3939 | ixgbe_init_locked(struct adapter *adapter) |
3943 | { | | 3940 | { |
3944 | struct ifnet *ifp = adapter->ifp; | | 3941 | struct ifnet *ifp = adapter->ifp; |
3945 | device_t dev = adapter->dev; | | 3942 | device_t dev = adapter->dev; |
3946 | struct ixgbe_hw *hw = &adapter->hw; | | 3943 | struct ixgbe_hw *hw = &adapter->hw; |
3947 | struct ix_queue *que; | | 3944 | struct ix_queue *que; |
3948 | struct tx_ring *txr; | | 3945 | struct tx_ring *txr; |
3949 | struct rx_ring *rxr; | | 3946 | struct rx_ring *rxr; |
3950 | u32 txdctl, mhadd; | | 3947 | u32 txdctl, mhadd; |
3951 | u32 rxdctl, rxctrl; | | 3948 | u32 rxdctl, rxctrl; |
3952 | u32 ctrl_ext; | | 3949 | u32 ctrl_ext; |
3953 | int i, j, err; | | 3950 | int i, j, err; |
3954 | | | 3951 | |
3955 | /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */ | | 3952 | /* XXX check IFF_UP and IFF_RUNNING, power-saving state! */ |
3956 | | | 3953 | |
3957 | KASSERT(mutex_owned(&adapter->core_mtx)); | | 3954 | KASSERT(mutex_owned(&adapter->core_mtx)); |
3958 | INIT_DEBUGOUT("ixgbe_init_locked: begin"); | | 3955 | INIT_DEBUGOUT("ixgbe_init_locked: begin"); |
3959 | | | 3956 | |
3960 | hw->adapter_stopped = FALSE; | | 3957 | hw->adapter_stopped = FALSE; |
3961 | ixgbe_stop_adapter(hw); | | 3958 | ixgbe_stop_adapter(hw); |
3962 | callout_stop(&adapter->timer); | | 3959 | callout_stop(&adapter->timer); |
3963 | for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) | | 3960 | for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) |
3964 | que->disabled_count = 0; | | 3961 | que->disabled_count = 0; |
3965 | | | 3962 | |
3966 | /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */ | | 3963 | /* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */ |
3967 | adapter->max_frame_size = | | 3964 | adapter->max_frame_size = |
3968 | ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; | | 3965 | ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; |
3969 | | | 3966 | |
3970 | /* Queue indices may change with IOV mode */ | | 3967 | /* Queue indices may change with IOV mode */ |
3971 | ixgbe_align_all_queue_indices(adapter); | | 3968 | ixgbe_align_all_queue_indices(adapter); |
3972 | | | 3969 | |
3973 | /* reprogram the RAR[0] in case user changed it. */ | | 3970 | /* reprogram the RAR[0] in case user changed it. */ |
3974 | ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); | | 3971 | ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); |
3975 | | | 3972 | |
3976 | /* Get the latest mac address, User can use a LAA */ | | 3973 | /* Get the latest mac address, User can use a LAA */ |
3977 | memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), | | 3974 | memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), |
3978 | IXGBE_ETH_LENGTH_OF_ADDRESS); | | 3975 | IXGBE_ETH_LENGTH_OF_ADDRESS); |
3979 | ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); | | 3976 | ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); |
3980 | hw->addr_ctrl.rar_used_count = 1; | | 3977 | hw->addr_ctrl.rar_used_count = 1; |
3981 | | | 3978 | |
3982 | /* Set hardware offload abilities from ifnet flags */ | | 3979 | /* Set hardware offload abilities from ifnet flags */ |
3983 | ixgbe_set_if_hwassist(adapter); | | 3980 | ixgbe_set_if_hwassist(adapter); |
3984 | | | 3981 | |
3985 | /* Prepare transmit descriptors and buffers */ | | 3982 | /* Prepare transmit descriptors and buffers */ |
3986 | if (ixgbe_setup_transmit_structures(adapter)) { | | 3983 | if (ixgbe_setup_transmit_structures(adapter)) { |
3987 | device_printf(dev, "Could not setup transmit structures\n"); | | 3984 | device_printf(dev, "Could not setup transmit structures\n"); |
3988 | ixgbe_stop(adapter); | | 3985 | ixgbe_stop(adapter); |
3989 | return; | | 3986 | return; |
3990 | } | | 3987 | } |
3991 | | | 3988 | |
3992 | ixgbe_init_hw(hw); | | 3989 | ixgbe_init_hw(hw); |
3993 | | | 3990 | |
3994 | ixgbe_initialize_iov(adapter); | | 3991 | ixgbe_initialize_iov(adapter); |
3995 | | | 3992 | |
3996 | ixgbe_initialize_transmit_units(adapter); | | 3993 | ixgbe_initialize_transmit_units(adapter); |
3997 | | | 3994 | |
3998 | /* Setup Multicast table */ | | 3995 | /* Setup Multicast table */ |
3999 | ixgbe_set_multi(adapter); | | 3996 | ixgbe_set_multi(adapter); |
4000 | | | 3997 | |
4001 | /* Determine the correct mbuf pool, based on frame size */ | | 3998 | /* Determine the correct mbuf pool, based on frame size */ |
4002 | if (adapter->max_frame_size <= MCLBYTES) | | 3999 | if (adapter->max_frame_size <= MCLBYTES) |
4003 | adapter->rx_mbuf_sz = MCLBYTES; | | 4000 | adapter->rx_mbuf_sz = MCLBYTES; |
4004 | else | | 4001 | else |
4005 | adapter->rx_mbuf_sz = MJUMPAGESIZE; | | 4002 | adapter->rx_mbuf_sz = MJUMPAGESIZE; |
4006 | | | 4003 | |
4007 | /* Prepare receive descriptors and buffers */ | | 4004 | /* Prepare receive descriptors and buffers */ |
4008 | if (ixgbe_setup_receive_structures(adapter)) { | | 4005 | if (ixgbe_setup_receive_structures(adapter)) { |
4009 | device_printf(dev, "Could not setup receive structures\n"); | | 4006 | device_printf(dev, "Could not setup receive structures\n"); |
4010 | ixgbe_stop(adapter); | | 4007 | ixgbe_stop(adapter); |
4011 | return; | | 4008 | return; |
4012 | } | | 4009 | } |
4013 | | | 4010 | |
4014 | /* Configure RX settings */ | | 4011 | /* Configure RX settings */ |
4015 | ixgbe_initialize_receive_units(adapter); | | 4012 | ixgbe_initialize_receive_units(adapter); |
4016 | | | 4013 | |
4017 | /* Enable SDP & MSI-X interrupts based on adapter */ | | 4014 | /* Enable SDP & MSI-X interrupts based on adapter */ |
4018 | ixgbe_config_gpie(adapter); | | 4015 | ixgbe_config_gpie(adapter); |
4019 | | | 4016 | |
4020 | /* Set MTU size */ | | 4017 | /* Set MTU size */ |
4021 | if (ifp->if_mtu > ETHERMTU) { | | 4018 | if (ifp->if_mtu > ETHERMTU) { |
4022 | /* aka IXGBE_MAXFRS on 82599 and newer */ | | 4019 | /* aka IXGBE_MAXFRS on 82599 and newer */ |
4023 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | | 4020 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); |
4024 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | | 4021 | mhadd &= ~IXGBE_MHADD_MFS_MASK; |
4025 | mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; | | 4022 | mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; |
4026 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); | | 4023 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); |
4027 | } | | 4024 | } |
4028 | | | 4025 | |
4029 | /* Now enable all the queues */ | | 4026 | /* Now enable all the queues */ |
4030 | for (i = 0; i < adapter->num_queues; i++) { | | 4027 | for (i = 0; i < adapter->num_queues; i++) { |
4031 | txr = &adapter->tx_rings[i]; | | 4028 | txr = &adapter->tx_rings[i]; |
4032 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); | | 4029 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); |
4033 | txdctl |= IXGBE_TXDCTL_ENABLE; | | 4030 | txdctl |= IXGBE_TXDCTL_ENABLE; |
4034 | /* Set WTHRESH to 8, burst writeback */ | | 4031 | /* Set WTHRESH to 8, burst writeback */ |
4035 | txdctl |= (8 << 16); | | 4032 | txdctl |= (8 << 16); |
4036 | /* | | 4033 | /* |
4037 | * When the internal queue falls below PTHRESH (32), | | 4034 | * When the internal queue falls below PTHRESH (32), |
4038 | * start prefetching as long as there are at least | | 4035 | * start prefetching as long as there are at least |
4039 | * HTHRESH (1) buffers ready. The values are taken | | 4036 | * HTHRESH (1) buffers ready. The values are taken |
4040 | * from the Intel linux driver 3.8.21. | | 4037 | * from the Intel linux driver 3.8.21. |
4041 | * Prefetching enables tx line rate even with 1 queue. | | 4038 | * Prefetching enables tx line rate even with 1 queue. |
4042 | */ | | 4039 | */ |
4043 | txdctl |= (32 << 0) | (1 << 8); | | 4040 | txdctl |= (32 << 0) | (1 << 8); |
4044 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); | | 4041 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); |
4045 | } | | 4042 | } |
4046 | | | 4043 | |
4047 | for (i = 0; i < adapter->num_queues; i++) { | | 4044 | for (i = 0; i < adapter->num_queues; i++) { |
4048 | rxr = &adapter->rx_rings[i]; | | 4045 | rxr = &adapter->rx_rings[i]; |
4049 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); | | 4046 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); |
4050 | if (hw->mac.type == ixgbe_mac_82598EB) { | | 4047 | if (hw->mac.type == ixgbe_mac_82598EB) { |
4051 | /* | | 4048 | /* |
4052 | * PTHRESH = 21 | | 4049 | * PTHRESH = 21 |
4053 | * HTHRESH = 4 | | 4050 | * HTHRESH = 4 |
4054 | * WTHRESH = 8 | | 4051 | * WTHRESH = 8 |
4055 | */ | | 4052 | */ |
4056 | rxdctl &= ~0x3FFFFF; | | 4053 | rxdctl &= ~0x3FFFFF; |
4057 | rxdctl |= 0x080420; | | 4054 | rxdctl |= 0x080420; |
4058 | } | | 4055 | } |
4059 | rxdctl |= IXGBE_RXDCTL_ENABLE; | | 4056 | rxdctl |= IXGBE_RXDCTL_ENABLE; |
4060 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); | | 4057 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); |
4061 | for (j = 0; j < 10; j++) { | | 4058 | for (j = 0; j < 10; j++) { |
4062 | if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & | | 4059 | if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & |
4063 | IXGBE_RXDCTL_ENABLE) | | 4060 | IXGBE_RXDCTL_ENABLE) |
4064 | break; | | 4061 | break; |
4065 | else | | 4062 | else |
4066 | msec_delay(1); | | 4063 | msec_delay(1); |
4067 | } | | 4064 | } |
4068 | wmb(); | | 4065 | wmb(); |
4069 | | | 4066 | |
4070 | /* | | 4067 | /* |
4071 | * In netmap mode, we must preserve the buffers made | | 4068 | * In netmap mode, we must preserve the buffers made |
4072 | * available to userspace before the if_init() | | 4069 | * available to userspace before the if_init() |
4073 | * (this is true by default on the TX side, because | | 4070 | * (this is true by default on the TX side, because |
4074 | * init makes all buffers available to userspace). | | 4071 | * init makes all buffers available to userspace). |
4075 | * | | 4072 | * |
4076 | * netmap_reset() and the device specific routines | | 4073 | * netmap_reset() and the device specific routines |
4077 | * (e.g. ixgbe_setup_receive_rings()) map these | | 4074 | * (e.g. ixgbe_setup_receive_rings()) map these |
4078 | * buffers at the end of the NIC ring, so here we | | 4075 | * buffers at the end of the NIC ring, so here we |
4079 | * must set the RDT (tail) register to make sure | | 4076 | * must set the RDT (tail) register to make sure |
4080 | * they are not overwritten. | | 4077 | * they are not overwritten. |
4081 | * | | 4078 | * |
4082 | * In this driver the NIC ring starts at RDH = 0, | | 4079 | * In this driver the NIC ring starts at RDH = 0, |
4083 | * RDT points to the last slot available for reception (?), | | 4080 | * RDT points to the last slot available for reception (?), |
4084 | * so RDT = num_rx_desc - 1 means the whole ring is available. | | 4081 | * so RDT = num_rx_desc - 1 means the whole ring is available. |
4085 | */ | | 4082 | */ |
4086 | #ifdef DEV_NETMAP | | 4083 | #ifdef DEV_NETMAP |
4087 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && | | 4084 | if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && |
4088 | (ifp->if_capenable & IFCAP_NETMAP)) { | | 4085 | (ifp->if_capenable & IFCAP_NETMAP)) { |
4089 | struct netmap_adapter *na = NA(adapter->ifp); | | 4086 | struct netmap_adapter *na = NA(adapter->ifp); |
4090 | struct netmap_kring *kring = na->rx_rings[i]; | | 4087 | struct netmap_kring *kring = na->rx_rings[i]; |
4091 | int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); | | 4088 | int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); |
4092 | | | 4089 | |
4093 | IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t); | | 4090 | IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t); |
4094 | } else | | 4091 | } else |
4095 | #endif /* DEV_NETMAP */ | | 4092 | #endif /* DEV_NETMAP */ |
4096 | IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), | | 4093 | IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), |
4097 | adapter->num_rx_desc - 1); | | 4094 | adapter->num_rx_desc - 1); |
4098 | } | | 4095 | } |
4099 | | | 4096 | |
4100 | /* Enable Receive engine */ | | 4097 | /* Enable Receive engine */ |
4101 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | | 4098 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
4102 | if (hw->mac.type == ixgbe_mac_82598EB) | | 4099 | if (hw->mac.type == ixgbe_mac_82598EB) |
4103 | rxctrl |= IXGBE_RXCTRL_DMBYPS; | | 4100 | rxctrl |= IXGBE_RXCTRL_DMBYPS; |
4104 | rxctrl |= IXGBE_RXCTRL_RXEN; | | 4101 | rxctrl |= IXGBE_RXCTRL_RXEN; |
4105 | ixgbe_enable_rx_dma(hw, rxctrl); | | 4102 | ixgbe_enable_rx_dma(hw, rxctrl); |
4106 | | | 4103 | |
4107 | callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); | | 4104 | callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); |
4108 | | | 4105 | |
4109 | /* Set up MSI/MSI-X routing */ | | 4106 | /* Set up MSI/MSI-X routing */ |
4110 | if (adapter->feat_en & IXGBE_FEATURE_MSIX) { | | 4107 | if (adapter->feat_en & IXGBE_FEATURE_MSIX) { |
4111 | ixgbe_configure_ivars(adapter); | | 4108 | ixgbe_configure_ivars(adapter); |
4112 | /* Set up auto-mask */ | | 4109 | /* Set up auto-mask */ |
4113 | if (hw->mac.type == ixgbe_mac_82598EB) | | 4110 | if (hw->mac.type == ixgbe_mac_82598EB) |
4114 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | | 4111 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
4115 | else { | | 4112 | else { |
4116 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); | | 4113 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); |
4117 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); | | 4114 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); |
4118 | } | | 4115 | } |
4119 | } else { /* Simple settings for Legacy/MSI */ | | 4116 | } else { /* Simple settings for Legacy/MSI */ |
4120 | ixgbe_set_ivar(adapter, 0, 0, 0); | | 4117 | ixgbe_set_ivar(adapter, 0, 0, 0); |
4121 | ixgbe_set_ivar(adapter, 0, 0, 1); | | 4118 | ixgbe_set_ivar(adapter, 0, 0, 1); |
4122 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | | 4119 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
4123 | } | | 4120 | } |
4124 | | | 4121 | |
4125 | ixgbe_init_fdir(adapter); | | 4122 | ixgbe_init_fdir(adapter); |
4126 | | | 4123 | |
4127 | /* | | 4124 | /* |
4128 | * Check on any SFP devices that | | 4125 | * Check on any SFP devices that |
4129 | * need to be kick-started | | 4126 | * need to be kick-started |
4130 | */ | | 4127 | */ |
4131 | if (hw->phy.type == ixgbe_phy_none) { | | 4128 | if (hw->phy.type == ixgbe_phy_none) { |
4132 | err = hw->phy.ops.identify(hw); | | 4129 | err = hw->phy.ops.identify(hw); |
4133 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | | 4130 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
4134 | device_printf(dev, | | 4131 | device_printf(dev, |
4135 | "Unsupported SFP+ module type was detected.\n"); | | 4132 | "Unsupported SFP+ module type was detected.\n"); |
4136 | return; | | 4133 | return; |
4137 | } | | 4134 | } |
4138 | } | | 4135 | } |
4139 | | | 4136 | |
4140 | /* Set moderation on the Link interrupt */ | | 4137 | /* Set moderation on the Link interrupt */ |
4141 | ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); | | 4138 | ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); |
4142 | | | 4139 | |
4143 | /* Enable EEE power saving */ | | 4140 | /* Enable EEE power saving */ |
4144 | if (adapter->feat_cap & IXGBE_FEATURE_EEE) | | 4141 | if (adapter->feat_cap & IXGBE_FEATURE_EEE) |
4145 | hw->mac.ops.setup_eee(hw, | | 4142 | hw->mac.ops.setup_eee(hw, |
4146 | adapter->feat_en & IXGBE_FEATURE_EEE); | | 4143 | adapter->feat_en & IXGBE_FEATURE_EEE); |
4147 | | | 4144 | |
4148 | /* Enable power to the phy. */ | | 4145 | /* Enable power to the phy. */ |
4149 | ixgbe_set_phy_power(hw, TRUE); | | 4146 | ixgbe_set_phy_power(hw, TRUE); |
4150 | | | 4147 | |
4151 | /* Config/Enable Link */ | | 4148 | /* Config/Enable Link */ |
4152 | ixgbe_config_link(adapter); | | 4149 | ixgbe_config_link(adapter); |
4153 | | | 4150 | |
4154 | /* Hardware Packet Buffer & Flow Control setup */ | | 4151 | /* Hardware Packet Buffer & Flow Control setup */ |
4155 | ixgbe_config_delay_values(adapter); | | 4152 | ixgbe_config_delay_values(adapter); |
4156 | | | 4153 | |
4157 | /* Initialize the FC settings */ | | 4154 | /* Initialize the FC settings */ |
4158 | ixgbe_start_hw(hw); | | 4155 | ixgbe_start_hw(hw); |
4159 | | | 4156 | |
4160 | /* Set up VLAN support and filter */ | | 4157 | /* Set up VLAN support and filter */ |
4161 | ixgbe_setup_vlan_hw_support(adapter); | | 4158 | ixgbe_setup_vlan_hw_support(adapter); |
4162 | | | 4159 | |
4163 | /* Setup DMA Coalescing */ | | 4160 | /* Setup DMA Coalescing */ |
4164 | ixgbe_config_dmac(adapter); | | 4161 | ixgbe_config_dmac(adapter); |
4165 | | | 4162 | |
4166 | /* And now turn on interrupts */ | | 4163 | /* And now turn on interrupts */ |
4167 | ixgbe_enable_intr(adapter); | | 4164 | ixgbe_enable_intr(adapter); |
4168 | | | 4165 | |
4169 | /* Enable the use of the MBX by the VF's */ | | 4166 | /* Enable the use of the MBX by the VF's */ |
4170 | if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { | | 4167 | if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { |
4171 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | | 4168 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); |
4172 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; | | 4169 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; |
4173 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | | 4170 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); |
4174 | } | | 4171 | } |
4175 | | | 4172 | |
4176 | /* Update saved flags. See ixgbe_ifflags_cb() */ | | 4173 | /* Update saved flags. See ixgbe_ifflags_cb() */ |
4177 | adapter->if_flags = ifp->if_flags; | | 4174 | adapter->if_flags = ifp->if_flags; |
4178 | adapter->ec_capenable = adapter->osdep.ec.ec_capenable; | | 4175 | adapter->ec_capenable = adapter->osdep.ec.ec_capenable; |
4179 | | | 4176 | |
4180 | /* Now inform the stack we're ready */ | | 4177 | /* Now inform the stack we're ready */ |
4181 | ifp->if_flags |= IFF_RUNNING; | | 4178 | ifp->if_flags |= IFF_RUNNING; |
4182 | | | 4179 | |
4183 | return; | | 4180 | return; |
4184 | } /* ixgbe_init_locked */ | | 4181 | } /* ixgbe_init_locked */ |
4185 | | | 4182 | |
4186 | /************************************************************************ | | 4183 | /************************************************************************ |
4187 | * ixgbe_init | | 4184 | * ixgbe_init |
4188 | ************************************************************************/ | | 4185 | ************************************************************************/ |
4189 | static int | | 4186 | static int |
4190 | ixgbe_init(struct ifnet *ifp) | | 4187 | ixgbe_init(struct ifnet *ifp) |
4191 | { | | 4188 | { |
4192 | struct adapter *adapter = ifp->if_softc; | | 4189 | struct adapter *adapter = ifp->if_softc; |
4193 | | | 4190 | |
4194 | IXGBE_CORE_LOCK(adapter); | | 4191 | IXGBE_CORE_LOCK(adapter); |
4195 | ixgbe_init_locked(adapter); | | 4192 | ixgbe_init_locked(adapter); |
4196 | IXGBE_CORE_UNLOCK(adapter); | | 4193 | IXGBE_CORE_UNLOCK(adapter); |
4197 | | | 4194 | |
4198 | return 0; /* XXX ixgbe_init_locked cannot fail? really? */ | | 4195 | return 0; /* XXX ixgbe_init_locked cannot fail? really? */ |
4199 | } /* ixgbe_init */ | | 4196 | } /* ixgbe_init */ |
4200 | | | 4197 | |
4201 | /************************************************************************ | | 4198 | /************************************************************************ |
4202 | * ixgbe_set_ivar | | 4199 | * ixgbe_set_ivar |
4203 | * | | 4200 | * |