| @@ -1,1000 +1,1000 @@ | | | @@ -1,1000 +1,1000 @@ |
1 | /*$NetBSD: ixv.c,v 1.156 2021/03/11 02:30:47 msaitoh Exp $*/ | | 1 | /*$NetBSD: ixv.c,v 1.157 2021/03/31 07:52:14 msaitoh Exp $*/ |
2 | | | 2 | |
3 | /****************************************************************************** | | 3 | /****************************************************************************** |
4 | | | 4 | |
5 | Copyright (c) 2001-2017, Intel Corporation | | 5 | Copyright (c) 2001-2017, Intel Corporation |
6 | All rights reserved. | | 6 | All rights reserved. |
7 | | | 7 | |
8 | Redistribution and use in source and binary forms, with or without | | 8 | Redistribution and use in source and binary forms, with or without |
9 | modification, are permitted provided that the following conditions are met: | | 9 | modification, are permitted provided that the following conditions are met: |
10 | | | 10 | |
11 | 1. Redistributions of source code must retain the above copyright notice, | | 11 | 1. Redistributions of source code must retain the above copyright notice, |
12 | this list of conditions and the following disclaimer. | | 12 | this list of conditions and the following disclaimer. |
13 | | | 13 | |
14 | 2. Redistributions in binary form must reproduce the above copyright | | 14 | 2. Redistributions in binary form must reproduce the above copyright |
15 | notice, this list of conditions and the following disclaimer in the | | 15 | notice, this list of conditions and the following disclaimer in the |
16 | documentation and/or other materials provided with the distribution. | | 16 | documentation and/or other materials provided with the distribution. |
17 | | | 17 | |
18 | 3. Neither the name of the Intel Corporation nor the names of its | | 18 | 3. Neither the name of the Intel Corporation nor the names of its |
19 | contributors may be used to endorse or promote products derived from | | 19 | contributors may be used to endorse or promote products derived from |
20 | this software without specific prior written permission. | | 20 | this software without specific prior written permission. |
21 | | | 21 | |
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
32 | POSSIBILITY OF SUCH DAMAGE. | | 32 | POSSIBILITY OF SUCH DAMAGE. |
33 | | | 33 | |
34 | ******************************************************************************/ | | 34 | ******************************************************************************/ |
35 | /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ | | 35 | /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ |
36 | | | 36 | |
37 | #ifdef _KERNEL_OPT | | 37 | #ifdef _KERNEL_OPT |
38 | #include "opt_inet.h" | | 38 | #include "opt_inet.h" |
39 | #include "opt_inet6.h" | | 39 | #include "opt_inet6.h" |
40 | #include "opt_net_mpsafe.h" | | 40 | #include "opt_net_mpsafe.h" |
41 | #include "opt_ixgbe.h" | | 41 | #include "opt_ixgbe.h" |
42 | #endif | | 42 | #endif |
43 | | | 43 | |
44 | #include "ixgbe.h" | | 44 | #include "ixgbe.h" |
45 | #include "vlan.h" | | 45 | #include "vlan.h" |
46 | | | 46 | |
47 | /************************************************************************ | | 47 | /************************************************************************ |
48 | * Driver version | | 48 | * Driver version |
49 | ************************************************************************/ | | 49 | ************************************************************************/ |
50 | static const char ixv_driver_version[] = "2.0.1-k"; | | 50 | static const char ixv_driver_version[] = "2.0.1-k"; |
51 | /* XXX NetBSD: + 1.5.17 */ | | 51 | /* XXX NetBSD: + 1.5.17 */ |
52 | | | 52 | |
53 | /************************************************************************ | | 53 | /************************************************************************ |
54 | * PCI Device ID Table | | 54 | * PCI Device ID Table |
55 | * | | 55 | * |
56 | * Used by probe to select devices to load on | | 56 | * Used by probe to select devices to load on |
57 | * Last field stores an index into ixv_strings | | 57 | * Last field stores an index into ixv_strings |
58 | * Last entry must be all 0s | | 58 | * Last entry must be all 0s |
59 | * | | 59 | * |
60 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | | 60 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } |
61 | ************************************************************************/ | | 61 | ************************************************************************/ |
62 | static const ixgbe_vendor_info_t ixv_vendor_info_array[] = | | 62 | static const ixgbe_vendor_info_t ixv_vendor_info_array[] = |
63 | { | | 63 | { |
64 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, | | 64 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, |
65 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, | | 65 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, |
66 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, | | 66 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, |
67 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, | | 67 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, |
68 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0}, | | 68 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0}, |
69 | /* required last entry */ | | 69 | /* required last entry */ |
70 | {0, 0, 0, 0, 0} | | 70 | {0, 0, 0, 0, 0} |
71 | }; | | 71 | }; |
72 | | | 72 | |
73 | /************************************************************************ | | 73 | /************************************************************************ |
74 | * Table of branding strings | | 74 | * Table of branding strings |
75 | ************************************************************************/ | | 75 | ************************************************************************/ |
76 | static const char *ixv_strings[] = { | | 76 | static const char *ixv_strings[] = { |
77 | "Intel(R) PRO/10GbE Virtual Function Network Driver" | | 77 | "Intel(R) PRO/10GbE Virtual Function Network Driver" |
78 | }; | | 78 | }; |
79 | | | 79 | |
80 | /********************************************************************* | | 80 | /********************************************************************* |
81 | * Function prototypes | | 81 | * Function prototypes |
82 | *********************************************************************/ | | 82 | *********************************************************************/ |
83 | static int ixv_probe(device_t, cfdata_t, void *); | | 83 | static int ixv_probe(device_t, cfdata_t, void *); |
84 | static void ixv_attach(device_t, device_t, void *); | | 84 | static void ixv_attach(device_t, device_t, void *); |
85 | static int ixv_detach(device_t, int); | | 85 | static int ixv_detach(device_t, int); |
86 | #if 0 | | 86 | #if 0 |
87 | static int ixv_shutdown(device_t); | | 87 | static int ixv_shutdown(device_t); |
88 | #endif | | 88 | #endif |
89 | static int ixv_ifflags_cb(struct ethercom *); | | 89 | static int ixv_ifflags_cb(struct ethercom *); |
90 | static int ixv_ioctl(struct ifnet *, u_long, void *); | | 90 | static int ixv_ioctl(struct ifnet *, u_long, void *); |
91 | static int ixv_init(struct ifnet *); | | 91 | static int ixv_init(struct ifnet *); |
92 | static void ixv_init_locked(struct adapter *); | | 92 | static void ixv_init_locked(struct adapter *); |
93 | static void ixv_ifstop(struct ifnet *, int); | | 93 | static void ixv_ifstop(struct ifnet *, int); |
94 | static void ixv_stop_locked(void *); | | 94 | static void ixv_stop_locked(void *); |
95 | static void ixv_init_device_features(struct adapter *); | | 95 | static void ixv_init_device_features(struct adapter *); |
96 | static void ixv_media_status(struct ifnet *, struct ifmediareq *); | | 96 | static void ixv_media_status(struct ifnet *, struct ifmediareq *); |
97 | static int ixv_media_change(struct ifnet *); | | 97 | static int ixv_media_change(struct ifnet *); |
98 | static int ixv_allocate_pci_resources(struct adapter *, | | 98 | static int ixv_allocate_pci_resources(struct adapter *, |
99 | const struct pci_attach_args *); | | 99 | const struct pci_attach_args *); |
100 | static void ixv_free_deferred_handlers(struct adapter *); | | 100 | static void ixv_free_deferred_handlers(struct adapter *); |
101 | static int ixv_allocate_msix(struct adapter *, | | 101 | static int ixv_allocate_msix(struct adapter *, |
102 | const struct pci_attach_args *); | | 102 | const struct pci_attach_args *); |
103 | static int ixv_configure_interrupts(struct adapter *); | | 103 | static int ixv_configure_interrupts(struct adapter *); |
104 | static void ixv_free_pci_resources(struct adapter *); | | 104 | static void ixv_free_pci_resources(struct adapter *); |
105 | static void ixv_local_timer(void *); | | 105 | static void ixv_local_timer(void *); |
106 | static void ixv_handle_timer(struct work *, void *); | | 106 | static void ixv_handle_timer(struct work *, void *); |
107 | static int ixv_setup_interface(device_t, struct adapter *); | | 107 | static int ixv_setup_interface(device_t, struct adapter *); |
108 | static void ixv_schedule_admin_tasklet(struct adapter *); | | 108 | static void ixv_schedule_admin_tasklet(struct adapter *); |
109 | static int ixv_negotiate_api(struct adapter *); | | 109 | static int ixv_negotiate_api(struct adapter *); |
110 | | | 110 | |
111 | static void ixv_initialize_transmit_units(struct adapter *); | | 111 | static void ixv_initialize_transmit_units(struct adapter *); |
112 | static void ixv_initialize_receive_units(struct adapter *); | | 112 | static void ixv_initialize_receive_units(struct adapter *); |
113 | static void ixv_initialize_rss_mapping(struct adapter *); | | 113 | static void ixv_initialize_rss_mapping(struct adapter *); |
114 | static s32 ixv_check_link(struct adapter *); | | 114 | static s32 ixv_check_link(struct adapter *); |
115 | | | 115 | |
116 | static void ixv_enable_intr(struct adapter *); | | 116 | static void ixv_enable_intr(struct adapter *); |
117 | static void ixv_disable_intr(struct adapter *); | | 117 | static void ixv_disable_intr(struct adapter *); |
118 | static int ixv_set_rxfilter(struct adapter *); | | 118 | static int ixv_set_rxfilter(struct adapter *); |
119 | static void ixv_update_link_status(struct adapter *); | | 119 | static void ixv_update_link_status(struct adapter *); |
120 | static int ixv_sysctl_debug(SYSCTLFN_PROTO); | | 120 | static int ixv_sysctl_debug(SYSCTLFN_PROTO); |
121 | static void ixv_set_ivar(struct adapter *, u8, u8, s8); | | 121 | static void ixv_set_ivar(struct adapter *, u8, u8, s8); |
122 | static void ixv_configure_ivars(struct adapter *); | | 122 | static void ixv_configure_ivars(struct adapter *); |
123 | static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); | | 123 | static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); |
124 | static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t); | | 124 | static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t); |
125 | | | 125 | |
126 | static void ixv_setup_vlan_tagging(struct adapter *); | | 126 | static void ixv_setup_vlan_tagging(struct adapter *); |
127 | static int ixv_setup_vlan_support(struct adapter *); | | 127 | static int ixv_setup_vlan_support(struct adapter *); |
128 | static int ixv_vlan_cb(struct ethercom *, uint16_t, bool); | | 128 | static int ixv_vlan_cb(struct ethercom *, uint16_t, bool); |
129 | static int ixv_register_vlan(struct adapter *, u16); | | 129 | static int ixv_register_vlan(struct adapter *, u16); |
130 | static int ixv_unregister_vlan(struct adapter *, u16); | | 130 | static int ixv_unregister_vlan(struct adapter *, u16); |
131 | | | 131 | |
132 | static void ixv_add_device_sysctls(struct adapter *); | | 132 | static void ixv_add_device_sysctls(struct adapter *); |
133 | static void ixv_save_stats(struct adapter *); | | 133 | static void ixv_save_stats(struct adapter *); |
134 | static void ixv_init_stats(struct adapter *); | | 134 | static void ixv_init_stats(struct adapter *); |
135 | static void ixv_update_stats(struct adapter *); | | 135 | static void ixv_update_stats(struct adapter *); |
136 | static void ixv_add_stats_sysctls(struct adapter *); | | 136 | static void ixv_add_stats_sysctls(struct adapter *); |
137 | static void ixv_clear_evcnt(struct adapter *); | | 137 | static void ixv_clear_evcnt(struct adapter *); |
138 | | | 138 | |
139 | /* Sysctl handlers */ | | 139 | /* Sysctl handlers */ |
140 | static void ixv_set_sysctl_value(struct adapter *, const char *, | | 140 | static void ixv_set_sysctl_value(struct adapter *, const char *, |
141 | const char *, int *, int); | | 141 | const char *, int *, int); |
142 | static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); | | 142 | static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); |
143 | static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO); | | 143 | static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO); |
144 | static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO); | | 144 | static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO); |
145 | static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO); | | 145 | static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO); |
146 | static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO); | | 146 | static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO); |
147 | static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO); | | 147 | static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO); |
148 | | | 148 | |
149 | /* The MSI-X Interrupt handlers */ | | 149 | /* The MSI-X Interrupt handlers */ |
150 | static int ixv_msix_que(void *); | | 150 | static int ixv_msix_que(void *); |
151 | static int ixv_msix_mbx(void *); | | 151 | static int ixv_msix_mbx(void *); |
152 | | | 152 | |
153 | /* Event handlers running on workqueue */ | | 153 | /* Event handlers running on workqueue */ |
154 | static void ixv_handle_que(void *); | | 154 | static void ixv_handle_que(void *); |
155 | | | 155 | |
156 | /* Deferred workqueue handlers */ | | 156 | /* Deferred workqueue handlers */ |
157 | static void ixv_handle_admin(struct work *, void *); | | 157 | static void ixv_handle_admin(struct work *, void *); |
158 | static void ixv_handle_que_work(struct work *, void *); | | 158 | static void ixv_handle_que_work(struct work *, void *); |
159 | | | 159 | |
160 | const struct sysctlnode *ixv_sysctl_instance(struct adapter *); | | 160 | const struct sysctlnode *ixv_sysctl_instance(struct adapter *); |
161 | static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *); | | 161 | static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *); |
162 | | | 162 | |
163 | /************************************************************************ | | 163 | /************************************************************************ |
164 | * NetBSD Device Interface Entry Points | | 164 | * NetBSD Device Interface Entry Points |
165 | ************************************************************************/ | | 165 | ************************************************************************/ |
166 | CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter), | | 166 | CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter), |
167 | ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL, | | 167 | ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL, |
168 | DVF_DETACH_SHUTDOWN); | | 168 | DVF_DETACH_SHUTDOWN); |
169 | | | 169 | |
170 | #if 0 | | 170 | #if 0 |
171 | static driver_t ixv_driver = { | | 171 | static driver_t ixv_driver = { |
172 | "ixv", ixv_methods, sizeof(struct adapter), | | 172 | "ixv", ixv_methods, sizeof(struct adapter), |
173 | }; | | 173 | }; |
174 | | | 174 | |
175 | devclass_t ixv_devclass; | | 175 | devclass_t ixv_devclass; |
176 | DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); | | 176 | DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); |
177 | MODULE_DEPEND(ixv, pci, 1, 1, 1); | | 177 | MODULE_DEPEND(ixv, pci, 1, 1, 1); |
178 | MODULE_DEPEND(ixv, ether, 1, 1, 1); | | 178 | MODULE_DEPEND(ixv, ether, 1, 1, 1); |
179 | #endif | | 179 | #endif |
180 | | | 180 | |
181 | /* | | 181 | /* |
182 | * TUNEABLE PARAMETERS: | | 182 | * TUNEABLE PARAMETERS: |
183 | */ | | 183 | */ |
184 | | | 184 | |
185 | /* Number of Queues - do not exceed MSI-X vectors - 1 */ | | 185 | /* Number of Queues - do not exceed MSI-X vectors - 1 */ |
186 | static int ixv_num_queues = 0; | | 186 | static int ixv_num_queues = 0; |
187 | #define TUNABLE_INT(__x, __y) | | 187 | #define TUNABLE_INT(__x, __y) |
188 | TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues); | | 188 | TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues); |
189 | | | 189 | |
190 | /* | | 190 | /* |
191 | * AIM: Adaptive Interrupt Moderation | | 191 | * AIM: Adaptive Interrupt Moderation |
192 | * which means that the interrupt rate | | 192 | * which means that the interrupt rate |
193 | * is varied over time based on the | | 193 | * is varied over time based on the |
194 | * traffic for that interrupt vector | | 194 | * traffic for that interrupt vector |
195 | */ | | 195 | */ |
196 | static bool ixv_enable_aim = false; | | 196 | static bool ixv_enable_aim = false; |
197 | TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); | | 197 | TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); |
198 | | | 198 | |
199 | static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); | | 199 | static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); |
200 | TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate); | | 200 | TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate); |
201 | | | 201 | |
202 | /* How many packets rxeof tries to clean at a time */ | | 202 | /* How many packets rxeof tries to clean at a time */ |
203 | static int ixv_rx_process_limit = 256; | | 203 | static int ixv_rx_process_limit = 256; |
204 | TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); | | 204 | TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); |
205 | | | 205 | |
206 | /* How many packets txeof tries to clean at a time */ | | 206 | /* How many packets txeof tries to clean at a time */ |
207 | static int ixv_tx_process_limit = 256; | | 207 | static int ixv_tx_process_limit = 256; |
208 | TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); | | 208 | TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); |
209 | | | 209 | |
210 | /* Which packet processing uses workqueue or softint */ | | 210 | /* Which packet processing uses workqueue or softint */ |
211 | static bool ixv_txrx_workqueue = false; | | 211 | static bool ixv_txrx_workqueue = false; |
212 | | | 212 | |
213 | /* | | 213 | /* |
214 | * Number of TX descriptors per ring, | | 214 | * Number of TX descriptors per ring, |
215 | * setting higher than RX as this seems | | 215 | * setting higher than RX as this seems |
216 | * the better performing choice. | | 216 | * the better performing choice. |
217 | */ | | 217 | */ |
218 | static int ixv_txd = PERFORM_TXD; | | 218 | static int ixv_txd = PERFORM_TXD; |
219 | TUNABLE_INT("hw.ixv.txd", &ixv_txd); | | 219 | TUNABLE_INT("hw.ixv.txd", &ixv_txd); |
220 | | | 220 | |
221 | /* Number of RX descriptors per ring */ | | 221 | /* Number of RX descriptors per ring */ |
222 | static int ixv_rxd = PERFORM_RXD; | | 222 | static int ixv_rxd = PERFORM_RXD; |
223 | TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); | | 223 | TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); |
224 | | | 224 | |
225 | /* Legacy Transmit (single queue) */ | | 225 | /* Legacy Transmit (single queue) */ |
226 | static int ixv_enable_legacy_tx = 0; | | 226 | static int ixv_enable_legacy_tx = 0; |
227 | TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx); | | 227 | TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx); |
228 | | | 228 | |
229 | #ifdef NET_MPSAFE | | 229 | #ifdef NET_MPSAFE |
230 | #define IXGBE_MPSAFE 1 | | 230 | #define IXGBE_MPSAFE 1 |
231 | #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE | | 231 | #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE |
232 | #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE | | 232 | #define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE |
233 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE | | 233 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE |
234 | #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE | | 234 | #define IXGBE_TASKLET_WQ_FLAGS WQ_MPSAFE |
235 | #else | | 235 | #else |
236 | #define IXGBE_CALLOUT_FLAGS 0 | | 236 | #define IXGBE_CALLOUT_FLAGS 0 |
237 | #define IXGBE_SOFTINT_FLAGS 0 | | 237 | #define IXGBE_SOFTINT_FLAGS 0 |
238 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | | 238 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU |
239 | #define IXGBE_TASKLET_WQ_FLAGS 0 | | 239 | #define IXGBE_TASKLET_WQ_FLAGS 0 |
240 | #endif | | 240 | #endif |
241 | #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET | | 241 | #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET |
242 | | | 242 | |
243 | #if 0 | | 243 | #if 0 |
244 | static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *); | | 244 | static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *); |
245 | static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *); | | 245 | static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *); |
246 | #endif | | 246 | #endif |
247 | | | 247 | |
248 | /************************************************************************ | | 248 | /************************************************************************ |
249 | * ixv_probe - Device identification routine | | 249 | * ixv_probe - Device identification routine |
250 | * | | 250 | * |
251 | * Determines if the driver should be loaded on | | 251 | * Determines if the driver should be loaded on |
252 | * adapter based on its PCI vendor/device ID. | | 252 | * adapter based on its PCI vendor/device ID. |
253 | * | | 253 | * |
254 | * return BUS_PROBE_DEFAULT on success, positive on failure | | 254 | * return BUS_PROBE_DEFAULT on success, positive on failure |
255 | ************************************************************************/ | | 255 | ************************************************************************/ |
256 | static int | | 256 | static int |
257 | ixv_probe(device_t dev, cfdata_t cf, void *aux) | | 257 | ixv_probe(device_t dev, cfdata_t cf, void *aux) |
258 | { | | 258 | { |
259 | #ifdef __HAVE_PCI_MSI_MSIX | | 259 | #ifdef __HAVE_PCI_MSI_MSIX |
260 | const struct pci_attach_args *pa = aux; | | 260 | const struct pci_attach_args *pa = aux; |
261 | | | 261 | |
262 | return (ixv_lookup(pa) != NULL) ? 1 : 0; | | 262 | return (ixv_lookup(pa) != NULL) ? 1 : 0; |
263 | #else | | 263 | #else |
264 | return 0; | | 264 | return 0; |
265 | #endif | | 265 | #endif |
266 | } /* ixv_probe */ | | 266 | } /* ixv_probe */ |
267 | | | 267 | |
268 | static const ixgbe_vendor_info_t * | | 268 | static const ixgbe_vendor_info_t * |
269 | ixv_lookup(const struct pci_attach_args *pa) | | 269 | ixv_lookup(const struct pci_attach_args *pa) |
270 | { | | 270 | { |
271 | const ixgbe_vendor_info_t *ent; | | 271 | const ixgbe_vendor_info_t *ent; |
272 | pcireg_t subid; | | 272 | pcireg_t subid; |
273 | | | 273 | |
274 | INIT_DEBUGOUT("ixv_lookup: begin"); | | 274 | INIT_DEBUGOUT("ixv_lookup: begin"); |
275 | | | 275 | |
276 | if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) | | 276 | if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) |
277 | return NULL; | | 277 | return NULL; |
278 | | | 278 | |
279 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); | | 279 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); |
280 | | | 280 | |
281 | for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { | | 281 | for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { |
282 | if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && | | 282 | if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && |
283 | (PCI_PRODUCT(pa->pa_id) == ent->device_id) && | | 283 | (PCI_PRODUCT(pa->pa_id) == ent->device_id) && |
284 | ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || | | 284 | ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || |
285 | (ent->subvendor_id == 0)) && | | 285 | (ent->subvendor_id == 0)) && |
286 | ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || | | 286 | ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || |
287 | (ent->subdevice_id == 0))) { | | 287 | (ent->subdevice_id == 0))) { |
288 | return ent; | | 288 | return ent; |
289 | } | | 289 | } |
290 | } | | 290 | } |
291 | | | 291 | |
292 | return NULL; | | 292 | return NULL; |
293 | } | | 293 | } |
294 | | | 294 | |
295 | /************************************************************************ | | 295 | /************************************************************************ |
296 | * ixv_attach - Device initialization routine | | 296 | * ixv_attach - Device initialization routine |
297 | * | | 297 | * |
298 | * Called when the driver is being loaded. | | 298 | * Called when the driver is being loaded. |
299 | * Identifies the type of hardware, allocates all resources | | 299 | * Identifies the type of hardware, allocates all resources |
300 | * and initializes the hardware. | | 300 | * and initializes the hardware. |
301 | * | | 301 | * |
302 | * return 0 on success, positive on failure | | 302 | * return 0 on success, positive on failure |
303 | ************************************************************************/ | | 303 | ************************************************************************/ |
304 | static void | | 304 | static void |
305 | ixv_attach(device_t parent, device_t dev, void *aux) | | 305 | ixv_attach(device_t parent, device_t dev, void *aux) |
306 | { | | 306 | { |
307 | struct adapter *adapter; | | 307 | struct adapter *adapter; |
308 | struct ixgbe_hw *hw; | | 308 | struct ixgbe_hw *hw; |
309 | int error = 0; | | 309 | int error = 0; |
310 | pcireg_t id, subid; | | 310 | pcireg_t id, subid; |
311 | const ixgbe_vendor_info_t *ent; | | 311 | const ixgbe_vendor_info_t *ent; |
312 | const struct pci_attach_args *pa = aux; | | 312 | const struct pci_attach_args *pa = aux; |
313 | const char *apivstr; | | 313 | const char *apivstr; |
314 | const char *str; | | 314 | const char *str; |
315 | char wqname[MAXCOMLEN]; | | 315 | char wqname[MAXCOMLEN]; |
316 | char buf[256]; | | 316 | char buf[256]; |
317 | | | 317 | |
318 | INIT_DEBUGOUT("ixv_attach: begin"); | | 318 | INIT_DEBUGOUT("ixv_attach: begin"); |
319 | | | 319 | |
320 | /* | | 320 | /* |
321 | * Make sure BUSMASTER is set, on a VM under | | 321 | * Make sure BUSMASTER is set, on a VM under |
322 | * KVM it may not be and will break things. | | 322 | * KVM it may not be and will break things. |
323 | */ | | 323 | */ |
324 | ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); | | 324 | ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); |
325 | | | 325 | |
326 | /* Allocate, clear, and link in our adapter structure */ | | 326 | /* Allocate, clear, and link in our adapter structure */ |
327 | adapter = device_private(dev); | | 327 | adapter = device_private(dev); |
328 | adapter->hw.back = adapter; | | 328 | adapter->hw.back = adapter; |
329 | adapter->dev = dev; | | 329 | adapter->dev = dev; |
330 | hw = &adapter->hw; | | 330 | hw = &adapter->hw; |
331 | | | 331 | |
332 | adapter->init_locked = ixv_init_locked; | | 332 | adapter->init_locked = ixv_init_locked; |
333 | adapter->stop_locked = ixv_stop_locked; | | 333 | adapter->stop_locked = ixv_stop_locked; |
334 | | | 334 | |
335 | adapter->osdep.pc = pa->pa_pc; | | 335 | adapter->osdep.pc = pa->pa_pc; |
336 | adapter->osdep.tag = pa->pa_tag; | | 336 | adapter->osdep.tag = pa->pa_tag; |
337 | if (pci_dma64_available(pa)) | | 337 | if (pci_dma64_available(pa)) |
338 | adapter->osdep.dmat = pa->pa_dmat64; | | 338 | adapter->osdep.dmat = pa->pa_dmat64; |
339 | else | | 339 | else |
340 | adapter->osdep.dmat = pa->pa_dmat; | | 340 | adapter->osdep.dmat = pa->pa_dmat; |
341 | adapter->osdep.attached = false; | | 341 | adapter->osdep.attached = false; |
342 | | | 342 | |
343 | ent = ixv_lookup(pa); | | 343 | ent = ixv_lookup(pa); |
344 | | | 344 | |
345 | KASSERT(ent != NULL); | | 345 | KASSERT(ent != NULL); |
346 | | | 346 | |
347 | aprint_normal(": %s, Version - %s\n", | | 347 | aprint_normal(": %s, Version - %s\n", |
348 | ixv_strings[ent->index], ixv_driver_version); | | 348 | ixv_strings[ent->index], ixv_driver_version); |
349 | | | 349 | |
350 | /* Core Lock Init */ | | 350 | /* Core Lock Init */ |
351 | IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); | | 351 | IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); |
352 | | | 352 | |
353 | /* Do base PCI setup - map BAR0 */ | | 353 | /* Do base PCI setup - map BAR0 */ |
354 | if (ixv_allocate_pci_resources(adapter, pa)) { | | 354 | if (ixv_allocate_pci_resources(adapter, pa)) { |
355 | aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n"); | | 355 | aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n"); |
356 | error = ENXIO; | | 356 | error = ENXIO; |
357 | goto err_out; | | 357 | goto err_out; |
358 | } | | 358 | } |
359 | | | 359 | |
360 | /* SYSCTL APIs */ | | 360 | /* SYSCTL APIs */ |
361 | ixv_add_device_sysctls(adapter); | | 361 | ixv_add_device_sysctls(adapter); |
362 | | | 362 | |
363 | /* Set up the timer callout and workqueue */ | | 363 | /* Set up the timer callout and workqueue */ |
364 | callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); | | 364 | callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); |
365 | snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev)); | | 365 | snprintf(wqname, sizeof(wqname), "%s-timer", device_xname(dev)); |
366 | error = workqueue_create(&adapter->timer_wq, wqname, | | 366 | error = workqueue_create(&adapter->timer_wq, wqname, |
367 | ixv_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, | | 367 | ixv_handle_timer, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, |
368 | IXGBE_TASKLET_WQ_FLAGS); | | 368 | IXGBE_TASKLET_WQ_FLAGS); |
369 | if (error) { | | 369 | if (error) { |
370 | aprint_error_dev(dev, | | 370 | aprint_error_dev(dev, |
371 | "could not create timer workqueue (%d)\n", error); | | 371 | "could not create timer workqueue (%d)\n", error); |
372 | goto err_out; | | 372 | goto err_out; |
373 | } | | 373 | } |
374 | | | 374 | |
375 | /* Save off the information about this board */ | | 375 | /* Save off the information about this board */ |
376 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); | | 376 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); |
377 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); | | 377 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); |
378 | hw->vendor_id = PCI_VENDOR(id); | | 378 | hw->vendor_id = PCI_VENDOR(id); |
379 | hw->device_id = PCI_PRODUCT(id); | | 379 | hw->device_id = PCI_PRODUCT(id); |
380 | hw->revision_id = | | 380 | hw->revision_id = |
381 | PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); | | 381 | PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); |
382 | hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); | | 382 | hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); |
383 | hw->subsystem_device_id = PCI_SUBSYS_ID(subid); | | 383 | hw->subsystem_device_id = PCI_SUBSYS_ID(subid); |
384 | | | 384 | |
385 | /* A subset of set_mac_type */ | | 385 | /* A subset of set_mac_type */ |
386 | switch (hw->device_id) { | | 386 | switch (hw->device_id) { |
387 | case IXGBE_DEV_ID_82599_VF: | | 387 | case IXGBE_DEV_ID_82599_VF: |
388 | hw->mac.type = ixgbe_mac_82599_vf; | | 388 | hw->mac.type = ixgbe_mac_82599_vf; |
389 | str = "82599 VF"; | | 389 | str = "82599 VF"; |
390 | break; | | 390 | break; |
391 | case IXGBE_DEV_ID_X540_VF: | | 391 | case IXGBE_DEV_ID_X540_VF: |
392 | hw->mac.type = ixgbe_mac_X540_vf; | | 392 | hw->mac.type = ixgbe_mac_X540_vf; |
393 | str = "X540 VF"; | | 393 | str = "X540 VF"; |
394 | break; | | 394 | break; |
395 | case IXGBE_DEV_ID_X550_VF: | | 395 | case IXGBE_DEV_ID_X550_VF: |
396 | hw->mac.type = ixgbe_mac_X550_vf; | | 396 | hw->mac.type = ixgbe_mac_X550_vf; |
397 | str = "X550 VF"; | | 397 | str = "X550 VF"; |
398 | break; | | 398 | break; |
399 | case IXGBE_DEV_ID_X550EM_X_VF: | | 399 | case IXGBE_DEV_ID_X550EM_X_VF: |
400 | hw->mac.type = ixgbe_mac_X550EM_x_vf; | | 400 | hw->mac.type = ixgbe_mac_X550EM_x_vf; |
401 | str = "X550EM X VF"; | | 401 | str = "X550EM X VF"; |
402 | break; | | 402 | break; |
403 | case IXGBE_DEV_ID_X550EM_A_VF: | | 403 | case IXGBE_DEV_ID_X550EM_A_VF: |
404 | hw->mac.type = ixgbe_mac_X550EM_a_vf; | | 404 | hw->mac.type = ixgbe_mac_X550EM_a_vf; |
405 | str = "X550EM A VF"; | | 405 | str = "X550EM A VF"; |
406 | break; | | 406 | break; |
407 | default: | | 407 | default: |
408 | /* Shouldn't get here since probe succeeded */ | | 408 | /* Shouldn't get here since probe succeeded */ |
409 | aprint_error_dev(dev, "Unknown device ID!\n"); | | 409 | aprint_error_dev(dev, "Unknown device ID!\n"); |
410 | error = ENXIO; | | 410 | error = ENXIO; |
411 | goto err_out; | | 411 | goto err_out; |
412 | break; | | 412 | break; |
413 | } | | 413 | } |
414 | aprint_normal_dev(dev, "device %s\n", str); | | 414 | aprint_normal_dev(dev, "device %s\n", str); |
415 | | | 415 | |
416 | ixv_init_device_features(adapter); | | 416 | ixv_init_device_features(adapter); |
417 | | | 417 | |
418 | /* Initialize the shared code */ | | 418 | /* Initialize the shared code */ |
419 | error = ixgbe_init_ops_vf(hw); | | 419 | error = ixgbe_init_ops_vf(hw); |
420 | if (error) { | | 420 | if (error) { |
421 | aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n"); | | 421 | aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n"); |
422 | error = EIO; | | 422 | error = EIO; |
423 | goto err_out; | | 423 | goto err_out; |
424 | } | | 424 | } |
425 | | | 425 | |
426 | /* Setup the mailbox */ | | 426 | /* Setup the mailbox */ |
427 | ixgbe_init_mbx_params_vf(hw); | | 427 | ixgbe_init_mbx_params_vf(hw); |
428 | | | 428 | |
429 | /* Set the right number of segments */ | | 429 | /* Set the right number of segments */ |
430 | adapter->num_segs = IXGBE_82599_SCATTER; | | 430 | adapter->num_segs = IXGBE_82599_SCATTER; |
431 | | | 431 | |
432 | /* Reset mbox api to 1.0 */ | | 432 | /* Reset mbox api to 1.0 */ |
433 | error = hw->mac.ops.reset_hw(hw); | | 433 | error = hw->mac.ops.reset_hw(hw); |
434 | if (error == IXGBE_ERR_RESET_FAILED) | | 434 | if (error == IXGBE_ERR_RESET_FAILED) |
435 | aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n"); | | 435 | aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n"); |
436 | else if (error) | | 436 | else if (error) |
437 | aprint_error_dev(dev, "...reset_hw() failed with error %d\n", | | 437 | aprint_error_dev(dev, "...reset_hw() failed with error %d\n", |
438 | error); | | 438 | error); |
439 | if (error) { | | 439 | if (error) { |
440 | error = EIO; | | 440 | error = EIO; |
441 | goto err_out; | | 441 | goto err_out; |
442 | } | | 442 | } |
443 | | | 443 | |
444 | error = hw->mac.ops.init_hw(hw); | | 444 | error = hw->mac.ops.init_hw(hw); |
445 | if (error) { | | 445 | if (error) { |
446 | aprint_error_dev(dev, "...init_hw() failed!\n"); | | 446 | aprint_error_dev(dev, "...init_hw() failed!\n"); |
447 | error = EIO; | | 447 | error = EIO; |
448 | goto err_out; | | 448 | goto err_out; |
449 | } | | 449 | } |
450 | | | 450 | |
451 | /* Negotiate mailbox API version */ | | 451 | /* Negotiate mailbox API version */ |
452 | error = ixv_negotiate_api(adapter); | | 452 | error = ixv_negotiate_api(adapter); |
453 | if (error) | | 453 | if (error) |
454 | aprint_normal_dev(dev, | | 454 | aprint_normal_dev(dev, |
455 | "MBX API negotiation failed during attach!\n"); | | 455 | "MBX API negotiation failed during attach!\n"); |
456 | switch (hw->api_version) { | | 456 | switch (hw->api_version) { |
457 | case ixgbe_mbox_api_10: | | 457 | case ixgbe_mbox_api_10: |
458 | apivstr = "1.0"; | | 458 | apivstr = "1.0"; |
459 | break; | | 459 | break; |
460 | case ixgbe_mbox_api_20: | | 460 | case ixgbe_mbox_api_20: |
461 | apivstr = "2.0"; | | 461 | apivstr = "2.0"; |
462 | break; | | 462 | break; |
463 | case ixgbe_mbox_api_11: | | 463 | case ixgbe_mbox_api_11: |
464 | apivstr = "1.1"; | | 464 | apivstr = "1.1"; |
465 | break; | | 465 | break; |
466 | case ixgbe_mbox_api_12: | | 466 | case ixgbe_mbox_api_12: |
467 | apivstr = "1.2"; | | 467 | apivstr = "1.2"; |
468 | break; | | 468 | break; |
469 | case ixgbe_mbox_api_13: | | 469 | case ixgbe_mbox_api_13: |
470 | apivstr = "1.3"; | | 470 | apivstr = "1.3"; |
471 | break; | | 471 | break; |
472 | default: | | 472 | default: |
473 | apivstr = "unknown"; | | 473 | apivstr = "unknown"; |
474 | break; | | 474 | break; |
475 | } | | 475 | } |
476 | aprint_normal_dev(dev, "Mailbox API %s\n", apivstr); | | 476 | aprint_normal_dev(dev, "Mailbox API %s\n", apivstr); |
477 | | | 477 | |
478 | /* If no mac address was assigned, make a random one */ | | 478 | /* If no mac address was assigned, make a random one */ |
479 | if (!ixv_check_ether_addr(hw->mac.addr)) { | | 479 | if (!ixv_check_ether_addr(hw->mac.addr)) { |
480 | u8 addr[ETHER_ADDR_LEN]; | | 480 | u8 addr[ETHER_ADDR_LEN]; |
481 | uint64_t rndval = cprng_strong64(); | | 481 | uint64_t rndval = cprng_strong64(); |
482 | | | 482 | |
483 | memcpy(addr, &rndval, sizeof(addr)); | | 483 | memcpy(addr, &rndval, sizeof(addr)); |
484 | addr[0] &= 0xFE; | | 484 | addr[0] &= 0xFE; |
485 | addr[0] |= 0x02; | | 485 | addr[0] |= 0x02; |
486 | bcopy(addr, hw->mac.addr, sizeof(addr)); | | 486 | bcopy(addr, hw->mac.addr, sizeof(addr)); |
487 | } | | 487 | } |
488 | | | 488 | |
489 | /* Register for VLAN events */ | | 489 | /* Register for VLAN events */ |
490 | ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb); | | 490 | ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb); |
491 | | | 491 | |
492 | /* Sysctls for limiting the amount of work done in the taskqueues */ | | 492 | /* Sysctls for limiting the amount of work done in the taskqueues */ |
493 | ixv_set_sysctl_value(adapter, "rx_processing_limit", | | 493 | ixv_set_sysctl_value(adapter, "rx_processing_limit", |
494 | "max number of rx packets to process", | | 494 | "max number of rx packets to process", |
495 | &adapter->rx_process_limit, ixv_rx_process_limit); | | 495 | &adapter->rx_process_limit, ixv_rx_process_limit); |
496 | | | 496 | |
497 | ixv_set_sysctl_value(adapter, "tx_processing_limit", | | 497 | ixv_set_sysctl_value(adapter, "tx_processing_limit", |
498 | "max number of tx packets to process", | | 498 | "max number of tx packets to process", |
499 | &adapter->tx_process_limit, ixv_tx_process_limit); | | 499 | &adapter->tx_process_limit, ixv_tx_process_limit); |
500 | | | 500 | |
501 | /* Do descriptor calc and sanity checks */ | | 501 | /* Do descriptor calc and sanity checks */ |
502 | if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || | | 502 | if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || |
503 | ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { | | 503 | ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { |
504 | aprint_error_dev(dev, "TXD config issue, using default!\n"); | | 504 | aprint_error_dev(dev, "TXD config issue, using default!\n"); |
505 | adapter->num_tx_desc = DEFAULT_TXD; | | 505 | adapter->num_tx_desc = DEFAULT_TXD; |
506 | } else | | 506 | } else |
507 | adapter->num_tx_desc = ixv_txd; | | 507 | adapter->num_tx_desc = ixv_txd; |
508 | | | 508 | |
509 | if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || | | 509 | if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || |
510 | ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { | | 510 | ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { |
511 | aprint_error_dev(dev, "RXD config issue, using default!\n"); | | 511 | aprint_error_dev(dev, "RXD config issue, using default!\n"); |
512 | adapter->num_rx_desc = DEFAULT_RXD; | | 512 | adapter->num_rx_desc = DEFAULT_RXD; |
513 | } else | | 513 | } else |
514 | adapter->num_rx_desc = ixv_rxd; | | 514 | adapter->num_rx_desc = ixv_rxd; |
515 | | | 515 | |
516 | adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI; | | 516 | adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI; |
517 | | | 517 | |
518 | /* Setup MSI-X */ | | 518 | /* Setup MSI-X */ |
519 | error = ixv_configure_interrupts(adapter); | | 519 | error = ixv_configure_interrupts(adapter); |
520 | if (error) | | 520 | if (error) |
521 | goto err_out; | | 521 | goto err_out; |
522 | | | 522 | |
523 | /* Allocate our TX/RX Queues */ | | 523 | /* Allocate our TX/RX Queues */ |
524 | if (ixgbe_allocate_queues(adapter)) { | | 524 | if (ixgbe_allocate_queues(adapter)) { |
525 | aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n"); | | 525 | aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n"); |
526 | error = ENOMEM; | | 526 | error = ENOMEM; |
527 | goto err_out; | | 527 | goto err_out; |
528 | } | | 528 | } |
529 | | | 529 | |
530 | /* hw.ix defaults init */ | | 530 | /* hw.ix defaults init */ |
531 | adapter->enable_aim = ixv_enable_aim; | | 531 | adapter->enable_aim = ixv_enable_aim; |
532 | | | 532 | |
533 | adapter->txrx_use_workqueue = ixv_txrx_workqueue; | | 533 | adapter->txrx_use_workqueue = ixv_txrx_workqueue; |
534 | | | 534 | |
535 | error = ixv_allocate_msix(adapter, pa); | | 535 | error = ixv_allocate_msix(adapter, pa); |
536 | if (error) { | | 536 | if (error) { |
537 | aprint_error_dev(dev, "ixv_allocate_msix() failed!\n"); | | 537 | aprint_error_dev(dev, "ixv_allocate_msix() failed!\n"); |
538 | goto err_late; | | 538 | goto err_late; |
539 | } | | 539 | } |
540 | | | 540 | |
541 | /* Setup OS specific network interface */ | | 541 | /* Setup OS specific network interface */ |
542 | error = ixv_setup_interface(dev, adapter); | | 542 | error = ixv_setup_interface(dev, adapter); |
543 | if (error != 0) { | | 543 | if (error != 0) { |
544 | aprint_error_dev(dev, "ixv_setup_interface() failed!\n"); | | 544 | aprint_error_dev(dev, "ixv_setup_interface() failed!\n"); |
545 | goto err_late; | | 545 | goto err_late; |
546 | } | | 546 | } |
547 | | | 547 | |
548 | /* Do the stats setup */ | | 548 | /* Do the stats setup */ |
549 | ixv_save_stats(adapter); | | 549 | ixv_save_stats(adapter); |
550 | ixv_init_stats(adapter); | | 550 | ixv_init_stats(adapter); |
551 | ixv_add_stats_sysctls(adapter); | | 551 | ixv_add_stats_sysctls(adapter); |
552 | | | 552 | |
553 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) | | 553 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) |
554 | ixgbe_netmap_attach(adapter); | | 554 | ixgbe_netmap_attach(adapter); |
555 | | | 555 | |
556 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); | | 556 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); |
557 | aprint_verbose_dev(dev, "feature cap %s\n", buf); | | 557 | aprint_verbose_dev(dev, "feature cap %s\n", buf); |
558 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); | | 558 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); |
559 | aprint_verbose_dev(dev, "feature ena %s\n", buf); | | 559 | aprint_verbose_dev(dev, "feature ena %s\n", buf); |
560 | | | 560 | |
561 | INIT_DEBUGOUT("ixv_attach: end"); | | 561 | INIT_DEBUGOUT("ixv_attach: end"); |
562 | adapter->osdep.attached = true; | | 562 | adapter->osdep.attached = true; |
563 | | | 563 | |
564 | return; | | 564 | return; |
565 | | | 565 | |
566 | err_late: | | 566 | err_late: |
567 | ixgbe_free_queues(adapter); | | 567 | ixgbe_free_queues(adapter); |
568 | err_out: | | 568 | err_out: |
569 | ixv_free_pci_resources(adapter); | | 569 | ixv_free_pci_resources(adapter); |
570 | IXGBE_CORE_LOCK_DESTROY(adapter); | | 570 | IXGBE_CORE_LOCK_DESTROY(adapter); |
571 | | | 571 | |
572 | return; | | 572 | return; |
573 | } /* ixv_attach */ | | 573 | } /* ixv_attach */ |
574 | | | 574 | |
575 | /************************************************************************ | | 575 | /************************************************************************ |
576 | * ixv_detach - Device removal routine | | 576 | * ixv_detach - Device removal routine |
577 | * | | 577 | * |
578 | * Called when the driver is being removed. | | 578 | * Called when the driver is being removed. |
579 | * Stops the adapter and deallocates all the resources | | 579 | * Stops the adapter and deallocates all the resources |
580 | * that were allocated for driver operation. | | 580 | * that were allocated for driver operation. |
581 | * | | 581 | * |
582 | * return 0 on success, positive on failure | | 582 | * return 0 on success, positive on failure |
583 | ************************************************************************/ | | 583 | ************************************************************************/ |
static int
ixv_detach(device_t dev, int flags)
{
	struct adapter *adapter = device_private(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	INIT_DEBUGOUT("ixv_detach: begin");
	/* If attach never completed there is nothing to tear down. */
	if (adapter->osdep.attached == false)
		return 0;

	/* Stop the interface. Callouts are stopped in it. */
	ixv_ifstop(adapter->ifp, 1);

#if NVLAN > 0
	/* Make sure VLANs are not using driver */
	if (!VLAN_ATTACHED(&adapter->osdep.ec))
		; /* nothing to do: no VLANs */
	else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
		vlan_ifdetach(adapter->ifp);
	else {
		/* Refuse a voluntary detach while VLANs are configured. */
		aprint_error_dev(dev, "VLANs in use, detach first\n");
		return EBUSY;
	}
#endif

	/*
	 * Detach from the network stack, then halt the timer callout
	 * and the deferred-work handlers so nothing re-enters us.
	 */
	ether_ifdetach(adapter->ifp);
	callout_halt(&adapter->timer, NULL);
	ixv_free_deferred_handlers(adapter);

	if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
		netmap_detach(adapter->ifp);

	ixv_free_pci_resources(adapter);
#if 0 /* XXX the NetBSD port is probably missing something here */
	bus_generic_detach(dev);
#endif
	if_detach(adapter->ifp);
	ifmedia_fini(&adapter->media);
	if_percpuq_destroy(adapter->ipq);

	/* Tear down sysctls and the per-adapter event counters. */
	sysctl_teardown(&adapter->sysctllog);
	evcnt_detach(&adapter->efbig_tx_dma_setup);
	evcnt_detach(&adapter->mbuf_defrag_failed);
	evcnt_detach(&adapter->efbig2_tx_dma_setup);
	evcnt_detach(&adapter->einval_tx_dma_setup);
	evcnt_detach(&adapter->other_tx_dma_setup);
	evcnt_detach(&adapter->eagain_tx_dma_setup);
	evcnt_detach(&adapter->enomem_tx_dma_setup);
	evcnt_detach(&adapter->watchdog_events);
	evcnt_detach(&adapter->tso_err);
	evcnt_detach(&adapter->admin_irqev);
	evcnt_detach(&adapter->link_workev);

	/*
	 * Per-queue counters: txr/rxr advance in lockstep with the
	 * queue index (txr is re-fetched here for clarity).
	 */
	txr = adapter->tx_rings;
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		evcnt_detach(&adapter->queues[i].irqs);
		evcnt_detach(&adapter->queues[i].handleq);
		evcnt_detach(&adapter->queues[i].req);
		evcnt_detach(&txr->no_desc_avail);
		evcnt_detach(&txr->total_packets);
		evcnt_detach(&txr->tso_tx);
#ifndef IXGBE_LEGACY_TX
		evcnt_detach(&txr->pcq_drops);
#endif

		evcnt_detach(&rxr->rx_packets);
		evcnt_detach(&rxr->rx_bytes);
		evcnt_detach(&rxr->rx_copies);
		evcnt_detach(&rxr->no_jmbuf);
		evcnt_detach(&rxr->rx_discarded);
	}
	/* Checksum-offload counters. */
	evcnt_detach(&stats->ipcs);
	evcnt_detach(&stats->l4cs);
	evcnt_detach(&stats->ipcs_bad);
	evcnt_detach(&stats->l4cs_bad);

	/* Packet Reception Stats */
	evcnt_detach(&stats->vfgorc);
	evcnt_detach(&stats->vfgprc);
	evcnt_detach(&stats->vfmprc);

	/* Packet Transmission Stats */
	evcnt_detach(&stats->vfgotc);
	evcnt_detach(&stats->vfgptc);

	/* Mailbox Stats */
	evcnt_detach(&hw->mbx.stats.msgs_tx);
	evcnt_detach(&hw->mbx.stats.msgs_rx);
	evcnt_detach(&hw->mbx.stats.acks);
	evcnt_detach(&hw->mbx.stats.reqs);
	evcnt_detach(&hw->mbx.stats.rsts);

	ixgbe_free_queues(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);

	return (0);
} /* ixv_detach */
685 | | | 685 | |
686 | /************************************************************************ | | 686 | /************************************************************************ |
687 | * ixv_init_locked - Init entry point | | 687 | * ixv_init_locked - Init entry point |
688 | * | | 688 | * |
689 | * Used in two ways: It is used by the stack as an init entry | | 689 | * Used in two ways: It is used by the stack as an init entry |
690 | * point in network interface structure. It is also used | | 690 | * point in network interface structure. It is also used |
691 | * by the driver as a hw/sw initialization routine to get | | 691 | * by the driver as a hw/sw initialization routine to get |
692 | * to a consistent state. | | 692 | * to a consistent state. |
693 | * | | 693 | * |
 * No return value; failures are reported via aprint/device_printf
 * and the adapter is stopped.
695 | ************************************************************************/ | | 695 | ************************************************************************/ |
static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	int error = 0;
	uint32_t mask;
	int i;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	/* Caller must hold the core lock for the whole (re)init. */
	KASSERT(mutex_owned(&adapter->core_mtx));
	/* Quiesce the hardware and the watchdog before reconfiguring. */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);
	/* Reset the interrupt enable/disable nesting count per queue. */
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	/* RAR[0] is written a second time after picking up the LAA. */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		aprint_error_dev(dev, "Could not setup transmit structures\n");
		ixv_stop_locked(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	/* Negotiation failure is reported but not fatal here. */
	error = ixv_negotiate_api(adapter);
	if (error)
		device_printf(dev,
		    "Mailbox API negotiation failed in init_locked!\n");

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_rxfilter(adapter);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop_locked(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Initialize variable holding task enqueue requests interrupts */
	adapter->task_requests = 0;

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	/* Include the admin/link vector plus every queue vector. */
	mask = (1 << adapter->vector);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		mask |= (1 << que->msix);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);

	/* Set moderation on the Link interrupt */
	ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.get_link_status = TRUE;
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	atomic_store_relaxed(&adapter->timer_pending, 0);

	/* OK to schedule workqueues. */
	adapter->schedule_wqs_ok = true;

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
} /* ixv_init_locked */
811 | | | 811 | |
812 | /************************************************************************ | | 812 | /************************************************************************ |
813 | * ixv_enable_queue | | 813 | * ixv_enable_queue |
814 | ************************************************************************/ | | 814 | ************************************************************************/ |
815 | static inline void | | 815 | static inline void |
816 | ixv_enable_queue(struct adapter *adapter, u32 vector) | | 816 | ixv_enable_queue(struct adapter *adapter, u32 vector) |
817 | { | | 817 | { |
818 | struct ixgbe_hw *hw = &adapter->hw; | | 818 | struct ixgbe_hw *hw = &adapter->hw; |
819 | struct ix_queue *que = &adapter->queues[vector]; | | 819 | struct ix_queue *que = &adapter->queues[vector]; |
820 | u32 queue = 1UL << vector; | | 820 | u32 queue = 1UL << vector; |
821 | u32 mask; | | 821 | u32 mask; |
822 | | | 822 | |
823 | mutex_enter(&que->dc_mtx); | | 823 | mutex_enter(&que->dc_mtx); |
824 | if (que->disabled_count > 0 && --que->disabled_count > 0) | | 824 | if (que->disabled_count > 0 && --que->disabled_count > 0) |
825 | goto out; | | 825 | goto out; |
826 | | | 826 | |
827 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); | | 827 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); |
828 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); | | 828 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); |
829 | out: | | 829 | out: |
830 | mutex_exit(&que->dc_mtx); | | 830 | mutex_exit(&que->dc_mtx); |
831 | } /* ixv_enable_queue */ | | 831 | } /* ixv_enable_queue */ |
832 | | | 832 | |
833 | /************************************************************************ | | 833 | /************************************************************************ |
834 | * ixv_disable_queue | | 834 | * ixv_disable_queue |
835 | ************************************************************************/ | | 835 | ************************************************************************/ |
836 | static inline void | | 836 | static inline void |
837 | ixv_disable_queue(struct adapter *adapter, u32 vector) | | 837 | ixv_disable_queue(struct adapter *adapter, u32 vector) |
838 | { | | 838 | { |
839 | struct ixgbe_hw *hw = &adapter->hw; | | 839 | struct ixgbe_hw *hw = &adapter->hw; |
840 | struct ix_queue *que = &adapter->queues[vector]; | | 840 | struct ix_queue *que = &adapter->queues[vector]; |
841 | u32 queue = 1UL << vector; | | 841 | u32 queue = 1UL << vector; |
842 | u32 mask; | | 842 | u32 mask; |
843 | | | 843 | |
844 | mutex_enter(&que->dc_mtx); | | 844 | mutex_enter(&que->dc_mtx); |
845 | if (que->disabled_count++ > 0) | | 845 | if (que->disabled_count++ > 0) |
846 | goto out; | | 846 | goto out; |
847 | | | 847 | |
848 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); | | 848 | mask = (IXGBE_EIMS_RTX_QUEUE & queue); |
849 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); | | 849 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); |
850 | out: | | 850 | out: |
851 | mutex_exit(&que->dc_mtx); | | 851 | mutex_exit(&que->dc_mtx); |
852 | } /* ixv_disable_queue */ | | 852 | } /* ixv_disable_queue */ |
853 | | | 853 | |
#if 0
/*
 * Currently unused: would raise a software interrupt (via VTEICS) for
 * the given set of queues. Kept for reference.
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif
862 | | | 862 | |
863 | | | 863 | |
864 | /************************************************************************ | | 864 | /************************************************************************ |
865 | * ixv_msix_que - MSI-X Queue Interrupt Service routine | | 865 | * ixv_msix_que - MSI-X Queue Interrupt Service routine |
866 | ************************************************************************/ | | 866 | ************************************************************************/ |
867 | static int | | 867 | static int |
868 | ixv_msix_que(void *arg) | | 868 | ixv_msix_que(void *arg) |
869 | { | | 869 | { |
870 | struct ix_queue *que = arg; | | 870 | struct ix_queue *que = arg; |
871 | struct adapter *adapter = que->adapter; | | 871 | struct adapter *adapter = que->adapter; |
872 | struct tx_ring *txr = que->txr; | | 872 | struct tx_ring *txr = que->txr; |
873 | struct rx_ring *rxr = que->rxr; | | 873 | struct rx_ring *rxr = que->rxr; |
874 | bool more; | | 874 | bool more; |
875 | u32 newitr = 0; | | 875 | u32 newitr = 0; |
876 | | | 876 | |
877 | ixv_disable_queue(adapter, que->msix); | | 877 | ixv_disable_queue(adapter, que->msix); |
878 | ++que->irqs.ev_count; | | 878 | ++que->irqs.ev_count; |
879 | | | 879 | |
880 | #ifdef __NetBSD__ | | 880 | #ifdef __NetBSD__ |
881 | /* Don't run ixgbe_rxeof in interrupt context */ | | 881 | /* Don't run ixgbe_rxeof in interrupt context */ |
882 | more = true; | | 882 | more = true; |
883 | #else | | 883 | #else |
884 | more = ixgbe_rxeof(que); | | 884 | more = ixgbe_rxeof(que); |
885 | #endif | | 885 | #endif |
886 | | | 886 | |
887 | IXGBE_TX_LOCK(txr); | | 887 | IXGBE_TX_LOCK(txr); |
888 | ixgbe_txeof(txr); | | 888 | ixgbe_txeof(txr); |
889 | IXGBE_TX_UNLOCK(txr); | | 889 | IXGBE_TX_UNLOCK(txr); |
890 | | | 890 | |
891 | /* Do AIM now? */ | | 891 | /* Do AIM now? */ |
892 | | | 892 | |
893 | if (adapter->enable_aim == false) | | 893 | if (adapter->enable_aim == false) |
894 | goto no_calc; | | 894 | goto no_calc; |
895 | /* | | 895 | /* |
896 | * Do Adaptive Interrupt Moderation: | | 896 | * Do Adaptive Interrupt Moderation: |
897 | * - Write out last calculated setting | | 897 | * - Write out last calculated setting |
898 | * - Calculate based on average size over | | 898 | * - Calculate based on average size over |
899 | * the last interval. | | 899 | * the last interval. |
900 | */ | | 900 | */ |
901 | if (que->eitr_setting) | | 901 | if (que->eitr_setting) |
902 | ixv_eitr_write(adapter, que->msix, que->eitr_setting); | | 902 | ixv_eitr_write(adapter, que->msix, que->eitr_setting); |
903 | | | 903 | |
904 | que->eitr_setting = 0; | | 904 | que->eitr_setting = 0; |
905 | | | 905 | |
906 | /* Idle, do nothing */ | | 906 | /* Idle, do nothing */ |
907 | if ((txr->bytes == 0) && (rxr->bytes == 0)) | | 907 | if ((txr->bytes == 0) && (rxr->bytes == 0)) |
908 | goto no_calc; | | 908 | goto no_calc; |
909 | | | 909 | |
910 | if ((txr->bytes) && (txr->packets)) | | 910 | if ((txr->bytes) && (txr->packets)) |
911 | newitr = txr->bytes/txr->packets; | | 911 | newitr = txr->bytes/txr->packets; |
912 | if ((rxr->bytes) && (rxr->packets)) | | 912 | if ((rxr->bytes) && (rxr->packets)) |
913 | newitr = uimax(newitr, (rxr->bytes / rxr->packets)); | | 913 | newitr = uimax(newitr, (rxr->bytes / rxr->packets)); |
914 | newitr += 24; /* account for hardware frame, crc */ | | 914 | newitr += 24; /* account for hardware frame, crc */ |
915 | | | 915 | |
916 | /* set an upper boundary */ | | 916 | /* set an upper boundary */ |
917 | newitr = uimin(newitr, 3000); | | 917 | newitr = uimin(newitr, 3000); |
918 | | | 918 | |
919 | /* Be nice to the mid range */ | | 919 | /* Be nice to the mid range */ |
920 | if ((newitr > 300) && (newitr < 1200)) | | 920 | if ((newitr > 300) && (newitr < 1200)) |
921 | newitr = (newitr / 3); | | 921 | newitr = (newitr / 3); |
922 | else | | 922 | else |
923 | newitr = (newitr / 2); | | 923 | newitr = (newitr / 2); |
924 | | | 924 | |
925 | /* | | 925 | /* |
926 | * When RSC is used, ITR interval must be larger than RSC_DELAY. | | 926 | * When RSC is used, ITR interval must be larger than RSC_DELAY. |
927 | * Currently, we use 2us for RSC_DELAY. The minimum value is always | | 927 | * Currently, we use 2us for RSC_DELAY. The minimum value is always |
928 | * greater than 2us on 100M (and 10M?(not documented)), but it's not | | 928 | * greater than 2us on 100M (and 10M?(not documented)), but it's not |
929 | * on 1G and higher. | | 929 | * on 1G and higher. |
930 | */ | | 930 | */ |
931 | if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) | | 931 | if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) |
932 | && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { | | 932 | && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { |
933 | if (newitr < IXGBE_MIN_RSC_EITR_10G1G) | | 933 | if (newitr < IXGBE_MIN_RSC_EITR_10G1G) |
934 | newitr = IXGBE_MIN_RSC_EITR_10G1G; | | 934 | newitr = IXGBE_MIN_RSC_EITR_10G1G; |
935 | } | | 935 | } |
936 | | | 936 | |
937 | /* save for next interrupt */ | | 937 | /* save for next interrupt */ |
938 | que->eitr_setting = newitr; | | 938 | que->eitr_setting = newitr; |
939 | | | 939 | |
940 | /* Reset state */ | | 940 | /* Reset state */ |
941 | txr->bytes = 0; | | 941 | txr->bytes = 0; |
942 | txr->packets = 0; | | 942 | txr->packets = 0; |
943 | rxr->bytes = 0; | | 943 | rxr->bytes = 0; |
944 | rxr->packets = 0; | | 944 | rxr->packets = 0; |
945 | | | 945 | |
946 | no_calc: | | 946 | no_calc: |
947 | if (more) | | 947 | if (more) |
948 | softint_schedule(que->que_si); | | 948 | softint_schedule(que->que_si); |
949 | else /* Re-enable this interrupt */ | | 949 | else /* Re-enable this interrupt */ |
950 | ixv_enable_queue(adapter, que->msix); | | 950 | ixv_enable_queue(adapter, que->msix); |
951 | | | 951 | |
952 | return 1; | | 952 | return 1; |
953 | } /* ixv_msix_que */ | | 953 | } /* ixv_msix_que */ |
954 | | | 954 | |
955 | /************************************************************************ | | 955 | /************************************************************************ |
956 | * ixv_msix_mbx | | 956 | * ixv_msix_mbx |
957 | ************************************************************************/ | | 957 | ************************************************************************/ |
958 | static int | | 958 | static int |
959 | ixv_msix_mbx(void *arg) | | 959 | ixv_msix_mbx(void *arg) |
960 | { | | 960 | { |
961 | struct adapter *adapter = arg; | | 961 | struct adapter *adapter = arg; |
962 | struct ixgbe_hw *hw = &adapter->hw; | | 962 | struct ixgbe_hw *hw = &adapter->hw; |
963 | | | 963 | |
964 | ++adapter->admin_irqev.ev_count; | | 964 | ++adapter->admin_irqev.ev_count; |
965 | /* NetBSD: We use auto-clear, so it's not required to write VTEICR */ | | 965 | /* NetBSD: We use auto-clear, so it's not required to write VTEICR */ |
966 | | | 966 | |
967 | /* Link status change */ | | 967 | /* Link status change */ |
968 | hw->mac.get_link_status = TRUE; | | 968 | hw->mac.get_link_status = TRUE; |
969 | atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MBX); | | 969 | atomic_or_32(&adapter->task_requests, IXGBE_REQUEST_TASK_MBX); |
970 | ixv_schedule_admin_tasklet(adapter); | | 970 | ixv_schedule_admin_tasklet(adapter); |
971 | | | 971 | |
972 | return 1; | | 972 | return 1; |
973 | } /* ixv_msix_mbx */ | | 973 | } /* ixv_msix_mbx */ |
974 | | | 974 | |
975 | static void | | 975 | static void |
976 | ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) | | 976 | ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) |
977 | { | | 977 | { |
978 | | | 978 | |
979 | /* | | 979 | /* |
980 | * Newer devices than 82598 have VF function, so this function is | | 980 | * Newer devices than 82598 have VF function, so this function is |
981 | * simple. | | 981 | * simple. |
982 | */ | | 982 | */ |
983 | itr |= IXGBE_EITR_CNT_WDIS; | | 983 | itr |= IXGBE_EITR_CNT_WDIS; |
984 | | | 984 | |
985 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr); | | 985 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr); |
986 | } | | 986 | } |
987 | | | 987 | |
988 | | | 988 | |
989 | /************************************************************************ | | 989 | /************************************************************************ |
990 | * ixv_media_status - Media Ioctl callback | | 990 | * ixv_media_status - Media Ioctl callback |
991 | * | | 991 | * |
992 | * Called whenever the user queries the status of | | 992 | * Called whenever the user queries the status of |
993 | * the interface using ifconfig. | | 993 | * the interface using ifconfig. |
994 | ************************************************************************/ | | 994 | ************************************************************************/ |
995 | static void | | 995 | static void |
996 | ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) | | 996 | ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) |
997 | { | | 997 | { |
998 | struct adapter *adapter = ifp->if_softc; | | 998 | struct adapter *adapter = ifp->if_softc; |
999 | | | 999 | |
1000 | INIT_DEBUGOUT("ixv_media_status: begin"); | | 1000 | INIT_DEBUGOUT("ixv_media_status: begin"); |
| @@ -2362,1165 +2362,1165 @@ ixv_configure_ivars(struct adapter *adap | | | @@ -2362,1165 +2362,1165 @@ ixv_configure_ivars(struct adapter *adap |
2362 | ixv_set_ivar(adapter, i, que->msix, 1); | | 2362 | ixv_set_ivar(adapter, i, que->msix, 1); |
2363 | /* Set an initial value in EITR */ | | 2363 | /* Set an initial value in EITR */ |
2364 | ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT); | | 2364 | ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT); |
2365 | } | | 2365 | } |
2366 | | | 2366 | |
2367 | /* For the mailbox interrupt */ | | 2367 | /* For the mailbox interrupt */ |
2368 | ixv_set_ivar(adapter, 1, adapter->vector, -1); | | 2368 | ixv_set_ivar(adapter, 1, adapter->vector, -1); |
2369 | } /* ixv_configure_ivars */ | | 2369 | } /* ixv_configure_ivars */ |
2370 | | | 2370 | |
2371 | | | 2371 | |
2372 | /************************************************************************ | | 2372 | /************************************************************************ |
2373 | * ixv_save_stats | | 2373 | * ixv_save_stats |
2374 | * | | 2374 | * |
2375 | * The VF stats registers never have a truly virgin | | 2375 | * The VF stats registers never have a truly virgin |
2376 | * starting point, so this routine tries to make an | | 2376 | * starting point, so this routine tries to make an |
2377 | * artificial one, marking ground zero on attach as | | 2377 | * artificial one, marking ground zero on attach as |
2378 | * it were. | | 2378 | * it were. |
2379 | ************************************************************************/ | | 2379 | ************************************************************************/ |
2380 | static void | | 2380 | static void |
2381 | ixv_save_stats(struct adapter *adapter) | | 2381 | ixv_save_stats(struct adapter *adapter) |
2382 | { | | 2382 | { |
2383 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; | | 2383 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; |
2384 | | | 2384 | |
2385 | if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) { | | 2385 | if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) { |
2386 | stats->saved_reset_vfgprc += | | 2386 | stats->saved_reset_vfgprc += |
2387 | stats->vfgprc.ev_count - stats->base_vfgprc; | | 2387 | stats->vfgprc.ev_count - stats->base_vfgprc; |
2388 | stats->saved_reset_vfgptc += | | 2388 | stats->saved_reset_vfgptc += |
2389 | stats->vfgptc.ev_count - stats->base_vfgptc; | | 2389 | stats->vfgptc.ev_count - stats->base_vfgptc; |
2390 | stats->saved_reset_vfgorc += | | 2390 | stats->saved_reset_vfgorc += |
2391 | stats->vfgorc.ev_count - stats->base_vfgorc; | | 2391 | stats->vfgorc.ev_count - stats->base_vfgorc; |
2392 | stats->saved_reset_vfgotc += | | 2392 | stats->saved_reset_vfgotc += |
2393 | stats->vfgotc.ev_count - stats->base_vfgotc; | | 2393 | stats->vfgotc.ev_count - stats->base_vfgotc; |
2394 | stats->saved_reset_vfmprc += | | 2394 | stats->saved_reset_vfmprc += |
2395 | stats->vfmprc.ev_count - stats->base_vfmprc; | | 2395 | stats->vfmprc.ev_count - stats->base_vfmprc; |
2396 | } | | 2396 | } |
2397 | } /* ixv_save_stats */ | | 2397 | } /* ixv_save_stats */ |
2398 | | | 2398 | |
2399 | /************************************************************************ | | 2399 | /************************************************************************ |
2400 | * ixv_init_stats | | 2400 | * ixv_init_stats |
2401 | ************************************************************************/ | | 2401 | ************************************************************************/ |
2402 | static void | | 2402 | static void |
2403 | ixv_init_stats(struct adapter *adapter) | | 2403 | ixv_init_stats(struct adapter *adapter) |
2404 | { | | 2404 | { |
2405 | struct ixgbe_hw *hw = &adapter->hw; | | 2405 | struct ixgbe_hw *hw = &adapter->hw; |
2406 | | | 2406 | |
2407 | adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); | | 2407 | adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); |
2408 | adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); | | 2408 | adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); |
2409 | adapter->stats.vf.last_vfgorc |= | | 2409 | adapter->stats.vf.last_vfgorc |= |
2410 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); | | 2410 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); |
2411 | | | 2411 | |
2412 | adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); | | 2412 | adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); |
2413 | adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); | | 2413 | adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); |
2414 | adapter->stats.vf.last_vfgotc |= | | 2414 | adapter->stats.vf.last_vfgotc |= |
2415 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); | | 2415 | (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); |
2416 | | | 2416 | |
2417 | adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); | | 2417 | adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); |
2418 | | | 2418 | |
2419 | adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; | | 2419 | adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; |
2420 | adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; | | 2420 | adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; |
2421 | adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; | | 2421 | adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; |
2422 | adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; | | 2422 | adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; |
2423 | adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; | | 2423 | adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; |
2424 | } /* ixv_init_stats */ | | 2424 | } /* ixv_init_stats */ |
2425 | | | 2425 | |
/*
 * Fold a 32-bit hardware statistics register into a 64-bit evcnt.
 * 'last' caches the previous raw reading; if the new reading is
 * smaller, the register wrapped, so carry 2^32 into the counter.
 * Requires a 'struct ixgbe_hw *hw' in the caller's scope.
 * Wrapped in do { } while (0) so it expands to a single statement
 * (safe in unbraced if/else); arguments are fully parenthesized.
 */
#define UPDATE_STAT_32(reg, last, count)				\
do {									\
	u32 current = IXGBE_READ_REG(hw, (reg));			\
	if (current < (last))						\
		(count).ev_count += 0x100000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFFF00000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2435 | | | 2435 | |
/*
 * Fold a 36-bit hardware statistics counter, split across LSB/MSB
 * registers, into a 64-bit evcnt. On wrap (new < last) carry 2^36.
 * Requires a 'struct ixgbe_hw *hw' in the caller's scope.
 * Wrapped in do { } while (0) so it expands to a single statement
 * (safe in unbraced if/else); arguments are fully parenthesized.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)				\
do {									\
	u64 cur_lsb = IXGBE_READ_REG(hw, (lsb));			\
	u64 cur_msb = IXGBE_READ_REG(hw, (msb));			\
	u64 current = ((cur_msb << 32) | cur_lsb);			\
	if (current < (last))						\
		(count).ev_count += 0x1000000000LL;			\
	(last) = current;						\
	(count).ev_count &= 0xFFFFFFF000000000LL;			\
	(count).ev_count |= current;					\
} while (0)
2447 | | | 2447 | |
2448 | /************************************************************************ | | 2448 | /************************************************************************ |
2449 | * ixv_update_stats - Update the board statistics counters. | | 2449 | * ixv_update_stats - Update the board statistics counters. |
2450 | ************************************************************************/ | | 2450 | ************************************************************************/ |
2451 | void | | 2451 | void |
2452 | ixv_update_stats(struct adapter *adapter) | | 2452 | ixv_update_stats(struct adapter *adapter) |
2453 | { | | 2453 | { |
2454 | struct ixgbe_hw *hw = &adapter->hw; | | 2454 | struct ixgbe_hw *hw = &adapter->hw; |
2455 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; | | 2455 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; |
2456 | | | 2456 | |
2457 | UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc); | | 2457 | UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc); |
2458 | UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc); | | 2458 | UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc); |
2459 | UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc, | | 2459 | UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc, |
2460 | stats->vfgorc); | | 2460 | stats->vfgorc); |
2461 | UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc, | | 2461 | UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc, |
2462 | stats->vfgotc); | | 2462 | stats->vfgotc); |
2463 | UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc); | | 2463 | UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc); |
2464 | | | 2464 | |
2465 | /* VF doesn't count errors by hardware */ | | 2465 | /* VF doesn't count errors by hardware */ |
2466 | | | 2466 | |
2467 | } /* ixv_update_stats */ | | 2467 | } /* ixv_update_stats */ |
2468 | | | 2468 | |
2469 | /************************************************************************ | | 2469 | /************************************************************************ |
2470 | * ixv_sysctl_interrupt_rate_handler | | 2470 | * ixv_sysctl_interrupt_rate_handler |
2471 | ************************************************************************/ | | 2471 | ************************************************************************/ |
2472 | static int | | 2472 | static int |
2473 | ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) | | 2473 | ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) |
2474 | { | | 2474 | { |
2475 | struct sysctlnode node = *rnode; | | 2475 | struct sysctlnode node = *rnode; |
2476 | struct ix_queue *que = (struct ix_queue *)node.sysctl_data; | | 2476 | struct ix_queue *que = (struct ix_queue *)node.sysctl_data; |
2477 | struct adapter *adapter = que->adapter; | | 2477 | struct adapter *adapter = que->adapter; |
2478 | uint32_t reg, usec, rate; | | 2478 | uint32_t reg, usec, rate; |
2479 | int error; | | 2479 | int error; |
2480 | | | 2480 | |
2481 | if (que == NULL) | | 2481 | if (que == NULL) |
2482 | return 0; | | 2482 | return 0; |
2483 | reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix)); | | 2483 | reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix)); |
2484 | usec = ((reg & 0x0FF8) >> 3); | | 2484 | usec = ((reg & 0x0FF8) >> 3); |
2485 | if (usec > 0) | | 2485 | if (usec > 0) |
2486 | rate = 500000 / usec; | | 2486 | rate = 500000 / usec; |
2487 | else | | 2487 | else |
2488 | rate = 0; | | 2488 | rate = 0; |
2489 | node.sysctl_data = &rate; | | 2489 | node.sysctl_data = &rate; |
2490 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); | | 2490 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
2491 | if (error || newp == NULL) | | 2491 | if (error || newp == NULL) |
2492 | return error; | | 2492 | return error; |
2493 | reg &= ~0xfff; /* default, no limitation */ | | 2493 | reg &= ~0xfff; /* default, no limitation */ |
2494 | if (rate > 0 && rate < 500000) { | | 2494 | if (rate > 0 && rate < 500000) { |
2495 | if (rate < 1000) | | 2495 | if (rate < 1000) |
2496 | rate = 1000; | | 2496 | rate = 1000; |
2497 | reg |= ((4000000 / rate) & 0xff8); | | 2497 | reg |= ((4000000 / rate) & 0xff8); |
2498 | /* | | 2498 | /* |
2499 | * When RSC is used, ITR interval must be larger than | | 2499 | * When RSC is used, ITR interval must be larger than |
2500 | * RSC_DELAY. Currently, we use 2us for RSC_DELAY. | | 2500 | * RSC_DELAY. Currently, we use 2us for RSC_DELAY. |
2501 | * The minimum value is always greater than 2us on 100M | | 2501 | * The minimum value is always greater than 2us on 100M |
2502 | * (and 10M?(not documented)), but it's not on 1G and higher. | | 2502 | * (and 10M?(not documented)), but it's not on 1G and higher. |
2503 | */ | | 2503 | */ |
2504 | if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) | | 2504 | if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) |
2505 | && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { | | 2505 | && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { |
2506 | if ((adapter->num_queues > 1) | | 2506 | if ((adapter->num_queues > 1) |
2507 | && (reg < IXGBE_MIN_RSC_EITR_10G1G)) | | 2507 | && (reg < IXGBE_MIN_RSC_EITR_10G1G)) |
2508 | return EINVAL; | | 2508 | return EINVAL; |
2509 | } | | 2509 | } |
2510 | ixv_max_interrupt_rate = rate; | | 2510 | ixv_max_interrupt_rate = rate; |
2511 | } else | | 2511 | } else |
2512 | ixv_max_interrupt_rate = 0; | | 2512 | ixv_max_interrupt_rate = 0; |
2513 | ixv_eitr_write(adapter, que->msix, reg); | | 2513 | ixv_eitr_write(adapter, que->msix, reg); |
2514 | | | 2514 | |
2515 | return (0); | | 2515 | return (0); |
2516 | } /* ixv_sysctl_interrupt_rate_handler */ | | 2516 | } /* ixv_sysctl_interrupt_rate_handler */ |
2517 | | | 2517 | |
2518 | const struct sysctlnode * | | 2518 | const struct sysctlnode * |
2519 | ixv_sysctl_instance(struct adapter *adapter) | | 2519 | ixv_sysctl_instance(struct adapter *adapter) |
2520 | { | | 2520 | { |
2521 | const char *dvname; | | 2521 | const char *dvname; |
2522 | struct sysctllog **log; | | 2522 | struct sysctllog **log; |
2523 | int rc; | | 2523 | int rc; |
2524 | const struct sysctlnode *rnode; | | 2524 | const struct sysctlnode *rnode; |
2525 | | | 2525 | |
2526 | log = &adapter->sysctllog; | | 2526 | log = &adapter->sysctllog; |
2527 | dvname = device_xname(adapter->dev); | | 2527 | dvname = device_xname(adapter->dev); |
2528 | | | 2528 | |
2529 | if ((rc = sysctl_createv(log, 0, NULL, &rnode, | | 2529 | if ((rc = sysctl_createv(log, 0, NULL, &rnode, |
2530 | 0, CTLTYPE_NODE, dvname, | | 2530 | 0, CTLTYPE_NODE, dvname, |
2531 | SYSCTL_DESCR("ixv information and settings"), | | 2531 | SYSCTL_DESCR("ixv information and settings"), |
2532 | NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) | | 2532 | NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) |
2533 | goto err; | | 2533 | goto err; |
2534 | | | 2534 | |
2535 | return rnode; | | 2535 | return rnode; |
2536 | err: | | 2536 | err: |
2537 | device_printf(adapter->dev, | | 2537 | device_printf(adapter->dev, |
2538 | "%s: sysctl_createv failed, rc = %d\n", __func__, rc); | | 2538 | "%s: sysctl_createv failed, rc = %d\n", __func__, rc); |
2539 | return NULL; | | 2539 | return NULL; |
2540 | } | | 2540 | } |
2541 | | | 2541 | |
2542 | static void | | 2542 | static void |
2543 | ixv_add_device_sysctls(struct adapter *adapter) | | 2543 | ixv_add_device_sysctls(struct adapter *adapter) |
2544 | { | | 2544 | { |
2545 | struct sysctllog **log; | | 2545 | struct sysctllog **log; |
2546 | const struct sysctlnode *rnode, *cnode; | | 2546 | const struct sysctlnode *rnode, *cnode; |
2547 | device_t dev; | | 2547 | device_t dev; |
2548 | | | 2548 | |
2549 | dev = adapter->dev; | | 2549 | dev = adapter->dev; |
2550 | log = &adapter->sysctllog; | | 2550 | log = &adapter->sysctllog; |
2551 | | | 2551 | |
2552 | if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { | | 2552 | if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { |
2553 | aprint_error_dev(dev, "could not create sysctl root\n"); | | 2553 | aprint_error_dev(dev, "could not create sysctl root\n"); |
2554 | return; | | 2554 | return; |
2555 | } | | 2555 | } |
2556 | | | 2556 | |
2557 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 2557 | if (sysctl_createv(log, 0, &rnode, &cnode, |
2558 | CTLFLAG_READWRITE, CTLTYPE_INT, | | 2558 | CTLFLAG_READWRITE, CTLTYPE_INT, |
2559 | "debug", SYSCTL_DESCR("Debug Info"), | | 2559 | "debug", SYSCTL_DESCR("Debug Info"), |
2560 | ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) | | 2560 | ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) |
2561 | aprint_error_dev(dev, "could not create sysctl\n"); | | 2561 | aprint_error_dev(dev, "could not create sysctl\n"); |
2562 | | | 2562 | |
2563 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 2563 | if (sysctl_createv(log, 0, &rnode, &cnode, |
2564 | CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue", | | 2564 | CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue", |
2565 | SYSCTL_DESCR("Number of jumbo buffers per queue"), | | 2565 | SYSCTL_DESCR("Number of jumbo buffers per queue"), |
2566 | NULL, 0, &adapter->num_jcl, 0, CTL_CREATE, | | 2566 | NULL, 0, &adapter->num_jcl, 0, CTL_CREATE, |
2567 | CTL_EOL) != 0) | | 2567 | CTL_EOL) != 0) |
2568 | aprint_error_dev(dev, "could not create sysctl\n"); | | 2568 | aprint_error_dev(dev, "could not create sysctl\n"); |
2569 | | | 2569 | |
2570 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 2570 | if (sysctl_createv(log, 0, &rnode, &cnode, |
2571 | CTLFLAG_READWRITE, CTLTYPE_BOOL, | | 2571 | CTLFLAG_READWRITE, CTLTYPE_BOOL, |
2572 | "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), | | 2572 | "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), |
2573 | NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) | | 2573 | NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) |
2574 | aprint_error_dev(dev, "could not create sysctl\n"); | | 2574 | aprint_error_dev(dev, "could not create sysctl\n"); |
2575 | | | 2575 | |
2576 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 2576 | if (sysctl_createv(log, 0, &rnode, &cnode, |
2577 | CTLFLAG_READWRITE, CTLTYPE_BOOL, | | 2577 | CTLFLAG_READWRITE, CTLTYPE_BOOL, |
2578 | "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), | | 2578 | "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), |
2579 | NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0) | | 2579 | NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0) |
2580 | aprint_error_dev(dev, "could not create sysctl\n"); | | 2580 | aprint_error_dev(dev, "could not create sysctl\n"); |
2581 | } | | 2581 | } |
2582 | | | 2582 | |
2583 | /************************************************************************ | | 2583 | /************************************************************************ |
2584 | * ixv_add_stats_sysctls - Add statistic sysctls for the VF. | | 2584 | * ixv_add_stats_sysctls - Add statistic sysctls for the VF. |
2585 | ************************************************************************/ | | 2585 | ************************************************************************/ |
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct ixgbe_hw *hw = &adapter->hw;
	const struct sysctlnode *rnode, *cnode;
	struct sysctllog **log = &adapter->sysctllog;
	/* Device instance name ("ixvN"); used as the evcnt group name. */
	const char *xname = device_xname(dev);

	/* Driver Statistics */
	/* Software counters for TX DMA mapping failures, grouped by errno. */
	evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EFBIG");
	evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
	    NULL, xname, "m_defrag() failed");
	evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EFBIG");
	evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail EINVAL");
	evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma hard fail other");
	evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail EAGAIN");
	evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
	    NULL, xname, "Driver tx dma soft fail ENOMEM");
	evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
	    NULL, xname, "Watchdog timeouts");
	evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
	    NULL, xname, "TSO errors");
	evcnt_attach_dynamic(&adapter->admin_irqev, EVCNT_TYPE_INTR,
	    NULL, xname, "Admin MSI-X IRQ Handled");
	evcnt_attach_dynamic(&adapter->link_workev, EVCNT_TYPE_INTR,
	    NULL, xname, "Admin event");

	/*
	 * Per-queue statistics: one sysctl subtree ("qN") plus a set of
	 * event counters per queue pair.  Any sysctl_createv() failure
	 * aborts the remainder of the loop (counters already attached
	 * stay attached).
	 */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		snprintf(adapter->queues[i].evnamebuf,
		    sizeof(adapter->queues[i].evnamebuf), "%s q%d",
		    xname, i);
		snprintf(adapter->queues[i].namebuf,
		    sizeof(adapter->queues[i].namebuf), "q%d", i);

		/* Re-fetch the device root each pass: rnode is clobbered
		 * below when the "qN" child node is created. */
		if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
			aprint_error_dev(dev, "could not create sysctl root\n");
			break;
		}

		/* Create the per-queue node; rnode now points at it. */
		if (sysctl_createv(log, 0, &rnode, &rnode,
		    0, CTLTYPE_NODE,
		    adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READWRITE, CTLTYPE_INT,
		    "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
		    ixv_sysctl_interrupt_rate_handler, 0,
		    (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Read-only views of the hardware TX descriptor pointers. */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
		    ixv_sysctl_tdh_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY, CTLTYPE_INT,
		    "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
		    ixv_sysctl_tdt_handler, 0, (void *)txr,
		    0, CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue interrupt/softint and TX counters. */
		evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
		    NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
		evcnt_attach_dynamic(&adapter->queues[i].handleq,
		    EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
		    "Handled queue in softint");
		evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
		evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "TSO");
		evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "TX Queue No Descriptor Available");
		evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Queue Packets Transmitted");
#ifndef IXGBE_LEGACY_TX
		evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf,
		    "Packets dropped in pcq");
#endif

#ifdef LRO
		struct lro_ctrl *lro = &rxr->lro;
#endif /* LRO */

		/* Read-only views of the RX descriptor ring pointers. */
		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
		    ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
		    ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		if (sysctl_createv(log, 0, &rnode, &cnode,
		    CTLFLAG_READONLY,
		    CTLTYPE_INT,
		    "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
		    ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
		    CTL_CREATE, CTL_EOL) != 0)
			break;

		/* Per-queue RX counters. */
		evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
		evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
		evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
		evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
		evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
		    NULL, adapter->queues[i].evnamebuf, "Rx discarded");
#ifdef LRO
		/*
		 * NOTE(review): FreeBSD-style sysctl registration left over
		 * from the shared Intel code; 'ctx' and 'queue_list' are not
		 * declared in this function, so this presumably does not
		 * build if LRO is ever defined on NetBSD -- confirm before
		 * enabling LRO.
		 */
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
#endif /* LRO */
	}

	/* MAC stats get their own sub node */

	snprintf(stats->namebuf,
	    sizeof(stats->namebuf), "%s MAC Statistics", xname);

	/* Checksum-offload counters, grouped under the MAC-stats name. */
	evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP");
	evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4");
	evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - IP bad");
	evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
	    stats->namebuf, "rx csum offload - L4 bad");

	/* Packet Reception Stats */
	/* VF hardware counters (vfgprc etc.), attached under the bare
	 * device name rather than the MAC-stats sub-name. */
	evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Received");
	evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Received");
	evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
	    xname, "Multicast Packets Received");
	evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Packets Transmitted");
	evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
	    xname, "Good Octets Transmitted");

	/* Mailbox Stats */
	/* PF<->VF mailbox traffic counters kept in the shared hw struct. */
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
	    xname, "message TXs");
	evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
	    xname, "message RXs");
	evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
	    xname, "ACKs");
	evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
	    xname, "REQs");
	evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
	    xname, "RSTs");

} /* ixv_add_stats_sysctls */
2769 | | | 2769 | |
2770 | static void | | 2770 | static void |
2771 | ixv_clear_evcnt(struct adapter *adapter) | | 2771 | ixv_clear_evcnt(struct adapter *adapter) |
2772 | { | | 2772 | { |
2773 | struct tx_ring *txr = adapter->tx_rings; | | 2773 | struct tx_ring *txr = adapter->tx_rings; |
2774 | struct rx_ring *rxr = adapter->rx_rings; | | 2774 | struct rx_ring *rxr = adapter->rx_rings; |
2775 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; | | 2775 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; |
2776 | struct ixgbe_hw *hw = &adapter->hw; | | 2776 | struct ixgbe_hw *hw = &adapter->hw; |
2777 | int i; | | 2777 | int i; |
2778 | | | 2778 | |
2779 | /* Driver Statistics */ | | 2779 | /* Driver Statistics */ |
2780 | adapter->efbig_tx_dma_setup.ev_count = 0; | | 2780 | adapter->efbig_tx_dma_setup.ev_count = 0; |
2781 | adapter->mbuf_defrag_failed.ev_count = 0; | | 2781 | adapter->mbuf_defrag_failed.ev_count = 0; |
2782 | adapter->efbig2_tx_dma_setup.ev_count = 0; | | 2782 | adapter->efbig2_tx_dma_setup.ev_count = 0; |
2783 | adapter->einval_tx_dma_setup.ev_count = 0; | | 2783 | adapter->einval_tx_dma_setup.ev_count = 0; |
2784 | adapter->other_tx_dma_setup.ev_count = 0; | | 2784 | adapter->other_tx_dma_setup.ev_count = 0; |
2785 | adapter->eagain_tx_dma_setup.ev_count = 0; | | 2785 | adapter->eagain_tx_dma_setup.ev_count = 0; |
2786 | adapter->enomem_tx_dma_setup.ev_count = 0; | | 2786 | adapter->enomem_tx_dma_setup.ev_count = 0; |
2787 | adapter->watchdog_events.ev_count = 0; | | 2787 | adapter->watchdog_events.ev_count = 0; |
2788 | adapter->tso_err.ev_count = 0; | | 2788 | adapter->tso_err.ev_count = 0; |
2789 | adapter->admin_irqev.ev_count = 0; | | 2789 | adapter->admin_irqev.ev_count = 0; |
2790 | adapter->link_workev.ev_count = 0; | | 2790 | adapter->link_workev.ev_count = 0; |
2791 | | | 2791 | |
2792 | for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { | | 2792 | for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { |
2793 | adapter->queues[i].irqs.ev_count = 0; | | 2793 | adapter->queues[i].irqs.ev_count = 0; |
2794 | adapter->queues[i].handleq.ev_count = 0; | | 2794 | adapter->queues[i].handleq.ev_count = 0; |
2795 | adapter->queues[i].req.ev_count = 0; | | 2795 | adapter->queues[i].req.ev_count = 0; |
2796 | txr->tso_tx.ev_count = 0; | | 2796 | txr->tso_tx.ev_count = 0; |
2797 | txr->no_desc_avail.ev_count = 0; | | 2797 | txr->no_desc_avail.ev_count = 0; |
2798 | txr->total_packets.ev_count = 0; | | 2798 | txr->total_packets.ev_count = 0; |
2799 | #ifndef IXGBE_LEGACY_TX | | 2799 | #ifndef IXGBE_LEGACY_TX |
2800 | txr->pcq_drops.ev_count = 0; | | 2800 | txr->pcq_drops.ev_count = 0; |
2801 | #endif | | 2801 | #endif |
2802 | txr->q_efbig_tx_dma_setup = 0; | | 2802 | txr->q_efbig_tx_dma_setup = 0; |
2803 | txr->q_mbuf_defrag_failed = 0; | | 2803 | txr->q_mbuf_defrag_failed = 0; |
2804 | txr->q_efbig2_tx_dma_setup = 0; | | 2804 | txr->q_efbig2_tx_dma_setup = 0; |
2805 | txr->q_einval_tx_dma_setup = 0; | | 2805 | txr->q_einval_tx_dma_setup = 0; |
2806 | txr->q_other_tx_dma_setup = 0; | | 2806 | txr->q_other_tx_dma_setup = 0; |
2807 | txr->q_eagain_tx_dma_setup = 0; | | 2807 | txr->q_eagain_tx_dma_setup = 0; |
2808 | txr->q_enomem_tx_dma_setup = 0; | | 2808 | txr->q_enomem_tx_dma_setup = 0; |
2809 | txr->q_tso_err = 0; | | 2809 | txr->q_tso_err = 0; |
2810 | | | 2810 | |
2811 | rxr->rx_packets.ev_count = 0; | | 2811 | rxr->rx_packets.ev_count = 0; |
2812 | rxr->rx_bytes.ev_count = 0; | | 2812 | rxr->rx_bytes.ev_count = 0; |
2813 | rxr->rx_copies.ev_count = 0; | | 2813 | rxr->rx_copies.ev_count = 0; |
2814 | rxr->no_jmbuf.ev_count = 0; | | 2814 | rxr->no_jmbuf.ev_count = 0; |
2815 | rxr->rx_discarded.ev_count = 0; | | 2815 | rxr->rx_discarded.ev_count = 0; |
2816 | } | | 2816 | } |
2817 | | | 2817 | |
2818 | /* MAC stats get their own sub node */ | | 2818 | /* MAC stats get their own sub node */ |
2819 | | | 2819 | |
2820 | stats->ipcs.ev_count = 0; | | 2820 | stats->ipcs.ev_count = 0; |
2821 | stats->l4cs.ev_count = 0; | | 2821 | stats->l4cs.ev_count = 0; |
2822 | stats->ipcs_bad.ev_count = 0; | | 2822 | stats->ipcs_bad.ev_count = 0; |
2823 | stats->l4cs_bad.ev_count = 0; | | 2823 | stats->l4cs_bad.ev_count = 0; |
2824 | | | 2824 | |
2825 | /* Packet Reception Stats */ | | 2825 | /* Packet Reception Stats */ |
2826 | stats->vfgprc.ev_count = 0; | | 2826 | stats->vfgprc.ev_count = 0; |
2827 | stats->vfgorc.ev_count = 0; | | 2827 | stats->vfgorc.ev_count = 0; |
2828 | stats->vfmprc.ev_count = 0; | | 2828 | stats->vfmprc.ev_count = 0; |
2829 | stats->vfgptc.ev_count = 0; | | 2829 | stats->vfgptc.ev_count = 0; |
2830 | stats->vfgotc.ev_count = 0; | | 2830 | stats->vfgotc.ev_count = 0; |
2831 | | | 2831 | |
2832 | /* Mailbox Stats */ | | 2832 | /* Mailbox Stats */ |
2833 | hw->mbx.stats.msgs_tx.ev_count = 0; | | 2833 | hw->mbx.stats.msgs_tx.ev_count = 0; |
2834 | hw->mbx.stats.msgs_rx.ev_count = 0; | | 2834 | hw->mbx.stats.msgs_rx.ev_count = 0; |
2835 | hw->mbx.stats.acks.ev_count = 0; | | 2835 | hw->mbx.stats.acks.ev_count = 0; |
2836 | hw->mbx.stats.reqs.ev_count = 0; | | 2836 | hw->mbx.stats.reqs.ev_count = 0; |
2837 | hw->mbx.stats.rsts.ev_count = 0; | | 2837 | hw->mbx.stats.rsts.ev_count = 0; |
2838 | | | 2838 | |
2839 | } /* ixv_clear_evcnt */ | | 2839 | } /* ixv_clear_evcnt */ |
2840 | | | 2840 | |
2841 | /************************************************************************ | | 2841 | /************************************************************************ |
2842 | * ixv_set_sysctl_value | | 2842 | * ixv_set_sysctl_value |
2843 | ************************************************************************/ | | 2843 | ************************************************************************/ |
2844 | static void | | 2844 | static void |
2845 | ixv_set_sysctl_value(struct adapter *adapter, const char *name, | | 2845 | ixv_set_sysctl_value(struct adapter *adapter, const char *name, |
2846 | const char *description, int *limit, int value) | | 2846 | const char *description, int *limit, int value) |
2847 | { | | 2847 | { |
2848 | device_t dev = adapter->dev; | | 2848 | device_t dev = adapter->dev; |
2849 | struct sysctllog **log; | | 2849 | struct sysctllog **log; |
2850 | const struct sysctlnode *rnode, *cnode; | | 2850 | const struct sysctlnode *rnode, *cnode; |
2851 | | | 2851 | |
2852 | log = &adapter->sysctllog; | | 2852 | log = &adapter->sysctllog; |
2853 | if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { | | 2853 | if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { |
2854 | aprint_error_dev(dev, "could not create sysctl root\n"); | | 2854 | aprint_error_dev(dev, "could not create sysctl root\n"); |
2855 | return; | | 2855 | return; |
2856 | } | | 2856 | } |
2857 | if (sysctl_createv(log, 0, &rnode, &cnode, | | 2857 | if (sysctl_createv(log, 0, &rnode, &cnode, |
2858 | CTLFLAG_READWRITE, CTLTYPE_INT, | | 2858 | CTLFLAG_READWRITE, CTLTYPE_INT, |
2859 | name, SYSCTL_DESCR(description), | | 2859 | name, SYSCTL_DESCR(description), |
2860 | NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0) | | 2860 | NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0) |
2861 | aprint_error_dev(dev, "could not create sysctl\n"); | | 2861 | aprint_error_dev(dev, "could not create sysctl\n"); |
2862 | *limit = value; | | 2862 | *limit = value; |
2863 | } /* ixv_set_sysctl_value */ | | 2863 | } /* ixv_set_sysctl_value */ |
2864 | | | 2864 | |
2865 | /************************************************************************ | | 2865 | /************************************************************************ |
2866 | * ixv_print_debug_info | | 2866 | * ixv_print_debug_info |
2867 | * | | 2867 | * |
 * Called only when debugging is enabled via the driver's "debug"
 * sysctl (ixv_sysctl_debug); the "em_display_debug_stats" name in the
 * old comment was a leftover from the em(4) driver.
2869 | * Provides a way to take a look at important statistics | | 2869 | * Provides a way to take a look at important statistics |
2870 | * maintained by the driver and hardware. | | 2870 | * maintained by the driver and hardware. |
2871 | ************************************************************************/ | | 2871 | ************************************************************************/ |
2872 | static void | | 2872 | static void |
2873 | ixv_print_debug_info(struct adapter *adapter) | | 2873 | ixv_print_debug_info(struct adapter *adapter) |
2874 | { | | 2874 | { |
2875 | device_t dev = adapter->dev; | | 2875 | device_t dev = adapter->dev; |
2876 | struct ix_queue *que = adapter->queues; | | 2876 | struct ix_queue *que = adapter->queues; |
2877 | struct rx_ring *rxr; | | 2877 | struct rx_ring *rxr; |
2878 | struct tx_ring *txr; | | 2878 | struct tx_ring *txr; |
2879 | #ifdef LRO | | 2879 | #ifdef LRO |
2880 | struct lro_ctrl *lro; | | 2880 | struct lro_ctrl *lro; |
2881 | #endif /* LRO */ | | 2881 | #endif /* LRO */ |
2882 | | | 2882 | |
2883 | for (int i = 0; i < adapter->num_queues; i++, que++) { | | 2883 | for (int i = 0; i < adapter->num_queues; i++, que++) { |
2884 | txr = que->txr; | | 2884 | txr = que->txr; |
2885 | rxr = que->rxr; | | 2885 | rxr = que->rxr; |
2886 | #ifdef LRO | | 2886 | #ifdef LRO |
2887 | lro = &rxr->lro; | | 2887 | lro = &rxr->lro; |
2888 | #endif /* LRO */ | | 2888 | #endif /* LRO */ |
2889 | device_printf(dev, "QUE(%d) IRQs Handled: %lu\n", | | 2889 | device_printf(dev, "QUE(%d) IRQs Handled: %lu\n", |
2890 | que->msix, (long)que->irqs.ev_count); | | 2890 | que->msix, (long)que->irqs.ev_count); |
2891 | device_printf(dev, "RX(%d) Packets Received: %lld\n", | | 2891 | device_printf(dev, "RX(%d) Packets Received: %lld\n", |
2892 | rxr->me, (long long)rxr->rx_packets.ev_count); | | 2892 | rxr->me, (long long)rxr->rx_packets.ev_count); |
2893 | device_printf(dev, "RX(%d) Bytes Received: %lu\n", | | 2893 | device_printf(dev, "RX(%d) Bytes Received: %lu\n", |
2894 | rxr->me, (long)rxr->rx_bytes.ev_count); | | 2894 | rxr->me, (long)rxr->rx_bytes.ev_count); |
2895 | #ifdef LRO | | 2895 | #ifdef LRO |
2896 | device_printf(dev, "RX(%d) LRO Queued= %ju\n", | | 2896 | device_printf(dev, "RX(%d) LRO Queued= %ju\n", |
2897 | rxr->me, (uintmax_t)lro->lro_queued); | | 2897 | rxr->me, (uintmax_t)lro->lro_queued); |
2898 | device_printf(dev, "RX(%d) LRO Flushed= %ju\n", | | 2898 | device_printf(dev, "RX(%d) LRO Flushed= %ju\n", |
2899 | rxr->me, (uintmax_t)lro->lro_flushed); | | 2899 | rxr->me, (uintmax_t)lro->lro_flushed); |
2900 | #endif /* LRO */ | | 2900 | #endif /* LRO */ |
2901 | device_printf(dev, "TX(%d) Packets Sent: %lu\n", | | 2901 | device_printf(dev, "TX(%d) Packets Sent: %lu\n", |
2902 | txr->me, (long)txr->total_packets.ev_count); | | 2902 | txr->me, (long)txr->total_packets.ev_count); |
2903 | device_printf(dev, "TX(%d) NO Desc Avail: %lu\n", | | 2903 | device_printf(dev, "TX(%d) NO Desc Avail: %lu\n", |
2904 | txr->me, (long)txr->no_desc_avail.ev_count); | | 2904 | txr->me, (long)txr->no_desc_avail.ev_count); |
2905 | } | | 2905 | } |
2906 | | | 2906 | |
2907 | device_printf(dev, "Admin IRQ Handled: %lu\n", | | 2907 | device_printf(dev, "Admin IRQ Handled: %lu\n", |
2908 | (long)adapter->admin_irqev.ev_count); | | 2908 | (long)adapter->admin_irqev.ev_count); |
2909 | device_printf(dev, "Admin work Handled: %lu\n", | | 2909 | device_printf(dev, "Admin work Handled: %lu\n", |
2910 | (long)adapter->link_workev.ev_count); | | 2910 | (long)adapter->link_workev.ev_count); |
2911 | } /* ixv_print_debug_info */ | | 2911 | } /* ixv_print_debug_info */ |
2912 | | | 2912 | |
2913 | /************************************************************************ | | 2913 | /************************************************************************ |
2914 | * ixv_sysctl_debug | | 2914 | * ixv_sysctl_debug |
2915 | ************************************************************************/ | | 2915 | ************************************************************************/ |
2916 | static int | | 2916 | static int |
2917 | ixv_sysctl_debug(SYSCTLFN_ARGS) | | 2917 | ixv_sysctl_debug(SYSCTLFN_ARGS) |
2918 | { | | 2918 | { |
2919 | struct sysctlnode node = *rnode; | | 2919 | struct sysctlnode node = *rnode; |
2920 | struct adapter *adapter = (struct adapter *)node.sysctl_data; | | 2920 | struct adapter *adapter = (struct adapter *)node.sysctl_data; |
2921 | int error, result; | | 2921 | int error, result; |
2922 | | | 2922 | |
2923 | node.sysctl_data = &result; | | 2923 | node.sysctl_data = &result; |
2924 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); | | 2924 | error = sysctl_lookup(SYSCTLFN_CALL(&node)); |
2925 | | | 2925 | |
2926 | if (error || newp == NULL) | | 2926 | if (error || newp == NULL) |
2927 | return error; | | 2927 | return error; |
2928 | | | 2928 | |
2929 | if (result == 1) | | 2929 | if (result == 1) |
2930 | ixv_print_debug_info(adapter); | | 2930 | ixv_print_debug_info(adapter); |
2931 | | | 2931 | |
2932 | return 0; | | 2932 | return 0; |
2933 | } /* ixv_sysctl_debug */ | | 2933 | } /* ixv_sysctl_debug */ |
2934 | | | 2934 | |
2935 | /************************************************************************ | | 2935 | /************************************************************************ |
2936 | * ixv_init_device_features | | 2936 | * ixv_init_device_features |
2937 | ************************************************************************/ | | 2937 | ************************************************************************/ |
2938 | static void | | 2938 | static void |
2939 | ixv_init_device_features(struct adapter *adapter) | | 2939 | ixv_init_device_features(struct adapter *adapter) |
2940 | { | | 2940 | { |
2941 | adapter->feat_cap = IXGBE_FEATURE_NETMAP | | 2941 | adapter->feat_cap = IXGBE_FEATURE_NETMAP |
2942 | | IXGBE_FEATURE_VF | | 2942 | | IXGBE_FEATURE_VF |
2943 | | IXGBE_FEATURE_RSS | | 2943 | | IXGBE_FEATURE_RSS |
2944 | | IXGBE_FEATURE_LEGACY_TX; | | 2944 | | IXGBE_FEATURE_LEGACY_TX; |
2945 | | | 2945 | |
2946 | /* A tad short on feature flags for VFs, atm. */ | | 2946 | /* A tad short on feature flags for VFs, atm. */ |
2947 | switch (adapter->hw.mac.type) { | | 2947 | switch (adapter->hw.mac.type) { |
2948 | case ixgbe_mac_82599_vf: | | 2948 | case ixgbe_mac_82599_vf: |
2949 | break; | | 2949 | break; |
2950 | case ixgbe_mac_X540_vf: | | 2950 | case ixgbe_mac_X540_vf: |
2951 | break; | | 2951 | break; |
2952 | case ixgbe_mac_X550_vf: | | 2952 | case ixgbe_mac_X550_vf: |
2953 | case ixgbe_mac_X550EM_x_vf: | | 2953 | case ixgbe_mac_X550EM_x_vf: |
2954 | case ixgbe_mac_X550EM_a_vf: | | 2954 | case ixgbe_mac_X550EM_a_vf: |
2955 | adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; | | 2955 | adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; |
2956 | break; | | 2956 | break; |
2957 | default: | | 2957 | default: |
2958 | break; | | 2958 | break; |
2959 | } | | 2959 | } |
2960 | | | 2960 | |
2961 | /* Enabled by default... */ | | 2961 | /* Enabled by default... */ |
2962 | /* Is a virtual function (VF) */ | | 2962 | /* Is a virtual function (VF) */ |
2963 | if (adapter->feat_cap & IXGBE_FEATURE_VF) | | 2963 | if (adapter->feat_cap & IXGBE_FEATURE_VF) |
2964 | adapter->feat_en |= IXGBE_FEATURE_VF; | | 2964 | adapter->feat_en |= IXGBE_FEATURE_VF; |
2965 | /* Netmap */ | | 2965 | /* Netmap */ |
2966 | if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) | | 2966 | if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) |
2967 | adapter->feat_en |= IXGBE_FEATURE_NETMAP; | | 2967 | adapter->feat_en |= IXGBE_FEATURE_NETMAP; |
2968 | /* Receive-Side Scaling (RSS) */ | | 2968 | /* Receive-Side Scaling (RSS) */ |
2969 | if (adapter->feat_cap & IXGBE_FEATURE_RSS) | | 2969 | if (adapter->feat_cap & IXGBE_FEATURE_RSS) |
2970 | adapter->feat_en |= IXGBE_FEATURE_RSS; | | 2970 | adapter->feat_en |= IXGBE_FEATURE_RSS; |
2971 | /* Needs advanced context descriptor regardless of offloads req'd */ | | 2971 | /* Needs advanced context descriptor regardless of offloads req'd */ |
2972 | if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) | | 2972 | if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) |
2973 | adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; | | 2973 | adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; |
2974 | | | 2974 | |
2975 | /* Enabled via sysctl... */ | | 2975 | /* Enabled via sysctl... */ |
2976 | /* Legacy (single queue) transmit */ | | 2976 | /* Legacy (single queue) transmit */ |
2977 | if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && | | 2977 | if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && |
2978 | ixv_enable_legacy_tx) | | 2978 | ixv_enable_legacy_tx) |
2979 | adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; | | 2979 | adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; |
2980 | } /* ixv_init_device_features */ | | 2980 | } /* ixv_init_device_features */ |
2981 | | | 2981 | |
2982 | /************************************************************************ | | 2982 | /************************************************************************ |
2983 | * ixv_shutdown - Shutdown entry point | | 2983 | * ixv_shutdown - Shutdown entry point |
2984 | ************************************************************************/ | | 2984 | ************************************************************************/ |
2985 | #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ | | 2985 | #if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ |
2986 | static int | | 2986 | static int |
2987 | ixv_shutdown(device_t dev) | | 2987 | ixv_shutdown(device_t dev) |
2988 | { | | 2988 | { |
2989 | struct adapter *adapter = device_private(dev); | | 2989 | struct adapter *adapter = device_private(dev); |
2990 | IXGBE_CORE_LOCK(adapter); | | 2990 | IXGBE_CORE_LOCK(adapter); |
2991 | ixv_stop_locked(adapter); | | 2991 | ixv_stop_locked(adapter); |
2992 | IXGBE_CORE_UNLOCK(adapter); | | 2992 | IXGBE_CORE_UNLOCK(adapter); |
2993 | | | 2993 | |
2994 | return (0); | | 2994 | return (0); |
2995 | } /* ixv_shutdown */ | | 2995 | } /* ixv_shutdown */ |
2996 | #endif | | 2996 | #endif |
2997 | | | 2997 | |
/************************************************************************
 * ixv_ifflags_cb - ethercom callback for if_flags / ec_capenable changes
 *
 *   Returns 0 on success, ENETRESET when the change requires a full
 *   reinitialization of the interface, or the error from
 *   ixv_set_rxfilter().
 ************************************************************************/
static int
ixv_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	u_short saved_flags;
	u_short change;
	int rv = 0;

	IXGBE_CORE_LOCK(adapter);

	/* Keep the old flags so they can be restored on filter failure. */
	saved_flags = adapter->if_flags;
	change = ifp->if_flags ^ adapter->if_flags;
	if (change != 0)
		adapter->if_flags = ifp->if_flags;

	/* Any change outside IFF_CANTCHANGE/IFF_DEBUG needs a reinit. */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		rv = ENETRESET;
		goto out;
	} else if ((change & IFF_PROMISC) != 0) {
		rv = ixv_set_rxfilter(adapter);
		if (rv != 0) {
			/* Restore previous */
			adapter->if_flags = saved_flags;
			goto out;
		}
	}

	/* Check for ec_capenable. */
	change = ec->ec_capenable ^ adapter->ec_capenable;
	adapter->ec_capenable = ec->ec_capenable;
	/* Only the VLAN-related capabilities can be toggled in place. */
	if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_VLAN_HWFILTER)) != 0) {
		rv = ENETRESET;
		goto out;
	}

	/*
	 * Special handling is not required for ETHERCAP_VLAN_MTU.
	 * PF's MAXFRS(MHADD) does not include the 4bytes of the VLAN header.
	 */

	/* Set up VLAN support and filter */
	if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
		rv = ixv_setup_vlan_support(adapter);

out:
	IXGBE_CORE_UNLOCK(adapter);

	return rv;
}
3049 | | | 3049 | |
3050 | | | 3050 | |
3051 | /************************************************************************ | | 3051 | /************************************************************************ |
3052 | * ixv_ioctl - Ioctl entry point | | 3052 | * ixv_ioctl - Ioctl entry point |
3053 | * | | 3053 | * |
3054 | * Called when the user wants to configure the interface. | | 3054 | * Called when the user wants to configure the interface. |
3055 | * | | 3055 | * |
3056 | * return 0 on success, positive on failure | | 3056 | * return 0 on success, positive on failure |
3057 | ************************************************************************/ | | 3057 | ************************************************************************/ |
3058 | static int | | 3058 | static int |
3059 | ixv_ioctl(struct ifnet *ifp, u_long command, void *data) | | 3059 | ixv_ioctl(struct ifnet *ifp, u_long command, void *data) |
3060 | { | | 3060 | { |
3061 | struct adapter *adapter = ifp->if_softc; | | 3061 | struct adapter *adapter = ifp->if_softc; |
3062 | struct ixgbe_hw *hw = &adapter->hw; | | 3062 | struct ixgbe_hw *hw = &adapter->hw; |
3063 | struct ifcapreq *ifcr = data; | | 3063 | struct ifcapreq *ifcr = data; |
3064 | int error; | | 3064 | int error; |
3065 | int l4csum_en; | | 3065 | int l4csum_en; |
3066 | const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | | | 3066 | const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | |
3067 | IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; | | 3067 | IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; |
3068 | | | 3068 | |
3069 | switch (command) { | | 3069 | switch (command) { |
3070 | case SIOCSIFFLAGS: | | 3070 | case SIOCSIFFLAGS: |
3071 | IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); | | 3071 | IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); |
3072 | break; | | 3072 | break; |
3073 | case SIOCADDMULTI: { | | 3073 | case SIOCADDMULTI: { |
3074 | struct ether_multi *enm; | | 3074 | struct ether_multi *enm; |
3075 | struct ether_multistep step; | | 3075 | struct ether_multistep step; |
3076 | struct ethercom *ec = &adapter->osdep.ec; | | 3076 | struct ethercom *ec = &adapter->osdep.ec; |
3077 | bool overflow = false; | | 3077 | bool overflow = false; |
3078 | int mcnt = 0; | | 3078 | int mcnt = 0; |
3079 | | | 3079 | |
3080 | /* | | 3080 | /* |
3081 | * Check the number of multicast address. If it exceeds, | | 3081 | * Check the number of multicast address. If it exceeds, |
3082 | * return ENOSPC. | | 3082 | * return ENOSPC. |
3083 | * Update this code when we support API 1.3. | | 3083 | * Update this code when we support API 1.3. |
3084 | */ | | 3084 | */ |
3085 | ETHER_LOCK(ec); | | 3085 | ETHER_LOCK(ec); |
3086 | ETHER_FIRST_MULTI(step, ec, enm); | | 3086 | ETHER_FIRST_MULTI(step, ec, enm); |
3087 | while (enm != NULL) { | | 3087 | while (enm != NULL) { |
3088 | mcnt++; | | 3088 | mcnt++; |
3089 | | | 3089 | |
3090 | /* | | 3090 | /* |
3091 | * This code is before adding, so one room is required | | 3091 | * This code is before adding, so one room is required |
3092 | * at least. | | 3092 | * at least. |
3093 | */ | | 3093 | */ |
3094 | if (mcnt > (IXGBE_MAX_VF_MC - 1)) { | | 3094 | if (mcnt > (IXGBE_MAX_VF_MC - 1)) { |
3095 | overflow = true; | | 3095 | overflow = true; |
3096 | break; | | 3096 | break; |
3097 | } | | 3097 | } |
3098 | ETHER_NEXT_MULTI(step, enm); | | 3098 | ETHER_NEXT_MULTI(step, enm); |
3099 | } | | 3099 | } |
3100 | ETHER_UNLOCK(ec); | | 3100 | ETHER_UNLOCK(ec); |
3101 | error = 0; | | 3101 | error = 0; |
3102 | if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) { | | 3102 | if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) { |
3103 | error = hw->mac.ops.update_xcast_mode(hw, | | 3103 | error = hw->mac.ops.update_xcast_mode(hw, |
3104 | IXGBEVF_XCAST_MODE_ALLMULTI); | | 3104 | IXGBEVF_XCAST_MODE_ALLMULTI); |
3105 | if (error == IXGBE_ERR_NOT_TRUSTED) { | | 3105 | if (error == IXGBE_ERR_NOT_TRUSTED) { |
3106 | device_printf(adapter->dev, | | 3106 | device_printf(adapter->dev, |
3107 | "this interface is not trusted\n"); | | 3107 | "this interface is not trusted\n"); |
3108 | error = EPERM; | | 3108 | error = EPERM; |
3109 | } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) { | | 3109 | } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) { |
3110 | device_printf(adapter->dev, | | 3110 | device_printf(adapter->dev, |
3111 | "the PF doesn't support allmulti mode\n"); | | 3111 | "the PF doesn't support allmulti mode\n"); |
3112 | error = EOPNOTSUPP; | | 3112 | error = EOPNOTSUPP; |
3113 | } else if (error) { | | 3113 | } else if (error) { |
3114 | device_printf(adapter->dev, | | 3114 | device_printf(adapter->dev, |
3115 | "number of Ethernet multicast addresses " | | 3115 | "number of Ethernet multicast addresses " |
3116 | "exceeds the limit (%d). error = %d\n", | | 3116 | "exceeds the limit (%d). error = %d\n", |
3117 | IXGBE_MAX_VF_MC, error); | | 3117 | IXGBE_MAX_VF_MC, error); |
3118 | error = ENOSPC; | | 3118 | error = ENOSPC; |
3119 | } else | | 3119 | } else |
3120 | ec->ec_flags |= ETHER_F_ALLMULTI; | | 3120 | ec->ec_flags |= ETHER_F_ALLMULTI; |
3121 | } | | 3121 | } |
3122 | if (error) | | 3122 | if (error) |
3123 | return error; | | 3123 | return error; |
3124 | } | | 3124 | } |
3125 | /*FALLTHROUGH*/ | | 3125 | /*FALLTHROUGH*/ |
3126 | case SIOCDELMULTI: | | 3126 | case SIOCDELMULTI: |
3127 | IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); | | 3127 | IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); |
3128 | break; | | 3128 | break; |
3129 | case SIOCSIFMEDIA: | | 3129 | case SIOCSIFMEDIA: |
3130 | case SIOCGIFMEDIA: | | 3130 | case SIOCGIFMEDIA: |
3131 | IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); | | 3131 | IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); |
3132 | break; | | 3132 | break; |
3133 | case SIOCSIFCAP: | | 3133 | case SIOCSIFCAP: |
3134 | IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); | | 3134 | IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); |
3135 | break; | | 3135 | break; |
3136 | case SIOCSIFMTU: | | 3136 | case SIOCSIFMTU: |
3137 | IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); | | 3137 | IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); |
3138 | break; | | 3138 | break; |
3139 | case SIOCZIFDATA: | | 3139 | case SIOCZIFDATA: |
3140 | IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)"); | | 3140 | IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)"); |
3141 | ixv_update_stats(adapter); | | 3141 | ixv_update_stats(adapter); |
3142 | ixv_clear_evcnt(adapter); | | 3142 | ixv_clear_evcnt(adapter); |
3143 | break; | | 3143 | break; |
3144 | default: | | 3144 | default: |
3145 | IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); | | 3145 | IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); |
3146 | break; | | 3146 | break; |
3147 | } | | 3147 | } |
3148 | | | 3148 | |
3149 | switch (command) { | | 3149 | switch (command) { |
3150 | case SIOCSIFCAP: | | 3150 | case SIOCSIFCAP: |
3151 | /* Layer-4 Rx checksum offload has to be turned on and | | 3151 | /* Layer-4 Rx checksum offload has to be turned on and |
3152 | * off as a unit. | | 3152 | * off as a unit. |
3153 | */ | | 3153 | */ |
3154 | l4csum_en = ifcr->ifcr_capenable & l4csum; | | 3154 | l4csum_en = ifcr->ifcr_capenable & l4csum; |
3155 | if (l4csum_en != l4csum && l4csum_en != 0) | | 3155 | if (l4csum_en != l4csum && l4csum_en != 0) |
3156 | return EINVAL; | | 3156 | return EINVAL; |
3157 | /*FALLTHROUGH*/ | | 3157 | /*FALLTHROUGH*/ |
3158 | case SIOCADDMULTI: | | 3158 | case SIOCADDMULTI: |
3159 | case SIOCDELMULTI: | | 3159 | case SIOCDELMULTI: |
3160 | case SIOCSIFFLAGS: | | 3160 | case SIOCSIFFLAGS: |
3161 | case SIOCSIFMTU: | | 3161 | case SIOCSIFMTU: |
3162 | default: | | 3162 | default: |
3163 | if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) | | 3163 | if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) |
3164 | return error; | | 3164 | return error; |
3165 | if ((ifp->if_flags & IFF_RUNNING) == 0) | | 3165 | if ((ifp->if_flags & IFF_RUNNING) == 0) |
3166 | ; | | 3166 | ; |
3167 | else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { | | 3167 | else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { |
3168 | IXGBE_CORE_LOCK(adapter); | | 3168 | IXGBE_CORE_LOCK(adapter); |
3169 | ixv_init_locked(adapter); | | 3169 | ixv_init_locked(adapter); |
3170 | IXGBE_CORE_UNLOCK(adapter); | | 3170 | IXGBE_CORE_UNLOCK(adapter); |
3171 | } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { | | 3171 | } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { |
3172 | /* | | 3172 | /* |
3173 | * Multicast list has changed; set the hardware filter | | 3173 | * Multicast list has changed; set the hardware filter |
3174 | * accordingly. | | 3174 | * accordingly. |
3175 | */ | | 3175 | */ |
3176 | IXGBE_CORE_LOCK(adapter); | | 3176 | IXGBE_CORE_LOCK(adapter); |
3177 | ixv_disable_intr(adapter); | | 3177 | ixv_disable_intr(adapter); |
3178 | ixv_set_rxfilter(adapter); | | 3178 | ixv_set_rxfilter(adapter); |
3179 | ixv_enable_intr(adapter); | | 3179 | ixv_enable_intr(adapter); |
3180 | IXGBE_CORE_UNLOCK(adapter); | | 3180 | IXGBE_CORE_UNLOCK(adapter); |
3181 | } | | 3181 | } |
3182 | return 0; | | 3182 | return 0; |
3183 | } | | 3183 | } |
3184 | } /* ixv_ioctl */ | | 3184 | } /* ixv_ioctl */ |
3185 | | | 3185 | |
3186 | /************************************************************************ | | 3186 | /************************************************************************ |
3187 | * ixv_init | | 3187 | * ixv_init |
3188 | ************************************************************************/ | | 3188 | ************************************************************************/ |
3189 | static int | | 3189 | static int |
3190 | ixv_init(struct ifnet *ifp) | | 3190 | ixv_init(struct ifnet *ifp) |
3191 | { | | 3191 | { |
3192 | struct adapter *adapter = ifp->if_softc; | | 3192 | struct adapter *adapter = ifp->if_softc; |
3193 | | | 3193 | |
3194 | IXGBE_CORE_LOCK(adapter); | | 3194 | IXGBE_CORE_LOCK(adapter); |
3195 | ixv_init_locked(adapter); | | 3195 | ixv_init_locked(adapter); |
3196 | IXGBE_CORE_UNLOCK(adapter); | | 3196 | IXGBE_CORE_UNLOCK(adapter); |
3197 | | | 3197 | |
3198 | return 0; | | 3198 | return 0; |
3199 | } /* ixv_init */ | | 3199 | } /* ixv_init */ |
3200 | | | 3200 | |
3201 | /************************************************************************ | | 3201 | /************************************************************************ |
3202 | * ixv_handle_que | | 3202 | * ixv_handle_que |
3203 | ************************************************************************/ | | 3203 | ************************************************************************/ |
3204 | static void | | 3204 | static void |
3205 | ixv_handle_que(void *context) | | 3205 | ixv_handle_que(void *context) |
3206 | { | | 3206 | { |
3207 | struct ix_queue *que = context; | | 3207 | struct ix_queue *que = context; |
3208 | struct adapter *adapter = que->adapter; | | 3208 | struct adapter *adapter = que->adapter; |
3209 | struct tx_ring *txr = que->txr; | | 3209 | struct tx_ring *txr = que->txr; |
3210 | struct ifnet *ifp = adapter->ifp; | | 3210 | struct ifnet *ifp = adapter->ifp; |
3211 | bool more; | | 3211 | bool more; |
3212 | | | 3212 | |
3213 | que->handleq.ev_count++; | | 3213 | que->handleq.ev_count++; |
3214 | | | 3214 | |
3215 | if (ifp->if_flags & IFF_RUNNING) { | | 3215 | if (ifp->if_flags & IFF_RUNNING) { |
3216 | more = ixgbe_rxeof(que); | | 3216 | more = ixgbe_rxeof(que); |
3217 | IXGBE_TX_LOCK(txr); | | 3217 | IXGBE_TX_LOCK(txr); |
3218 | more |= ixgbe_txeof(txr); | | 3218 | more |= ixgbe_txeof(txr); |
3219 | if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) | | 3219 | if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) |
3220 | if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) | | 3220 | if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) |
3221 | ixgbe_mq_start_locked(ifp, txr); | | 3221 | ixgbe_mq_start_locked(ifp, txr); |
3222 | /* Only for queue 0 */ | | 3222 | /* Only for queue 0 */ |
3223 | /* NetBSD still needs this for CBQ */ | | 3223 | /* NetBSD still needs this for CBQ */ |
3224 | if ((&adapter->queues[0] == que) | | 3224 | if ((&adapter->queues[0] == que) |
3225 | && (!ixgbe_legacy_ring_empty(ifp, NULL))) | | 3225 | && (!ixgbe_legacy_ring_empty(ifp, NULL))) |
3226 | ixgbe_legacy_start_locked(ifp, txr); | | 3226 | ixgbe_legacy_start_locked(ifp, txr); |
3227 | IXGBE_TX_UNLOCK(txr); | | 3227 | IXGBE_TX_UNLOCK(txr); |
3228 | if (more) { | | 3228 | if (more) { |
3229 | que->req.ev_count++; | | 3229 | que->req.ev_count++; |
3230 | if (adapter->txrx_use_workqueue) { | | 3230 | if (adapter->txrx_use_workqueue) { |
3231 | /* | | 3231 | /* |
3232 | * "enqueued flag" is not required here | | 3232 | * "enqueued flag" is not required here |
3233 | * the same as ixg(4). See ixgbe_msix_que(). | | 3233 | * the same as ixg(4). See ixgbe_msix_que(). |
3234 | */ | | 3234 | */ |
3235 | workqueue_enqueue(adapter->que_wq, | | 3235 | workqueue_enqueue(adapter->que_wq, |
3236 | &que->wq_cookie, curcpu()); | | 3236 | &que->wq_cookie, curcpu()); |
3237 | } else | | 3237 | } else |
3238 | softint_schedule(que->que_si); | | 3238 | softint_schedule(que->que_si); |
3239 | return; | | 3239 | return; |
3240 | } | | 3240 | } |
3241 | } | | 3241 | } |
3242 | | | 3242 | |
3243 | /* Re-enable this interrupt */ | | 3243 | /* Re-enable this interrupt */ |
3244 | ixv_enable_queue(adapter, que->msix); | | 3244 | ixv_enable_queue(adapter, que->msix); |
3245 | | | 3245 | |
3246 | return; | | 3246 | return; |
3247 | } /* ixv_handle_que */ | | 3247 | } /* ixv_handle_que */ |
3248 | | | 3248 | |
3249 | /************************************************************************ | | 3249 | /************************************************************************ |
3250 | * ixv_handle_que_work | | 3250 | * ixv_handle_que_work |
3251 | ************************************************************************/ | | 3251 | ************************************************************************/ |
3252 | static void | | 3252 | static void |
3253 | ixv_handle_que_work(struct work *wk, void *context) | | 3253 | ixv_handle_que_work(struct work *wk, void *context) |
3254 | { | | 3254 | { |
3255 | struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); | | 3255 | struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); |
3256 | | | 3256 | |
3257 | /* | | 3257 | /* |
3258 | * "enqueued flag" is not required here the same as ixg(4). | | 3258 | * "enqueued flag" is not required here the same as ixg(4). |
3259 | * See ixgbe_msix_que(). | | 3259 | * See ixgbe_msix_que(). |
3260 | */ | | 3260 | */ |
3261 | ixv_handle_que(que); | | 3261 | ixv_handle_que(que); |
3262 | } | | 3262 | } |
3263 | | | 3263 | |
3264 | /************************************************************************ | | 3264 | /************************************************************************ |
3265 | * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers | | 3265 | * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers |
3266 | ************************************************************************/ | | 3266 | ************************************************************************/ |
3267 | static int | | 3267 | static int |
3268 | ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa) | | 3268 | ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa) |
3269 | { | | 3269 | { |
3270 | device_t dev = adapter->dev; | | 3270 | device_t dev = adapter->dev; |
3271 | struct ix_queue *que = adapter->queues; | | 3271 | struct ix_queue *que = adapter->queues; |
3272 | struct tx_ring *txr = adapter->tx_rings; | | 3272 | struct tx_ring *txr = adapter->tx_rings; |
3273 | int error, msix_ctrl, rid, vector = 0; | | 3273 | int error, msix_ctrl, rid, vector = 0; |
3274 | pci_chipset_tag_t pc; | | 3274 | pci_chipset_tag_t pc; |
3275 | pcitag_t tag; | | 3275 | pcitag_t tag; |
3276 | char intrbuf[PCI_INTRSTR_LEN]; | | 3276 | char intrbuf[PCI_INTRSTR_LEN]; |
3277 | char wqname[MAXCOMLEN]; | | 3277 | char wqname[MAXCOMLEN]; |
3278 | char intr_xname[32]; | | 3278 | char intr_xname[32]; |
3279 | const char *intrstr = NULL; | | 3279 | const char *intrstr = NULL; |
3280 | kcpuset_t *affinity; | | 3280 | kcpuset_t *affinity; |
3281 | int cpu_id = 0; | | 3281 | int cpu_id = 0; |
3282 | | | 3282 | |
3283 | pc = adapter->osdep.pc; | | 3283 | pc = adapter->osdep.pc; |
3284 | tag = adapter->osdep.tag; | | 3284 | tag = adapter->osdep.tag; |
3285 | | | 3285 | |
3286 | adapter->osdep.nintrs = adapter->num_queues + 1; | | 3286 | adapter->osdep.nintrs = adapter->num_queues + 1; |
3287 | if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs, | | 3287 | if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs, |
3288 | adapter->osdep.nintrs) != 0) { | | 3288 | adapter->osdep.nintrs) != 0) { |
3289 | aprint_error_dev(dev, | | 3289 | aprint_error_dev(dev, |
3290 | "failed to allocate MSI-X interrupt\n"); | | 3290 | "failed to allocate MSI-X interrupt\n"); |
3291 | return (ENXIO); | | 3291 | return (ENXIO); |
3292 | } | | 3292 | } |
3293 | | | 3293 | |
3294 | kcpuset_create(&affinity, false); | | 3294 | kcpuset_create(&affinity, false); |
3295 | for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { | | 3295 | for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { |
3296 | snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", | | 3296 | snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", |
3297 | device_xname(dev), i); | | 3297 | device_xname(dev), i); |
3298 | intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf, | | 3298 | intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf, |
3299 | sizeof(intrbuf)); | | 3299 | sizeof(intrbuf)); |
3300 | #ifdef IXGBE_MPSAFE | | 3300 | #ifdef IXGBE_MPSAFE |
3301 | pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE, | | 3301 | pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE, |
3302 | true); | | 3302 | true); |
3303 | #endif | | 3303 | #endif |
3304 | /* Set the handler function */ | | 3304 | /* Set the handler function */ |
3305 | que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc, | | 3305 | que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc, |
3306 | adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que, | | 3306 | adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que, |
3307 | intr_xname); | | 3307 | intr_xname); |
3308 | if (que->res == NULL) { | | 3308 | if (que->res == NULL) { |
3309 | pci_intr_release(pc, adapter->osdep.intrs, | | 3309 | pci_intr_release(pc, adapter->osdep.intrs, |
3310 | adapter->osdep.nintrs); | | 3310 | adapter->osdep.nintrs); |
3311 | aprint_error_dev(dev, | | 3311 | aprint_error_dev(dev, |
3312 | "Failed to register QUE handler\n"); | | 3312 | "Failed to register QUE handler\n"); |
3313 | kcpuset_destroy(affinity); | | 3313 | kcpuset_destroy(affinity); |
3314 | return (ENXIO); | | 3314 | return (ENXIO); |
3315 | } | | 3315 | } |
3316 | que->msix = vector; | | 3316 | que->msix = vector; |
3317 | adapter->active_queues |= (u64)(1 << que->msix); | | 3317 | adapter->active_queues |= (u64)(1 << que->msix); |
3318 | | | 3318 | |
3319 | cpu_id = i; | | 3319 | cpu_id = i; |
3320 | /* Round-robin affinity */ | | 3320 | /* Round-robin affinity */ |
3321 | kcpuset_zero(affinity); | | 3321 | kcpuset_zero(affinity); |
3322 | kcpuset_set(affinity, cpu_id % ncpu); | | 3322 | kcpuset_set(affinity, cpu_id % ncpu); |
3323 | error = interrupt_distribute(adapter->osdep.ihs[i], affinity, | | 3323 | error = interrupt_distribute(adapter->osdep.ihs[i], affinity, |
3324 | NULL); | | 3324 | NULL); |
3325 | aprint_normal_dev(dev, "for TX/RX, interrupting at %s", | | 3325 | aprint_normal_dev(dev, "for TX/RX, interrupting at %s", |
3326 | intrstr); | | 3326 | intrstr); |
3327 | if (error == 0) | | 3327 | if (error == 0) |
3328 | aprint_normal(", bound queue %d to cpu %d\n", | | 3328 | aprint_normal(", bound queue %d to cpu %d\n", |
3329 | i, cpu_id % ncpu); | | 3329 | i, cpu_id % ncpu); |
3330 | else | | 3330 | else |
3331 | aprint_normal("\n"); | | 3331 | aprint_normal("\n"); |
3332 | | | 3332 | |
3333 | #ifndef IXGBE_LEGACY_TX | | 3333 | #ifndef IXGBE_LEGACY_TX |
3334 | txr->txr_si | | 3334 | txr->txr_si |
3335 | = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, | | 3335 | = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, |
3336 | ixgbe_deferred_mq_start, txr); | | 3336 | ixgbe_deferred_mq_start, txr); |
3337 | #endif | | 3337 | #endif |
3338 | que->que_si | | 3338 | que->que_si |
3339 | = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, | | 3339 | = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, |
3340 | ixv_handle_que, que); | | 3340 | ixv_handle_que, que); |
3341 | if (que->que_si == NULL) { | | 3341 | if (que->que_si == NULL) { |
3342 | aprint_error_dev(dev, | | 3342 | aprint_error_dev(dev, |
3343 | "could not establish software interrupt\n"); | | 3343 | "could not establish software interrupt\n"); |
3344 | } | | 3344 | } |
3345 | } | | 3345 | } |
3346 | snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev)); | | 3346 | snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev)); |
3347 | error = workqueue_create(&adapter->txr_wq, wqname, | | 3347 | error = workqueue_create(&adapter->txr_wq, wqname, |
3348 | ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, | | 3348 | ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, |
3349 | IXGBE_WORKQUEUE_FLAGS); | | 3349 | IXGBE_WORKQUEUE_FLAGS); |
3350 | if (error) { | | 3350 | if (error) { |
3351 | aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n"); | | 3351 | aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n"); |
3352 | } | | 3352 | } |
3353 | adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); | | 3353 | adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); |
3354 | | | 3354 | |
3355 | snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); | | 3355 | snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); |
3356 | error = workqueue_create(&adapter->que_wq, wqname, | | 3356 | error = workqueue_create(&adapter->que_wq, wqname, |
3357 | ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, | | 3357 | ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, |
3358 | IXGBE_WORKQUEUE_FLAGS); | | 3358 | IXGBE_WORKQUEUE_FLAGS); |
3359 | if (error) { | | 3359 | if (error) { |
3360 | aprint_error_dev(dev, | | 3360 | aprint_error_dev(dev, |
3361 | "couldn't create workqueue\n"); | | 3361 | "couldn't create workqueue for Tx/Rx\n"); |
3362 | } | | 3362 | } |
3363 | | | 3363 | |
3364 | /* and Mailbox */ | | 3364 | /* and Mailbox */ |
3365 | cpu_id++; | | 3365 | cpu_id++; |
3366 | snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev)); | | 3366 | snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev)); |
3367 | adapter->vector = vector; | | 3367 | adapter->vector = vector; |
3368 | intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf, | | 3368 | intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf, |
3369 | sizeof(intrbuf)); | | 3369 | sizeof(intrbuf)); |
3370 | #ifdef IXGBE_MPSAFE | | 3370 | #ifdef IXGBE_MPSAFE |
3371 | pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, | | 3371 | pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, |
3372 | true); | | 3372 | true); |
3373 | #endif | | 3373 | #endif |
3374 | /* Set the mbx handler function */ | | 3374 | /* Set the mbx handler function */ |
3375 | adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc, | | 3375 | adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc, |
3376 | adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter, | | 3376 | adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter, |
3377 | intr_xname); | | 3377 | intr_xname); |
3378 | if (adapter->osdep.ihs[vector] == NULL) { | | 3378 | if (adapter->osdep.ihs[vector] == NULL) { |
3379 | aprint_error_dev(dev, "Failed to register LINK handler\n"); | | 3379 | aprint_error_dev(dev, "Failed to register LINK handler\n"); |
3380 | kcpuset_destroy(affinity); | | 3380 | kcpuset_destroy(affinity); |
3381 | return (ENXIO); | | 3381 | return (ENXIO); |
3382 | } | | 3382 | } |
3383 | /* Round-robin affinity */ | | 3383 | /* Round-robin affinity */ |
3384 | kcpuset_zero(affinity); | | 3384 | kcpuset_zero(affinity); |
3385 | kcpuset_set(affinity, cpu_id % ncpu); | | 3385 | kcpuset_set(affinity, cpu_id % ncpu); |
3386 | error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, | | 3386 | error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, |
3387 | NULL); | | 3387 | NULL); |
3388 | | | 3388 | |
3389 | aprint_normal_dev(dev, | | 3389 | aprint_normal_dev(dev, |
3390 | "for link, interrupting at %s", intrstr); | | 3390 | "for link, interrupting at %s", intrstr); |
3391 | if (error == 0) | | 3391 | if (error == 0) |
3392 | aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu); | | 3392 | aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu); |
3393 | else | | 3393 | else |
3394 | aprint_normal("\n"); | | 3394 | aprint_normal("\n"); |
3395 | | | 3395 | |
3396 | /* Tasklets for Mailbox */ | | 3396 | /* Tasklets for Mailbox */ |
3397 | snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev)); | | 3397 | snprintf(wqname, sizeof(wqname), "%s-admin", device_xname(dev)); |
3398 | error = workqueue_create(&adapter->admin_wq, wqname, | | 3398 | error = workqueue_create(&adapter->admin_wq, wqname, |
3399 | ixv_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, | | 3399 | ixv_handle_admin, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, |
3400 | IXGBE_TASKLET_WQ_FLAGS); | | 3400 | IXGBE_TASKLET_WQ_FLAGS); |
3401 | if (error) { | | 3401 | if (error) { |
3402 | aprint_error_dev(dev, | | 3402 | aprint_error_dev(dev, |
3403 | "could not create admin workqueue (%d)\n", error); | | 3403 | "could not create admin workqueue (%d)\n", error); |
3404 | goto err_out; | | 3404 | goto err_out; |
3405 | } | | 3405 | } |
3406 | | | 3406 | |
3407 | /* | | 3407 | /* |
3408 | * Due to a broken design QEMU will fail to properly | | 3408 | * Due to a broken design QEMU will fail to properly |
3409 | * enable the guest for MSI-X unless the vectors in | | 3409 | * enable the guest for MSI-X unless the vectors in |
3410 | * the table are all set up, so we must rewrite the | | 3410 | * the table are all set up, so we must rewrite the |
3411 | * ENABLE in the MSI-X control register again at this | | 3411 | * ENABLE in the MSI-X control register again at this |
3412 | * point to cause it to successfully initialize us. | | 3412 | * point to cause it to successfully initialize us. |
3413 | */ | | 3413 | */ |
3414 | if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { | | 3414 | if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { |
3415 | pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL); | | 3415 | pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL); |
3416 | rid += PCI_MSIX_CTL; | | 3416 | rid += PCI_MSIX_CTL; |
3417 | msix_ctrl = pci_conf_read(pc, tag, rid); | | 3417 | msix_ctrl = pci_conf_read(pc, tag, rid); |
3418 | msix_ctrl |= PCI_MSIX_CTL_ENABLE; | | 3418 | msix_ctrl |= PCI_MSIX_CTL_ENABLE; |
3419 | pci_conf_write(pc, tag, rid, msix_ctrl); | | 3419 | pci_conf_write(pc, tag, rid, msix_ctrl); |
3420 | } | | 3420 | } |
3421 | | | 3421 | |
3422 | kcpuset_destroy(affinity); | | 3422 | kcpuset_destroy(affinity); |
3423 | return (0); | | 3423 | return (0); |
3424 | err_out: | | 3424 | err_out: |
3425 | kcpuset_destroy(affinity); | | 3425 | kcpuset_destroy(affinity); |
3426 | ixv_free_deferred_handlers(adapter); | | 3426 | ixv_free_deferred_handlers(adapter); |
3427 | ixv_free_pci_resources(adapter); | | 3427 | ixv_free_pci_resources(adapter); |
3428 | return (error); | | 3428 | return (error); |
3429 | } /* ixv_allocate_msix */ | | 3429 | } /* ixv_allocate_msix */ |
3430 | | | 3430 | |
3431 | /************************************************************************ | | 3431 | /************************************************************************ |
3432 | * ixv_configure_interrupts - Setup MSI-X resources | | 3432 | * ixv_configure_interrupts - Setup MSI-X resources |
3433 | * | | 3433 | * |
3434 | * Note: The VF device MUST use MSI-X, there is no fallback. | | 3434 | * Note: The VF device MUST use MSI-X, there is no fallback. |
3435 | ************************************************************************/ | | 3435 | ************************************************************************/ |
3436 | static int | | 3436 | static int |
3437 | ixv_configure_interrupts(struct adapter *adapter) | | 3437 | ixv_configure_interrupts(struct adapter *adapter) |
3438 | { | | 3438 | { |
3439 | device_t dev = adapter->dev; | | 3439 | device_t dev = adapter->dev; |
3440 | int want, queues, msgs; | | 3440 | int want, queues, msgs; |
3441 | | | 3441 | |
3442 | /* Must have at least 2 MSI-X vectors */ | | 3442 | /* Must have at least 2 MSI-X vectors */ |
3443 | msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag); | | 3443 | msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag); |
3444 | if (msgs < 2) { | | 3444 | if (msgs < 2) { |
3445 | aprint_error_dev(dev, "MSIX config error\n"); | | 3445 | aprint_error_dev(dev, "MSIX config error\n"); |
3446 | return (ENXIO); | | 3446 | return (ENXIO); |
3447 | } | | 3447 | } |
3448 | msgs = MIN(msgs, IXG_MAX_NINTR); | | 3448 | msgs = MIN(msgs, IXG_MAX_NINTR); |
3449 | | | 3449 | |
3450 | /* Figure out a reasonable auto config value */ | | 3450 | /* Figure out a reasonable auto config value */ |
3451 | queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu; | | 3451 | queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu; |
3452 | | | 3452 | |
3453 | if (ixv_num_queues != 0) | | 3453 | if (ixv_num_queues != 0) |
3454 | queues = ixv_num_queues; | | 3454 | queues = ixv_num_queues; |
3455 | else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES)) | | 3455 | else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES)) |
3456 | queues = IXGBE_VF_MAX_TX_QUEUES; | | 3456 | queues = IXGBE_VF_MAX_TX_QUEUES; |
3457 | | | 3457 | |
3458 | /* | | 3458 | /* |
3459 | * Want vectors for the queues, | | 3459 | * Want vectors for the queues, |
3460 | * plus an additional for mailbox. | | 3460 | * plus an additional for mailbox. |
3461 | */ | | 3461 | */ |
3462 | want = queues + 1; | | 3462 | want = queues + 1; |
3463 | if (msgs >= want) | | 3463 | if (msgs >= want) |
3464 | msgs = want; | | 3464 | msgs = want; |
3465 | else { | | 3465 | else { |
3466 | aprint_error_dev(dev, | | 3466 | aprint_error_dev(dev, |
3467 | "MSI-X Configuration Problem, " | | 3467 | "MSI-X Configuration Problem, " |
3468 | "%d vectors but %d queues wanted!\n", | | 3468 | "%d vectors but %d queues wanted!\n", |
3469 | msgs, want); | | 3469 | msgs, want); |
3470 | return -1; | | 3470 | return -1; |
3471 | } | | 3471 | } |
3472 | | | 3472 | |
3473 | adapter->msix_mem = (void *)1; /* XXX */ | | 3473 | adapter->msix_mem = (void *)1; /* XXX */ |
3474 | aprint_normal_dev(dev, | | 3474 | aprint_normal_dev(dev, |
3475 | "Using MSI-X interrupts with %d vectors\n", msgs); | | 3475 | "Using MSI-X interrupts with %d vectors\n", msgs); |
3476 | adapter->num_queues = queues; | | 3476 | adapter->num_queues = queues; |
3477 | | | 3477 | |
3478 | return (0); | | 3478 | return (0); |
3479 | } /* ixv_configure_interrupts */ | | 3479 | } /* ixv_configure_interrupts */ |
3480 | | | 3480 | |
3481 | | | 3481 | |
3482 | /************************************************************************ | | 3482 | /************************************************************************ |
3483 | * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts | | 3483 | * ixv_handle_admin - Tasklet handler for MSI-X MBX interrupts |
3484 | * | | 3484 | * |
3485 | * Done outside of interrupt context since the driver might sleep | | 3485 | * Done outside of interrupt context since the driver might sleep |
3486 | ************************************************************************/ | | 3486 | ************************************************************************/ |
3487 | static void | | 3487 | static void |
3488 | ixv_handle_admin(struct work *wk, void *context) | | 3488 | ixv_handle_admin(struct work *wk, void *context) |
3489 | { | | 3489 | { |
3490 | struct adapter *adapter = context; | | 3490 | struct adapter *adapter = context; |
3491 | struct ixgbe_hw *hw = &adapter->hw; | | 3491 | struct ixgbe_hw *hw = &adapter->hw; |
3492 | | | 3492 | |
3493 | IXGBE_CORE_LOCK(adapter); | | 3493 | IXGBE_CORE_LOCK(adapter); |
3494 | | | 3494 | |
3495 | ++adapter->link_workev.ev_count; | | 3495 | ++adapter->link_workev.ev_count; |
3496 | adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed, | | 3496 | adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed, |
3497 | &adapter->link_up, FALSE); | | 3497 | &adapter->link_up, FALSE); |
3498 | ixv_update_link_status(adapter); | | 3498 | ixv_update_link_status(adapter); |
3499 | | | 3499 | |
3500 | adapter->task_requests = 0; | | 3500 | adapter->task_requests = 0; |
3501 | atomic_store_relaxed(&adapter->admin_pending, 0); | | 3501 | atomic_store_relaxed(&adapter->admin_pending, 0); |
3502 | | | 3502 | |
3503 | /* Re-enable interrupts */ | | 3503 | /* Re-enable interrupts */ |
3504 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); | | 3504 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); |
3505 | | | 3505 | |
3506 | IXGBE_CORE_UNLOCK(adapter); | | 3506 | IXGBE_CORE_UNLOCK(adapter); |
3507 | } /* ixv_handle_admin */ | | 3507 | } /* ixv_handle_admin */ |
3508 | | | 3508 | |
3509 | /************************************************************************ | | 3509 | /************************************************************************ |
3510 | * ixv_check_link - Used in the local timer to poll for link changes | | 3510 | * ixv_check_link - Used in the local timer to poll for link changes |
3511 | ************************************************************************/ | | 3511 | ************************************************************************/ |
3512 | static s32 | | 3512 | static s32 |
3513 | ixv_check_link(struct adapter *adapter) | | 3513 | ixv_check_link(struct adapter *adapter) |
3514 | { | | 3514 | { |
3515 | s32 error; | | 3515 | s32 error; |
3516 | | | 3516 | |
3517 | KASSERT(mutex_owned(&adapter->core_mtx)); | | 3517 | KASSERT(mutex_owned(&adapter->core_mtx)); |
3518 | | | 3518 | |
3519 | adapter->hw.mac.get_link_status = TRUE; | | 3519 | adapter->hw.mac.get_link_status = TRUE; |
3520 | | | 3520 | |
3521 | error = adapter->hw.mac.ops.check_link(&adapter->hw, | | 3521 | error = adapter->hw.mac.ops.check_link(&adapter->hw, |
3522 | &adapter->link_speed, &adapter->link_up, FALSE); | | 3522 | &adapter->link_speed, &adapter->link_up, FALSE); |
3523 | ixv_update_link_status(adapter); | | 3523 | ixv_update_link_status(adapter); |
3524 | | | 3524 | |
3525 | return error; | | 3525 | return error; |
3526 | } /* ixv_check_link */ | | 3526 | } /* ixv_check_link */ |