| @@ -1,1753 +1,1756 @@ | | | @@ -1,1753 +1,1756 @@ |
1 | /*$NetBSD: ixv.c,v 1.125.2.4 2019/09/26 19:07:22 martin Exp $*/ | | 1 | /*$NetBSD: ixv.c,v 1.125.2.5 2019/10/08 17:05:16 martin Exp $*/ |
2 | | | 2 | |
3 | /****************************************************************************** | | 3 | /****************************************************************************** |
4 | | | 4 | |
5 | Copyright (c) 2001-2017, Intel Corporation | | 5 | Copyright (c) 2001-2017, Intel Corporation |
6 | All rights reserved. | | 6 | All rights reserved. |
7 | | | 7 | |
8 | Redistribution and use in source and binary forms, with or without | | 8 | Redistribution and use in source and binary forms, with or without |
9 | modification, are permitted provided that the following conditions are met: | | 9 | modification, are permitted provided that the following conditions are met: |
10 | | | 10 | |
11 | 1. Redistributions of source code must retain the above copyright notice, | | 11 | 1. Redistributions of source code must retain the above copyright notice, |
12 | this list of conditions and the following disclaimer. | | 12 | this list of conditions and the following disclaimer. |
13 | | | 13 | |
14 | 2. Redistributions in binary form must reproduce the above copyright | | 14 | 2. Redistributions in binary form must reproduce the above copyright |
15 | notice, this list of conditions and the following disclaimer in the | | 15 | notice, this list of conditions and the following disclaimer in the |
16 | documentation and/or other materials provided with the distribution. | | 16 | documentation and/or other materials provided with the distribution. |
17 | | | 17 | |
18 | 3. Neither the name of the Intel Corporation nor the names of its | | 18 | 3. Neither the name of the Intel Corporation nor the names of its |
19 | contributors may be used to endorse or promote products derived from | | 19 | contributors may be used to endorse or promote products derived from |
20 | this software without specific prior written permission. | | 20 | this software without specific prior written permission. |
21 | | | 21 | |
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
32 | POSSIBILITY OF SUCH DAMAGE. | | 32 | POSSIBILITY OF SUCH DAMAGE. |
33 | | | 33 | |
34 | ******************************************************************************/ | | 34 | ******************************************************************************/ |
35 | /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ | | 35 | /*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ |
36 | | | 36 | |
37 | #ifdef _KERNEL_OPT | | 37 | #ifdef _KERNEL_OPT |
38 | #include "opt_inet.h" | | 38 | #include "opt_inet.h" |
39 | #include "opt_inet6.h" | | 39 | #include "opt_inet6.h" |
40 | #include "opt_net_mpsafe.h" | | 40 | #include "opt_net_mpsafe.h" |
41 | #endif | | 41 | #endif |
42 | | | 42 | |
43 | #include "ixgbe.h" | | 43 | #include "ixgbe.h" |
44 | #include "vlan.h" | | 44 | #include "vlan.h" |
45 | | | 45 | |
46 | /************************************************************************ | | 46 | /************************************************************************ |
47 | * Driver version | | 47 | * Driver version |
48 | ************************************************************************/ | | 48 | ************************************************************************/ |
49 | static const char ixv_driver_version[] = "2.0.1-k"; | | 49 | static const char ixv_driver_version[] = "2.0.1-k"; |
50 | /* XXX NetBSD: + 1.5.17 */ | | 50 | /* XXX NetBSD: + 1.5.17 */ |
51 | | | 51 | |
52 | /************************************************************************ | | 52 | /************************************************************************ |
53 | * PCI Device ID Table | | 53 | * PCI Device ID Table |
54 | * | | 54 | * |
55 | * Used by probe to select devices to load on | | 55 | * Used by probe to select devices to load on |
56 | * Last field stores an index into ixv_strings | | 56 | * Last field stores an index into ixv_strings |
57 | * Last entry must be all 0s | | 57 | * Last entry must be all 0s |
58 | * | | 58 | * |
59 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | | 59 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } |
60 | ************************************************************************/ | | 60 | ************************************************************************/ |
61 | static const ixgbe_vendor_info_t ixv_vendor_info_array[] = | | 61 | static const ixgbe_vendor_info_t ixv_vendor_info_array[] = |
62 | { | | 62 | { |
63 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, | | 63 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, |
64 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, | | 64 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, |
65 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, | | 65 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, |
66 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, | | 66 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, |
67 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0}, | | 67 | {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0}, |
68 | /* required last entry */ | | 68 | /* required last entry */ |
69 | {0, 0, 0, 0, 0} | | 69 | {0, 0, 0, 0, 0} |
70 | }; | | 70 | }; |
71 | | | 71 | |
72 | /************************************************************************ | | 72 | /************************************************************************ |
73 | * Table of branding strings | | 73 | * Table of branding strings |
74 | ************************************************************************/ | | 74 | ************************************************************************/ |
75 | static const char *ixv_strings[] = { | | 75 | static const char *ixv_strings[] = { |
76 | "Intel(R) PRO/10GbE Virtual Function Network Driver" | | 76 | "Intel(R) PRO/10GbE Virtual Function Network Driver" |
77 | }; | | 77 | }; |
78 | | | 78 | |
79 | /********************************************************************* | | 79 | /********************************************************************* |
80 | * Function prototypes | | 80 | * Function prototypes |
81 | *********************************************************************/ | | 81 | *********************************************************************/ |
82 | static int ixv_probe(device_t, cfdata_t, void *); | | 82 | static int ixv_probe(device_t, cfdata_t, void *); |
83 | static void ixv_attach(device_t, device_t, void *); | | 83 | static void ixv_attach(device_t, device_t, void *); |
84 | static int ixv_detach(device_t, int); | | 84 | static int ixv_detach(device_t, int); |
85 | #if 0 | | 85 | #if 0 |
86 | static int ixv_shutdown(device_t); | | 86 | static int ixv_shutdown(device_t); |
87 | #endif | | 87 | #endif |
88 | static int ixv_ifflags_cb(struct ethercom *); | | 88 | static int ixv_ifflags_cb(struct ethercom *); |
89 | static int ixv_ioctl(struct ifnet *, u_long, void *); | | 89 | static int ixv_ioctl(struct ifnet *, u_long, void *); |
90 | static int ixv_init(struct ifnet *); | | 90 | static int ixv_init(struct ifnet *); |
91 | static void ixv_init_locked(struct adapter *); | | 91 | static void ixv_init_locked(struct adapter *); |
92 | static void ixv_ifstop(struct ifnet *, int); | | 92 | static void ixv_ifstop(struct ifnet *, int); |
93 | static void ixv_stop(void *); | | 93 | static void ixv_stop(void *); |
94 | static void ixv_init_device_features(struct adapter *); | | 94 | static void ixv_init_device_features(struct adapter *); |
95 | static void ixv_media_status(struct ifnet *, struct ifmediareq *); | | 95 | static void ixv_media_status(struct ifnet *, struct ifmediareq *); |
96 | static int ixv_media_change(struct ifnet *); | | 96 | static int ixv_media_change(struct ifnet *); |
97 | static int ixv_allocate_pci_resources(struct adapter *, | | 97 | static int ixv_allocate_pci_resources(struct adapter *, |
98 | const struct pci_attach_args *); | | 98 | const struct pci_attach_args *); |
99 | static int ixv_allocate_msix(struct adapter *, | | 99 | static int ixv_allocate_msix(struct adapter *, |
100 | const struct pci_attach_args *); | | 100 | const struct pci_attach_args *); |
101 | static int ixv_configure_interrupts(struct adapter *); | | 101 | static int ixv_configure_interrupts(struct adapter *); |
102 | static void ixv_free_pci_resources(struct adapter *); | | 102 | static void ixv_free_pci_resources(struct adapter *); |
103 | static void ixv_local_timer(void *); | | 103 | static void ixv_local_timer(void *); |
104 | static void ixv_local_timer_locked(void *); | | 104 | static void ixv_local_timer_locked(void *); |
105 | static int ixv_setup_interface(device_t, struct adapter *); | | 105 | static int ixv_setup_interface(device_t, struct adapter *); |
106 | static int ixv_negotiate_api(struct adapter *); | | 106 | static int ixv_negotiate_api(struct adapter *); |
107 | | | 107 | |
108 | static void ixv_initialize_transmit_units(struct adapter *); | | 108 | static void ixv_initialize_transmit_units(struct adapter *); |
109 | static void ixv_initialize_receive_units(struct adapter *); | | 109 | static void ixv_initialize_receive_units(struct adapter *); |
110 | static void ixv_initialize_rss_mapping(struct adapter *); | | 110 | static void ixv_initialize_rss_mapping(struct adapter *); |
111 | static s32 ixv_check_link(struct adapter *); | | 111 | static s32 ixv_check_link(struct adapter *); |
112 | | | 112 | |
113 | static void ixv_enable_intr(struct adapter *); | | 113 | static void ixv_enable_intr(struct adapter *); |
114 | static void ixv_disable_intr(struct adapter *); | | 114 | static void ixv_disable_intr(struct adapter *); |
115 | static int ixv_set_rxfilter(struct adapter *); | | 115 | static int ixv_set_rxfilter(struct adapter *); |
116 | static void ixv_update_link_status(struct adapter *); | | 116 | static void ixv_update_link_status(struct adapter *); |
117 | static int ixv_sysctl_debug(SYSCTLFN_PROTO); | | 117 | static int ixv_sysctl_debug(SYSCTLFN_PROTO); |
118 | static void ixv_set_ivar(struct adapter *, u8, u8, s8); | | 118 | static void ixv_set_ivar(struct adapter *, u8, u8, s8); |
119 | static void ixv_configure_ivars(struct adapter *); | | 119 | static void ixv_configure_ivars(struct adapter *); |
120 | static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); | | 120 | static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); |
121 | static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t); | | 121 | static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t); |
122 | | | 122 | |
123 | static void ixv_setup_vlan_tagging(struct adapter *); | | 123 | static void ixv_setup_vlan_tagging(struct adapter *); |
124 | static int ixv_setup_vlan_support(struct adapter *); | | 124 | static int ixv_setup_vlan_support(struct adapter *); |
125 | static int ixv_vlan_cb(struct ethercom *, uint16_t, bool); | | 125 | static int ixv_vlan_cb(struct ethercom *, uint16_t, bool); |
126 | static int ixv_register_vlan(struct adapter *, u16); | | 126 | static int ixv_register_vlan(struct adapter *, u16); |
127 | static int ixv_unregister_vlan(struct adapter *, u16); | | 127 | static int ixv_unregister_vlan(struct adapter *, u16); |
128 | | | 128 | |
129 | static void ixv_add_device_sysctls(struct adapter *); | | 129 | static void ixv_add_device_sysctls(struct adapter *); |
130 | static void ixv_save_stats(struct adapter *); | | 130 | static void ixv_save_stats(struct adapter *); |
131 | static void ixv_init_stats(struct adapter *); | | 131 | static void ixv_init_stats(struct adapter *); |
132 | static void ixv_update_stats(struct adapter *); | | 132 | static void ixv_update_stats(struct adapter *); |
133 | static void ixv_add_stats_sysctls(struct adapter *); | | 133 | static void ixv_add_stats_sysctls(struct adapter *); |
134 | static void ixv_clear_evcnt(struct adapter *); | | 134 | static void ixv_clear_evcnt(struct adapter *); |
135 | | | 135 | |
136 | /* Sysctl handlers */ | | 136 | /* Sysctl handlers */ |
137 | static void ixv_set_sysctl_value(struct adapter *, const char *, | | 137 | static void ixv_set_sysctl_value(struct adapter *, const char *, |
138 | const char *, int *, int); | | 138 | const char *, int *, int); |
139 | static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); | | 139 | static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); |
140 | static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO); | | 140 | static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO); |
141 | static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO); | | 141 | static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO); |
142 | static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO); | | 142 | static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO); |
143 | static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO); | | 143 | static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO); |
144 | static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO); | | 144 | static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO); |
145 | | | 145 | |
146 | /* The MSI-X Interrupt handlers */ | | 146 | /* The MSI-X Interrupt handlers */ |
147 | static int ixv_msix_que(void *); | | 147 | static int ixv_msix_que(void *); |
148 | static int ixv_msix_mbx(void *); | | 148 | static int ixv_msix_mbx(void *); |
149 | | | 149 | |
150 | /* Deferred interrupt tasklets */ | | 150 | /* Deferred interrupt tasklets */ |
151 | static void ixv_handle_que(void *); | | 151 | static void ixv_handle_que(void *); |
152 | static void ixv_handle_link(void *); | | 152 | static void ixv_handle_link(void *); |
153 | | | 153 | |
154 | /* Workqueue handler for deferred work */ | | 154 | /* Workqueue handler for deferred work */ |
155 | static void ixv_handle_que_work(struct work *, void *); | | 155 | static void ixv_handle_que_work(struct work *, void *); |
156 | | | 156 | |
157 | const struct sysctlnode *ixv_sysctl_instance(struct adapter *); | | 157 | const struct sysctlnode *ixv_sysctl_instance(struct adapter *); |
158 | static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *); | | 158 | static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *); |
159 | | | 159 | |
160 | /************************************************************************ | | 160 | /************************************************************************ |
161 | * FreeBSD Device Interface Entry Points | | 161 | * FreeBSD Device Interface Entry Points |
162 | ************************************************************************/ | | 162 | ************************************************************************/ |
163 | CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter), | | 163 | CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter), |
164 | ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL, | | 164 | ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL, |
165 | DVF_DETACH_SHUTDOWN); | | 165 | DVF_DETACH_SHUTDOWN); |
166 | | | 166 | |
167 | #if 0 | | 167 | #if 0 |
168 | static driver_t ixv_driver = { | | 168 | static driver_t ixv_driver = { |
169 | "ixv", ixv_methods, sizeof(struct adapter), | | 169 | "ixv", ixv_methods, sizeof(struct adapter), |
170 | }; | | 170 | }; |
171 | | | 171 | |
172 | devclass_t ixv_devclass; | | 172 | devclass_t ixv_devclass; |
173 | DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); | | 173 | DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); |
174 | MODULE_DEPEND(ixv, pci, 1, 1, 1); | | 174 | MODULE_DEPEND(ixv, pci, 1, 1, 1); |
175 | MODULE_DEPEND(ixv, ether, 1, 1, 1); | | 175 | MODULE_DEPEND(ixv, ether, 1, 1, 1); |
176 | #endif | | 176 | #endif |
177 | | | 177 | |
178 | /* | | 178 | /* |
179 | * TUNEABLE PARAMETERS: | | 179 | * TUNEABLE PARAMETERS: |
180 | */ | | 180 | */ |
181 | | | 181 | |
182 | /* Number of Queues - do not exceed MSI-X vectors - 1 */ | | 182 | /* Number of Queues - do not exceed MSI-X vectors - 1 */ |
183 | static int ixv_num_queues = 0; | | 183 | static int ixv_num_queues = 0; |
184 | #define TUNABLE_INT(__x, __y) | | 184 | #define TUNABLE_INT(__x, __y) |
185 | TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues); | | 185 | TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues); |
186 | | | 186 | |
187 | /* | | 187 | /* |
188 | * AIM: Adaptive Interrupt Moderation | | 188 | * AIM: Adaptive Interrupt Moderation |
189 | * which means that the interrupt rate | | 189 | * which means that the interrupt rate |
190 | * is varied over time based on the | | 190 | * is varied over time based on the |
191 | * traffic for that interrupt vector | | 191 | * traffic for that interrupt vector |
192 | */ | | 192 | */ |
193 | static bool ixv_enable_aim = false; | | 193 | static bool ixv_enable_aim = false; |
194 | TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); | | 194 | TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); |
195 | | | 195 | |
196 | static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); | | 196 | static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); |
197 | TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate); | | 197 | TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate); |
198 | | | 198 | |
199 | /* How many packets rxeof tries to clean at a time */ | | 199 | /* How many packets rxeof tries to clean at a time */ |
200 | static int ixv_rx_process_limit = 256; | | 200 | static int ixv_rx_process_limit = 256; |
201 | TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); | | 201 | TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); |
202 | | | 202 | |
203 | /* How many packets txeof tries to clean at a time */ | | 203 | /* How many packets txeof tries to clean at a time */ |
204 | static int ixv_tx_process_limit = 256; | | 204 | static int ixv_tx_process_limit = 256; |
205 | TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); | | 205 | TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); |
206 | | | 206 | |
207 | /* Which packet processing uses workqueue or softint */ | | 207 | /* Which packet processing uses workqueue or softint */ |
208 | static bool ixv_txrx_workqueue = false; | | 208 | static bool ixv_txrx_workqueue = false; |
209 | | | 209 | |
210 | /* | | 210 | /* |
211 | * Number of TX descriptors per ring, | | 211 | * Number of TX descriptors per ring, |
212 | * setting higher than RX as this seems | | 212 | * setting higher than RX as this seems |
213 | * the better performing choice. | | 213 | * the better performing choice. |
214 | */ | | 214 | */ |
215 | static int ixv_txd = PERFORM_TXD; | | 215 | static int ixv_txd = PERFORM_TXD; |
216 | TUNABLE_INT("hw.ixv.txd", &ixv_txd); | | 216 | TUNABLE_INT("hw.ixv.txd", &ixv_txd); |
217 | | | 217 | |
218 | /* Number of RX descriptors per ring */ | | 218 | /* Number of RX descriptors per ring */ |
219 | static int ixv_rxd = PERFORM_RXD; | | 219 | static int ixv_rxd = PERFORM_RXD; |
220 | TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); | | 220 | TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); |
221 | | | 221 | |
222 | /* Legacy Transmit (single queue) */ | | 222 | /* Legacy Transmit (single queue) */ |
223 | static int ixv_enable_legacy_tx = 0; | | 223 | static int ixv_enable_legacy_tx = 0; |
224 | TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx); | | 224 | TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx); |
225 | | | 225 | |
226 | #ifdef NET_MPSAFE | | 226 | #ifdef NET_MPSAFE |
227 | #define IXGBE_MPSAFE 1 | | 227 | #define IXGBE_MPSAFE 1 |
228 | #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE | | 228 | #define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE |
229 | #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE | | 229 | #define IXGBE_SOFTINFT_FLAGS SOFTINT_MPSAFE |
230 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE | | 230 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE |
231 | #else | | 231 | #else |
232 | #define IXGBE_CALLOUT_FLAGS 0 | | 232 | #define IXGBE_CALLOUT_FLAGS 0 |
233 | #define IXGBE_SOFTINFT_FLAGS 0 | | 233 | #define IXGBE_SOFTINFT_FLAGS 0 |
234 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | | 234 | #define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU |
235 | #endif | | 235 | #endif |
236 | #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET | | 236 | #define IXGBE_WORKQUEUE_PRI PRI_SOFTNET |
237 | | | 237 | |
238 | #if 0 | | 238 | #if 0 |
239 | static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *); | | 239 | static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *); |
240 | static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *); | | 240 | static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *); |
241 | #endif | | 241 | #endif |
242 | | | 242 | |
243 | /************************************************************************ | | 243 | /************************************************************************ |
244 | * ixv_probe - Device identification routine | | 244 | * ixv_probe - Device identification routine |
245 | * | | 245 | * |
246 | * Determines if the driver should be loaded on | | 246 | * Determines if the driver should be loaded on |
247 | * adapter based on its PCI vendor/device ID. | | 247 | * adapter based on its PCI vendor/device ID. |
248 | * | | 248 | * |
249 | * return BUS_PROBE_DEFAULT on success, positive on failure | | 249 | * return BUS_PROBE_DEFAULT on success, positive on failure |
250 | ************************************************************************/ | | 250 | ************************************************************************/ |
251 | static int | | 251 | static int |
252 | ixv_probe(device_t dev, cfdata_t cf, void *aux) | | 252 | ixv_probe(device_t dev, cfdata_t cf, void *aux) |
253 | { | | 253 | { |
254 | #ifdef __HAVE_PCI_MSI_MSIX | | 254 | #ifdef __HAVE_PCI_MSI_MSIX |
255 | const struct pci_attach_args *pa = aux; | | 255 | const struct pci_attach_args *pa = aux; |
256 | | | 256 | |
257 | return (ixv_lookup(pa) != NULL) ? 1 : 0; | | 257 | return (ixv_lookup(pa) != NULL) ? 1 : 0; |
258 | #else | | 258 | #else |
259 | return 0; | | 259 | return 0; |
260 | #endif | | 260 | #endif |
261 | } /* ixv_probe */ | | 261 | } /* ixv_probe */ |
262 | | | 262 | |
263 | static const ixgbe_vendor_info_t * | | 263 | static const ixgbe_vendor_info_t * |
264 | ixv_lookup(const struct pci_attach_args *pa) | | 264 | ixv_lookup(const struct pci_attach_args *pa) |
265 | { | | 265 | { |
266 | const ixgbe_vendor_info_t *ent; | | 266 | const ixgbe_vendor_info_t *ent; |
267 | pcireg_t subid; | | 267 | pcireg_t subid; |
268 | | | 268 | |
269 | INIT_DEBUGOUT("ixv_lookup: begin"); | | 269 | INIT_DEBUGOUT("ixv_lookup: begin"); |
270 | | | 270 | |
271 | if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) | | 271 | if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) |
272 | return NULL; | | 272 | return NULL; |
273 | | | 273 | |
274 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); | | 274 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); |
275 | | | 275 | |
276 | for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { | | 276 | for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { |
277 | if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && | | 277 | if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && |
278 | (PCI_PRODUCT(pa->pa_id) == ent->device_id) && | | 278 | (PCI_PRODUCT(pa->pa_id) == ent->device_id) && |
279 | ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || | | 279 | ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || |
280 | (ent->subvendor_id == 0)) && | | 280 | (ent->subvendor_id == 0)) && |
281 | ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || | | 281 | ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || |
282 | (ent->subdevice_id == 0))) { | | 282 | (ent->subdevice_id == 0))) { |
283 | return ent; | | 283 | return ent; |
284 | } | | 284 | } |
285 | } | | 285 | } |
286 | | | 286 | |
287 | return NULL; | | 287 | return NULL; |
288 | } | | 288 | } |
289 | | | 289 | |
290 | /************************************************************************ | | 290 | /************************************************************************ |
291 | * ixv_attach - Device initialization routine | | 291 | * ixv_attach - Device initialization routine |
292 | * | | 292 | * |
293 | * Called when the driver is being loaded. | | 293 | * Called when the driver is being loaded. |
294 | * Identifies the type of hardware, allocates all resources | | 294 | * Identifies the type of hardware, allocates all resources |
295 | * and initializes the hardware. | | 295 | * and initializes the hardware. |
296 | * | | 296 | * |
297 | * return 0 on success, positive on failure | | 297 | * return 0 on success, positive on failure |
298 | ************************************************************************/ | | 298 | ************************************************************************/ |
299 | static void | | 299 | static void |
300 | ixv_attach(device_t parent, device_t dev, void *aux) | | 300 | ixv_attach(device_t parent, device_t dev, void *aux) |
301 | { | | 301 | { |
302 | struct adapter *adapter; | | 302 | struct adapter *adapter; |
303 | struct ixgbe_hw *hw; | | 303 | struct ixgbe_hw *hw; |
304 | int error = 0; | | 304 | int error = 0; |
305 | pcireg_t id, subid; | | 305 | pcireg_t id, subid; |
306 | const ixgbe_vendor_info_t *ent; | | 306 | const ixgbe_vendor_info_t *ent; |
307 | const struct pci_attach_args *pa = aux; | | 307 | const struct pci_attach_args *pa = aux; |
308 | const char *apivstr; | | 308 | const char *apivstr; |
309 | const char *str; | | 309 | const char *str; |
310 | char buf[256]; | | 310 | char buf[256]; |
311 | | | 311 | |
312 | INIT_DEBUGOUT("ixv_attach: begin"); | | 312 | INIT_DEBUGOUT("ixv_attach: begin"); |
313 | | | 313 | |
314 | /* | | 314 | /* |
315 | * Make sure BUSMASTER is set, on a VM under | | 315 | * Make sure BUSMASTER is set, on a VM under |
316 | * KVM it may not be and will break things. | | 316 | * KVM it may not be and will break things. |
317 | */ | | 317 | */ |
318 | ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); | | 318 | ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); |
319 | | | 319 | |
320 | /* Allocate, clear, and link in our adapter structure */ | | 320 | /* Allocate, clear, and link in our adapter structure */ |
321 | adapter = device_private(dev); | | 321 | adapter = device_private(dev); |
322 | adapter->dev = dev; | | 322 | adapter->dev = dev; |
323 | adapter->hw.back = adapter; | | 323 | adapter->hw.back = adapter; |
324 | hw = &adapter->hw; | | 324 | hw = &adapter->hw; |
325 | | | 325 | |
326 | adapter->init_locked = ixv_init_locked; | | 326 | adapter->init_locked = ixv_init_locked; |
327 | adapter->stop_locked = ixv_stop; | | 327 | adapter->stop_locked = ixv_stop; |
328 | | | 328 | |
329 | adapter->osdep.pc = pa->pa_pc; | | 329 | adapter->osdep.pc = pa->pa_pc; |
330 | adapter->osdep.tag = pa->pa_tag; | | 330 | adapter->osdep.tag = pa->pa_tag; |
331 | if (pci_dma64_available(pa)) | | 331 | if (pci_dma64_available(pa)) |
332 | adapter->osdep.dmat = pa->pa_dmat64; | | 332 | adapter->osdep.dmat = pa->pa_dmat64; |
333 | else | | 333 | else |
334 | adapter->osdep.dmat = pa->pa_dmat; | | 334 | adapter->osdep.dmat = pa->pa_dmat; |
335 | adapter->osdep.attached = false; | | 335 | adapter->osdep.attached = false; |
336 | | | 336 | |
337 | ent = ixv_lookup(pa); | | 337 | ent = ixv_lookup(pa); |
338 | | | 338 | |
339 | KASSERT(ent != NULL); | | 339 | KASSERT(ent != NULL); |
340 | | | 340 | |
341 | aprint_normal(": %s, Version - %s\n", | | 341 | aprint_normal(": %s, Version - %s\n", |
342 | ixv_strings[ent->index], ixv_driver_version); | | 342 | ixv_strings[ent->index], ixv_driver_version); |
343 | | | 343 | |
344 | /* Core Lock Init*/ | | 344 | /* Core Lock Init*/ |
345 | IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); | | 345 | IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); |
346 | | | 346 | |
347 | /* Do base PCI setup - map BAR0 */ | | 347 | /* Do base PCI setup - map BAR0 */ |
348 | if (ixv_allocate_pci_resources(adapter, pa)) { | | 348 | if (ixv_allocate_pci_resources(adapter, pa)) { |
349 | aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n"); | | 349 | aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n"); |
350 | error = ENXIO; | | 350 | error = ENXIO; |
351 | goto err_out; | | 351 | goto err_out; |
352 | } | | 352 | } |
353 | | | 353 | |
354 | /* SYSCTL APIs */ | | 354 | /* SYSCTL APIs */ |
355 | ixv_add_device_sysctls(adapter); | | 355 | ixv_add_device_sysctls(adapter); |
356 | | | 356 | |
357 | /* Set up the timer callout */ | | 357 | /* Set up the timer callout */ |
358 | callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); | | 358 | callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); |
359 | | | 359 | |
360 | /* Save off the information about this board */ | | 360 | /* Save off the information about this board */ |
361 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); | | 361 | id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); |
362 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); | | 362 | subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); |
363 | hw->vendor_id = PCI_VENDOR(id); | | 363 | hw->vendor_id = PCI_VENDOR(id); |
364 | hw->device_id = PCI_PRODUCT(id); | | 364 | hw->device_id = PCI_PRODUCT(id); |
365 | hw->revision_id = | | 365 | hw->revision_id = |
366 | PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); | | 366 | PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); |
367 | hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); | | 367 | hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); |
368 | hw->subsystem_device_id = PCI_SUBSYS_ID(subid); | | 368 | hw->subsystem_device_id = PCI_SUBSYS_ID(subid); |
369 | | | 369 | |
370 | /* A subset of set_mac_type */ | | 370 | /* A subset of set_mac_type */ |
371 | switch (hw->device_id) { | | 371 | switch (hw->device_id) { |
372 | case IXGBE_DEV_ID_82599_VF: | | 372 | case IXGBE_DEV_ID_82599_VF: |
373 | hw->mac.type = ixgbe_mac_82599_vf; | | 373 | hw->mac.type = ixgbe_mac_82599_vf; |
374 | str = "82599 VF"; | | 374 | str = "82599 VF"; |
375 | break; | | 375 | break; |
376 | case IXGBE_DEV_ID_X540_VF: | | 376 | case IXGBE_DEV_ID_X540_VF: |
377 | hw->mac.type = ixgbe_mac_X540_vf; | | 377 | hw->mac.type = ixgbe_mac_X540_vf; |
378 | str = "X540 VF"; | | 378 | str = "X540 VF"; |
379 | break; | | 379 | break; |
380 | case IXGBE_DEV_ID_X550_VF: | | 380 | case IXGBE_DEV_ID_X550_VF: |
381 | hw->mac.type = ixgbe_mac_X550_vf; | | 381 | hw->mac.type = ixgbe_mac_X550_vf; |
382 | str = "X550 VF"; | | 382 | str = "X550 VF"; |
383 | break; | | 383 | break; |
384 | case IXGBE_DEV_ID_X550EM_X_VF: | | 384 | case IXGBE_DEV_ID_X550EM_X_VF: |
385 | hw->mac.type = ixgbe_mac_X550EM_x_vf; | | 385 | hw->mac.type = ixgbe_mac_X550EM_x_vf; |
386 | str = "X550EM X VF"; | | 386 | str = "X550EM X VF"; |
387 | break; | | 387 | break; |
388 | case IXGBE_DEV_ID_X550EM_A_VF: | | 388 | case IXGBE_DEV_ID_X550EM_A_VF: |
389 | hw->mac.type = ixgbe_mac_X550EM_a_vf; | | 389 | hw->mac.type = ixgbe_mac_X550EM_a_vf; |
390 | str = "X550EM A VF"; | | 390 | str = "X550EM A VF"; |
391 | break; | | 391 | break; |
392 | default: | | 392 | default: |
393 | /* Shouldn't get here since probe succeeded */ | | 393 | /* Shouldn't get here since probe succeeded */ |
394 | aprint_error_dev(dev, "Unknown device ID!\n"); | | 394 | aprint_error_dev(dev, "Unknown device ID!\n"); |
395 | error = ENXIO; | | 395 | error = ENXIO; |
396 | goto err_out; | | 396 | goto err_out; |
397 | break; | | 397 | break; |
398 | } | | 398 | } |
399 | aprint_normal_dev(dev, "device %s\n", str); | | 399 | aprint_normal_dev(dev, "device %s\n", str); |
400 | | | 400 | |
401 | ixv_init_device_features(adapter); | | 401 | ixv_init_device_features(adapter); |
402 | | | 402 | |
403 | /* Initialize the shared code */ | | 403 | /* Initialize the shared code */ |
404 | error = ixgbe_init_ops_vf(hw); | | 404 | error = ixgbe_init_ops_vf(hw); |
405 | if (error) { | | 405 | if (error) { |
406 | aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n"); | | 406 | aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n"); |
407 | error = EIO; | | 407 | error = EIO; |
408 | goto err_out; | | 408 | goto err_out; |
409 | } | | 409 | } |
410 | | | 410 | |
411 | /* Setup the mailbox */ | | 411 | /* Setup the mailbox */ |
412 | ixgbe_init_mbx_params_vf(hw); | | 412 | ixgbe_init_mbx_params_vf(hw); |
413 | | | 413 | |
414 | /* Set the right number of segments */ | | 414 | /* Set the right number of segments */ |
415 | adapter->num_segs = IXGBE_82599_SCATTER; | | 415 | adapter->num_segs = IXGBE_82599_SCATTER; |
416 | | | 416 | |
417 | /* Reset mbox api to 1.0 */ | | 417 | /* Reset mbox api to 1.0 */ |
418 | error = hw->mac.ops.reset_hw(hw); | | 418 | error = hw->mac.ops.reset_hw(hw); |
419 | if (error == IXGBE_ERR_RESET_FAILED) | | 419 | if (error == IXGBE_ERR_RESET_FAILED) |
420 | aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n"); | | 420 | aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n"); |
421 | else if (error) | | 421 | else if (error) |
422 | aprint_error_dev(dev, "...reset_hw() failed with error %d\n", | | 422 | aprint_error_dev(dev, "...reset_hw() failed with error %d\n", |
423 | error); | | 423 | error); |
424 | if (error) { | | 424 | if (error) { |
425 | error = EIO; | | 425 | error = EIO; |
426 | goto err_out; | | 426 | goto err_out; |
427 | } | | 427 | } |
428 | | | 428 | |
429 | error = hw->mac.ops.init_hw(hw); | | 429 | error = hw->mac.ops.init_hw(hw); |
430 | if (error) { | | 430 | if (error) { |
431 | aprint_error_dev(dev, "...init_hw() failed!\n"); | | 431 | aprint_error_dev(dev, "...init_hw() failed!\n"); |
432 | error = EIO; | | 432 | error = EIO; |
433 | goto err_out; | | 433 | goto err_out; |
434 | } | | 434 | } |
435 | | | 435 | |
436 | /* Negotiate mailbox API version */ | | 436 | /* Negotiate mailbox API version */ |
437 | error = ixv_negotiate_api(adapter); | | 437 | error = ixv_negotiate_api(adapter); |
438 | if (error) | | 438 | if (error) |
439 | aprint_normal_dev(dev, | | 439 | aprint_normal_dev(dev, |
440 | "MBX API negotiation failed during attach!\n"); | | 440 | "MBX API negotiation failed during attach!\n"); |
441 | switch (hw->api_version) { | | 441 | switch (hw->api_version) { |
442 | case ixgbe_mbox_api_10: | | 442 | case ixgbe_mbox_api_10: |
443 | apivstr = "1.0"; | | 443 | apivstr = "1.0"; |
444 | break; | | 444 | break; |
445 | case ixgbe_mbox_api_20: | | 445 | case ixgbe_mbox_api_20: |
446 | apivstr = "2.0"; | | 446 | apivstr = "2.0"; |
447 | break; | | 447 | break; |
448 | case ixgbe_mbox_api_11: | | 448 | case ixgbe_mbox_api_11: |
449 | apivstr = "1.1"; | | 449 | apivstr = "1.1"; |
450 | break; | | 450 | break; |
451 | case ixgbe_mbox_api_12: | | 451 | case ixgbe_mbox_api_12: |
452 | apivstr = "1.2"; | | 452 | apivstr = "1.2"; |
453 | break; | | 453 | break; |
454 | case ixgbe_mbox_api_13: | | 454 | case ixgbe_mbox_api_13: |
455 | apivstr = "1.3"; | | 455 | apivstr = "1.3"; |
456 | break; | | 456 | break; |
457 | default: | | 457 | default: |
458 | apivstr = "unknown"; | | 458 | apivstr = "unknown"; |
459 | break; | | 459 | break; |
460 | } | | 460 | } |
461 | aprint_normal_dev(dev, "Mailbox API %s\n", apivstr); | | 461 | aprint_normal_dev(dev, "Mailbox API %s\n", apivstr); |
462 | | | 462 | |
463 | /* If no mac address was assigned, make a random one */ | | 463 | /* If no mac address was assigned, make a random one */ |
464 | if (!ixv_check_ether_addr(hw->mac.addr)) { | | 464 | if (!ixv_check_ether_addr(hw->mac.addr)) { |
465 | u8 addr[ETHER_ADDR_LEN]; | | 465 | u8 addr[ETHER_ADDR_LEN]; |
466 | uint64_t rndval = cprng_strong64(); | | 466 | uint64_t rndval = cprng_strong64(); |
467 | | | 467 | |
468 | memcpy(addr, &rndval, sizeof(addr)); | | 468 | memcpy(addr, &rndval, sizeof(addr)); |
469 | addr[0] &= 0xFE; | | 469 | addr[0] &= 0xFE; |
470 | addr[0] |= 0x02; | | 470 | addr[0] |= 0x02; |
471 | bcopy(addr, hw->mac.addr, sizeof(addr)); | | 471 | bcopy(addr, hw->mac.addr, sizeof(addr)); |
472 | } | | 472 | } |
473 | | | 473 | |
474 | /* Register for VLAN events */ | | 474 | /* Register for VLAN events */ |
475 | ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb); | | 475 | ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb); |
476 | | | 476 | |
477 | /* Sysctls for limiting the amount of work done in the taskqueues */ | | 477 | /* Sysctls for limiting the amount of work done in the taskqueues */ |
478 | ixv_set_sysctl_value(adapter, "rx_processing_limit", | | 478 | ixv_set_sysctl_value(adapter, "rx_processing_limit", |
479 | "max number of rx packets to process", | | 479 | "max number of rx packets to process", |
480 | &adapter->rx_process_limit, ixv_rx_process_limit); | | 480 | &adapter->rx_process_limit, ixv_rx_process_limit); |
481 | | | 481 | |
482 | ixv_set_sysctl_value(adapter, "tx_processing_limit", | | 482 | ixv_set_sysctl_value(adapter, "tx_processing_limit", |
483 | "max number of tx packets to process", | | 483 | "max number of tx packets to process", |
484 | &adapter->tx_process_limit, ixv_tx_process_limit); | | 484 | &adapter->tx_process_limit, ixv_tx_process_limit); |
485 | | | 485 | |
486 | /* Do descriptor calc and sanity checks */ | | 486 | /* Do descriptor calc and sanity checks */ |
487 | if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || | | 487 | if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || |
488 | ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { | | 488 | ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { |
489 | aprint_error_dev(dev, "TXD config issue, using default!\n"); | | 489 | aprint_error_dev(dev, "TXD config issue, using default!\n"); |
490 | adapter->num_tx_desc = DEFAULT_TXD; | | 490 | adapter->num_tx_desc = DEFAULT_TXD; |
491 | } else | | 491 | } else |
492 | adapter->num_tx_desc = ixv_txd; | | 492 | adapter->num_tx_desc = ixv_txd; |
493 | | | 493 | |
494 | if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || | | 494 | if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || |
495 | ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { | | 495 | ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { |
496 | aprint_error_dev(dev, "RXD config issue, using default!\n"); | | 496 | aprint_error_dev(dev, "RXD config issue, using default!\n"); |
497 | adapter->num_rx_desc = DEFAULT_RXD; | | 497 | adapter->num_rx_desc = DEFAULT_RXD; |
498 | } else | | 498 | } else |
499 | adapter->num_rx_desc = ixv_rxd; | | 499 | adapter->num_rx_desc = ixv_rxd; |
500 | | | 500 | |
501 | /* Setup MSI-X */ | | 501 | /* Setup MSI-X */ |
502 | error = ixv_configure_interrupts(adapter); | | 502 | error = ixv_configure_interrupts(adapter); |
503 | if (error) | | 503 | if (error) |
504 | goto err_out; | | 504 | goto err_out; |
505 | | | 505 | |
506 | /* Allocate our TX/RX Queues */ | | 506 | /* Allocate our TX/RX Queues */ |
507 | if (ixgbe_allocate_queues(adapter)) { | | 507 | if (ixgbe_allocate_queues(adapter)) { |
508 | aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n"); | | 508 | aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n"); |
509 | error = ENOMEM; | | 509 | error = ENOMEM; |
510 | goto err_out; | | 510 | goto err_out; |
511 | } | | 511 | } |
512 | | | 512 | |
513 | /* hw.ix defaults init */ | | 513 | /* hw.ix defaults init */ |
514 | adapter->enable_aim = ixv_enable_aim; | | 514 | adapter->enable_aim = ixv_enable_aim; |
515 | | | 515 | |
516 | adapter->txrx_use_workqueue = ixv_txrx_workqueue; | | 516 | adapter->txrx_use_workqueue = ixv_txrx_workqueue; |
517 | | | 517 | |
518 | error = ixv_allocate_msix(adapter, pa); | | 518 | error = ixv_allocate_msix(adapter, pa); |
519 | if (error) { | | 519 | if (error) { |
520 | aprint_error_dev(dev, "ixv_allocate_msix() failed!\n"); | | 520 | aprint_error_dev(dev, "ixv_allocate_msix() failed!\n"); |
521 | goto err_late; | | 521 | goto err_late; |
522 | } | | 522 | } |
523 | | | 523 | |
524 | /* Setup OS specific network interface */ | | 524 | /* Setup OS specific network interface */ |
525 | error = ixv_setup_interface(dev, adapter); | | 525 | error = ixv_setup_interface(dev, adapter); |
526 | if (error != 0) { | | 526 | if (error != 0) { |
527 | aprint_error_dev(dev, "ixv_setup_interface() failed!\n"); | | 527 | aprint_error_dev(dev, "ixv_setup_interface() failed!\n"); |
528 | goto err_late; | | 528 | goto err_late; |
529 | } | | 529 | } |
530 | | | 530 | |
531 | /* Do the stats setup */ | | 531 | /* Do the stats setup */ |
532 | ixv_save_stats(adapter); | | 532 | ixv_save_stats(adapter); |
533 | ixv_init_stats(adapter); | | 533 | ixv_init_stats(adapter); |
534 | ixv_add_stats_sysctls(adapter); | | 534 | ixv_add_stats_sysctls(adapter); |
535 | | | 535 | |
536 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) | | 536 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) |
537 | ixgbe_netmap_attach(adapter); | | 537 | ixgbe_netmap_attach(adapter); |
538 | | | 538 | |
539 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); | | 539 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); |
540 | aprint_verbose_dev(dev, "feature cap %s\n", buf); | | 540 | aprint_verbose_dev(dev, "feature cap %s\n", buf); |
541 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); | | 541 | snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); |
542 | aprint_verbose_dev(dev, "feature ena %s\n", buf); | | 542 | aprint_verbose_dev(dev, "feature ena %s\n", buf); |
543 | | | 543 | |
544 | INIT_DEBUGOUT("ixv_attach: end"); | | 544 | INIT_DEBUGOUT("ixv_attach: end"); |
545 | adapter->osdep.attached = true; | | 545 | adapter->osdep.attached = true; |
546 | | | 546 | |
547 | return; | | 547 | return; |
548 | | | 548 | |
549 | err_late: | | 549 | err_late: |
550 | ixgbe_free_transmit_structures(adapter); | | 550 | ixgbe_free_transmit_structures(adapter); |
551 | ixgbe_free_receive_structures(adapter); | | 551 | ixgbe_free_receive_structures(adapter); |
552 | free(adapter->queues, M_DEVBUF); | | 552 | free(adapter->queues, M_DEVBUF); |
553 | err_out: | | 553 | err_out: |
554 | ixv_free_pci_resources(adapter); | | 554 | ixv_free_pci_resources(adapter); |
555 | IXGBE_CORE_LOCK_DESTROY(adapter); | | 555 | IXGBE_CORE_LOCK_DESTROY(adapter); |
556 | | | 556 | |
557 | return; | | 557 | return; |
558 | } /* ixv_attach */ | | 558 | } /* ixv_attach */ |
559 | | | 559 | |
560 | /************************************************************************ | | 560 | /************************************************************************ |
561 | * ixv_detach - Device removal routine | | 561 | * ixv_detach - Device removal routine |
562 | * | | 562 | * |
563 | * Called when the driver is being removed. | | 563 | * Called when the driver is being removed. |
564 | * Stops the adapter and deallocates all the resources | | 564 | * Stops the adapter and deallocates all the resources |
565 | * that were allocated for driver operation. | | 565 | * that were allocated for driver operation. |
566 | * | | 566 | * |
567 | * return 0 on success, positive on failure | | 567 | * return 0 on success, positive on failure |
568 | ************************************************************************/ | | 568 | ************************************************************************/ |
569 | static int | | 569 | static int |
570 | ixv_detach(device_t dev, int flags) | | 570 | ixv_detach(device_t dev, int flags) |
571 | { | | 571 | { |
572 | struct adapter *adapter = device_private(dev); | | 572 | struct adapter *adapter = device_private(dev); |
573 | struct ixgbe_hw *hw = &adapter->hw; | | 573 | struct ixgbe_hw *hw = &adapter->hw; |
574 | struct ix_queue *que = adapter->queues; | | 574 | struct ix_queue *que = adapter->queues; |
575 | struct tx_ring *txr = adapter->tx_rings; | | 575 | struct tx_ring *txr = adapter->tx_rings; |
576 | struct rx_ring *rxr = adapter->rx_rings; | | 576 | struct rx_ring *rxr = adapter->rx_rings; |
577 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; | | 577 | struct ixgbevf_hw_stats *stats = &adapter->stats.vf; |
578 | | | 578 | |
579 | INIT_DEBUGOUT("ixv_detach: begin"); | | 579 | INIT_DEBUGOUT("ixv_detach: begin"); |
580 | if (adapter->osdep.attached == false) | | 580 | if (adapter->osdep.attached == false) |
581 | return 0; | | 581 | return 0; |
582 | | | 582 | |
583 | /* Stop the interface. Callouts are stopped in it. */ | | 583 | /* Stop the interface. Callouts are stopped in it. */ |
584 | ixv_ifstop(adapter->ifp, 1); | | 584 | ixv_ifstop(adapter->ifp, 1); |
585 | | | 585 | |
586 | #if NVLAN > 0 | | 586 | #if NVLAN > 0 |
587 | /* Make sure VLANs are not using driver */ | | 587 | /* Make sure VLANs are not using driver */ |
588 | if (!VLAN_ATTACHED(&adapter->osdep.ec)) | | 588 | if (!VLAN_ATTACHED(&adapter->osdep.ec)) |
589 | ; /* nothing to do: no VLANs */ | | 589 | ; /* nothing to do: no VLANs */ |
590 | else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) | | 590 | else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) |
591 | vlan_ifdetach(adapter->ifp); | | 591 | vlan_ifdetach(adapter->ifp); |
592 | else { | | 592 | else { |
593 | aprint_error_dev(dev, "VLANs in use, detach first\n"); | | 593 | aprint_error_dev(dev, "VLANs in use, detach first\n"); |
594 | return EBUSY; | | 594 | return EBUSY; |
595 | } | | 595 | } |
596 | #endif | | 596 | #endif |
597 | | | 597 | |
598 | for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { | | 598 | for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { |
599 | if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) | | 599 | if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) |
600 | softint_disestablish(txr->txr_si); | | 600 | softint_disestablish(txr->txr_si); |
601 | softint_disestablish(que->que_si); | | 601 | softint_disestablish(que->que_si); |
602 | } | | 602 | } |
603 | if (adapter->txr_wq != NULL) | | 603 | if (adapter->txr_wq != NULL) |
604 | workqueue_destroy(adapter->txr_wq); | | 604 | workqueue_destroy(adapter->txr_wq); |
605 | if (adapter->txr_wq_enqueued != NULL) | | 605 | if (adapter->txr_wq_enqueued != NULL) |
606 | percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); | | 606 | percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); |
607 | if (adapter->que_wq != NULL) | | 607 | if (adapter->que_wq != NULL) |
608 | workqueue_destroy(adapter->que_wq); | | 608 | workqueue_destroy(adapter->que_wq); |
609 | | | 609 | |
610 | /* Drain the Mailbox(link) queue */ | | 610 | /* Drain the Mailbox(link) queue */ |
611 | softint_disestablish(adapter->link_si); | | 611 | softint_disestablish(adapter->link_si); |
612 | | | 612 | |
613 | ether_ifdetach(adapter->ifp); | | 613 | ether_ifdetach(adapter->ifp); |
614 | callout_halt(&adapter->timer, NULL); | | 614 | callout_halt(&adapter->timer, NULL); |
615 | | | 615 | |
616 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) | | 616 | if (adapter->feat_en & IXGBE_FEATURE_NETMAP) |
617 | netmap_detach(adapter->ifp); | | 617 | netmap_detach(adapter->ifp); |
618 | | | 618 | |
619 | ixv_free_pci_resources(adapter); | | 619 | ixv_free_pci_resources(adapter); |
620 | #if 0 /* XXX the NetBSD port is probably missing something here */ | | 620 | #if 0 /* XXX the NetBSD port is probably missing something here */ |
621 | bus_generic_detach(dev); | | 621 | bus_generic_detach(dev); |
622 | #endif | | 622 | #endif |
623 | if_detach(adapter->ifp); | | 623 | if_detach(adapter->ifp); |
624 | if_percpuq_destroy(adapter->ipq); | | 624 | if_percpuq_destroy(adapter->ipq); |
625 | | | 625 | |
626 | sysctl_teardown(&adapter->sysctllog); | | 626 | sysctl_teardown(&adapter->sysctllog); |
627 | evcnt_detach(&adapter->efbig_tx_dma_setup); | | 627 | evcnt_detach(&adapter->efbig_tx_dma_setup); |
628 | evcnt_detach(&adapter->mbuf_defrag_failed); | | 628 | evcnt_detach(&adapter->mbuf_defrag_failed); |
629 | evcnt_detach(&adapter->efbig2_tx_dma_setup); | | 629 | evcnt_detach(&adapter->efbig2_tx_dma_setup); |
630 | evcnt_detach(&adapter->einval_tx_dma_setup); | | 630 | evcnt_detach(&adapter->einval_tx_dma_setup); |
631 | evcnt_detach(&adapter->other_tx_dma_setup); | | 631 | evcnt_detach(&adapter->other_tx_dma_setup); |
632 | evcnt_detach(&adapter->eagain_tx_dma_setup); | | 632 | evcnt_detach(&adapter->eagain_tx_dma_setup); |
633 | evcnt_detach(&adapter->enomem_tx_dma_setup); | | 633 | evcnt_detach(&adapter->enomem_tx_dma_setup); |
634 | evcnt_detach(&adapter->watchdog_events); | | 634 | evcnt_detach(&adapter->watchdog_events); |
635 | evcnt_detach(&adapter->tso_err); | | 635 | evcnt_detach(&adapter->tso_err); |
636 | evcnt_detach(&adapter->link_irq); | | 636 | evcnt_detach(&adapter->link_irq); |
637 | | | 637 | |
638 | txr = adapter->tx_rings; | | 638 | txr = adapter->tx_rings; |
639 | for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { | | 639 | for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { |
640 | evcnt_detach(&adapter->queues[i].irqs); | | 640 | evcnt_detach(&adapter->queues[i].irqs); |
641 | evcnt_detach(&adapter->queues[i].handleq); | | 641 | evcnt_detach(&adapter->queues[i].handleq); |
642 | evcnt_detach(&adapter->queues[i].req); | | 642 | evcnt_detach(&adapter->queues[i].req); |
643 | evcnt_detach(&txr->no_desc_avail); | | 643 | evcnt_detach(&txr->no_desc_avail); |
644 | evcnt_detach(&txr->total_packets); | | 644 | evcnt_detach(&txr->total_packets); |
645 | evcnt_detach(&txr->tso_tx); | | 645 | evcnt_detach(&txr->tso_tx); |
646 | #ifndef IXGBE_LEGACY_TX | | 646 | #ifndef IXGBE_LEGACY_TX |
647 | evcnt_detach(&txr->pcq_drops); | | 647 | evcnt_detach(&txr->pcq_drops); |
648 | #endif | | 648 | #endif |
649 | | | 649 | |
650 | evcnt_detach(&rxr->rx_packets); | | 650 | evcnt_detach(&rxr->rx_packets); |
651 | evcnt_detach(&rxr->rx_bytes); | | 651 | evcnt_detach(&rxr->rx_bytes); |
652 | evcnt_detach(&rxr->rx_copies); | | 652 | evcnt_detach(&rxr->rx_copies); |
653 | evcnt_detach(&rxr->no_jmbuf); | | 653 | evcnt_detach(&rxr->no_jmbuf); |
654 | evcnt_detach(&rxr->rx_discarded); | | 654 | evcnt_detach(&rxr->rx_discarded); |
655 | } | | 655 | } |
656 | evcnt_detach(&stats->ipcs); | | 656 | evcnt_detach(&stats->ipcs); |
657 | evcnt_detach(&stats->l4cs); | | 657 | evcnt_detach(&stats->l4cs); |
658 | evcnt_detach(&stats->ipcs_bad); | | 658 | evcnt_detach(&stats->ipcs_bad); |
659 | evcnt_detach(&stats->l4cs_bad); | | 659 | evcnt_detach(&stats->l4cs_bad); |
660 | | | 660 | |
661 | /* Packet Reception Stats */ | | 661 | /* Packet Reception Stats */ |
662 | evcnt_detach(&stats->vfgorc); | | 662 | evcnt_detach(&stats->vfgorc); |
663 | evcnt_detach(&stats->vfgprc); | | 663 | evcnt_detach(&stats->vfgprc); |
664 | evcnt_detach(&stats->vfmprc); | | 664 | evcnt_detach(&stats->vfmprc); |
665 | | | 665 | |
666 | /* Packet Transmission Stats */ | | 666 | /* Packet Transmission Stats */ |
667 | evcnt_detach(&stats->vfgotc); | | 667 | evcnt_detach(&stats->vfgotc); |
668 | evcnt_detach(&stats->vfgptc); | | 668 | evcnt_detach(&stats->vfgptc); |
669 | | | 669 | |
670 | /* Mailbox Stats */ | | 670 | /* Mailbox Stats */ |
671 | evcnt_detach(&hw->mbx.stats.msgs_tx); | | 671 | evcnt_detach(&hw->mbx.stats.msgs_tx); |
672 | evcnt_detach(&hw->mbx.stats.msgs_rx); | | 672 | evcnt_detach(&hw->mbx.stats.msgs_rx); |
673 | evcnt_detach(&hw->mbx.stats.acks); | | 673 | evcnt_detach(&hw->mbx.stats.acks); |
674 | evcnt_detach(&hw->mbx.stats.reqs); | | 674 | evcnt_detach(&hw->mbx.stats.reqs); |
675 | evcnt_detach(&hw->mbx.stats.rsts); | | 675 | evcnt_detach(&hw->mbx.stats.rsts); |
676 | | | 676 | |
677 | ixgbe_free_transmit_structures(adapter); | | 677 | ixgbe_free_transmit_structures(adapter); |
678 | ixgbe_free_receive_structures(adapter); | | 678 | ixgbe_free_receive_structures(adapter); |
679 | for (int i = 0; i < adapter->num_queues; i++) { | | 679 | for (int i = 0; i < adapter->num_queues; i++) { |
680 | struct ix_queue *lque = &adapter->queues[i]; | | 680 | struct ix_queue *lque = &adapter->queues[i]; |
681 | mutex_destroy(&lque->dc_mtx); | | 681 | mutex_destroy(&lque->dc_mtx); |
682 | } | | 682 | } |
683 | free(adapter->queues, M_DEVBUF); | | 683 | free(adapter->queues, M_DEVBUF); |
684 | | | 684 | |
685 | IXGBE_CORE_LOCK_DESTROY(adapter); | | 685 | IXGBE_CORE_LOCK_DESTROY(adapter); |
686 | | | 686 | |
687 | return (0); | | 687 | return (0); |
688 | } /* ixv_detach */ | | 688 | } /* ixv_detach */ |
689 | | | 689 | |
690 | /************************************************************************ | | 690 | /************************************************************************ |
691 | * ixv_init_locked - Init entry point | | 691 | * ixv_init_locked - Init entry point |
692 | * | | 692 | * |
693 | * Used in two ways: It is used by the stack as an init entry | | 693 | * Used in two ways: It is used by the stack as an init entry |
694 | * point in network interface structure. It is also used | | 694 | * point in network interface structure. It is also used |
695 | * by the driver as a hw/sw initialization routine to get | | 695 | * by the driver as a hw/sw initialization routine to get |
696 | * to a consistent state. | | 696 | * to a consistent state. |
697 | * | | 697 | * |
698 | * return 0 on success, positive on failure | | 698 | * return 0 on success, positive on failure |
699 | ************************************************************************/ | | 699 | ************************************************************************/ |
700 | static void | | 700 | static void |
701 | ixv_init_locked(struct adapter *adapter) | | 701 | ixv_init_locked(struct adapter *adapter) |
702 | { | | 702 | { |
703 | struct ifnet *ifp = adapter->ifp; | | 703 | struct ifnet *ifp = adapter->ifp; |
704 | device_t dev = adapter->dev; | | 704 | device_t dev = adapter->dev; |
705 | struct ixgbe_hw *hw = &adapter->hw; | | 705 | struct ixgbe_hw *hw = &adapter->hw; |
706 | struct ix_queue *que; | | 706 | struct ix_queue *que; |
707 | int error = 0; | | 707 | int error = 0; |
708 | uint32_t mask; | | 708 | uint32_t mask; |
709 | int i; | | 709 | int i; |
710 | | | 710 | |
711 | INIT_DEBUGOUT("ixv_init_locked: begin"); | | 711 | INIT_DEBUGOUT("ixv_init_locked: begin"); |
712 | KASSERT(mutex_owned(&adapter->core_mtx)); | | 712 | KASSERT(mutex_owned(&adapter->core_mtx)); |
713 | hw->adapter_stopped = FALSE; | | 713 | hw->adapter_stopped = FALSE; |
714 | hw->mac.ops.stop_adapter(hw); | | 714 | hw->mac.ops.stop_adapter(hw); |
715 | callout_stop(&adapter->timer); | | 715 | callout_stop(&adapter->timer); |
716 | for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) | | 716 | for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) |
717 | que->disabled_count = 0; | | 717 | que->disabled_count = 0; |
718 | | | 718 | |
| | | 719 | adapter->max_frame_size = |
| | | 720 | ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; |
| | | 721 | |
719 | /* reprogram the RAR[0] in case user changed it. */ | | 722 | /* reprogram the RAR[0] in case user changed it. */ |
720 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | | 723 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
721 | | | 724 | |
722 | /* Get the latest mac address, User can use a LAA */ | | 725 | /* Get the latest mac address, User can use a LAA */ |
723 | memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), | | 726 | memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), |
724 | IXGBE_ETH_LENGTH_OF_ADDRESS); | | 727 | IXGBE_ETH_LENGTH_OF_ADDRESS); |
725 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); | | 728 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); |
726 | | | 729 | |
727 | /* Prepare transmit descriptors and buffers */ | | 730 | /* Prepare transmit descriptors and buffers */ |
728 | if (ixgbe_setup_transmit_structures(adapter)) { | | 731 | if (ixgbe_setup_transmit_structures(adapter)) { |
729 | aprint_error_dev(dev, "Could not setup transmit structures\n"); | | 732 | aprint_error_dev(dev, "Could not setup transmit structures\n"); |
730 | ixv_stop(adapter); | | 733 | ixv_stop(adapter); |
731 | return; | | 734 | return; |
732 | } | | 735 | } |
733 | | | 736 | |
734 | /* Reset VF and renegotiate mailbox API version */ | | 737 | /* Reset VF and renegotiate mailbox API version */ |
735 | hw->mac.ops.reset_hw(hw); | | 738 | hw->mac.ops.reset_hw(hw); |
736 | hw->mac.ops.start_hw(hw); | | 739 | hw->mac.ops.start_hw(hw); |
737 | error = ixv_negotiate_api(adapter); | | 740 | error = ixv_negotiate_api(adapter); |
738 | if (error) | | 741 | if (error) |
739 | device_printf(dev, | | 742 | device_printf(dev, |
740 | "Mailbox API negotiation failed in init_locked!\n"); | | 743 | "Mailbox API negotiation failed in init_locked!\n"); |
741 | | | 744 | |
742 | ixv_initialize_transmit_units(adapter); | | 745 | ixv_initialize_transmit_units(adapter); |
743 | | | 746 | |
744 | /* Setup Multicast table */ | | 747 | /* Setup Multicast table */ |
745 | ixv_set_rxfilter(adapter); | | 748 | ixv_set_rxfilter(adapter); |
746 | | | 749 | |
747 | /* | | 750 | /* |
748 | * Determine the correct mbuf pool | | 751 | * Determine the correct mbuf pool |
749 | * for doing jumbo/headersplit | | 752 | * for doing jumbo/headersplit |
750 | */ | | 753 | */ |
751 | if (ifp->if_mtu > ETHERMTU) | | 754 | if (adapter->max_frame_size <= MCLBYTES) |
752 | adapter->rx_mbuf_sz = MJUMPAGESIZE; | | | |
753 | else | | | |
754 | adapter->rx_mbuf_sz = MCLBYTES; | | 755 | adapter->rx_mbuf_sz = MCLBYTES; |
| | | 756 | else |
| | | 757 | adapter->rx_mbuf_sz = MJUMPAGESIZE; |
755 | | | 758 | |
756 | /* Prepare receive descriptors and buffers */ | | 759 | /* Prepare receive descriptors and buffers */ |
757 | if (ixgbe_setup_receive_structures(adapter)) { | | 760 | if (ixgbe_setup_receive_structures(adapter)) { |
758 | device_printf(dev, "Could not setup receive structures\n"); | | 761 | device_printf(dev, "Could not setup receive structures\n"); |
759 | ixv_stop(adapter); | | 762 | ixv_stop(adapter); |
760 | return; | | 763 | return; |
761 | } | | 764 | } |
762 | | | 765 | |
763 | /* Configure RX settings */ | | 766 | /* Configure RX settings */ |
764 | ixv_initialize_receive_units(adapter); | | 767 | ixv_initialize_receive_units(adapter); |
765 | | | 768 | |
766 | #if 0 /* XXX isn't it required? -- msaitoh */ | | 769 | #if 0 /* XXX isn't it required? -- msaitoh */ |
767 | /* Set the various hardware offload abilities */ | | 770 | /* Set the various hardware offload abilities */ |
768 | ifp->if_hwassist = 0; | | 771 | ifp->if_hwassist = 0; |
769 | if (ifp->if_capenable & IFCAP_TSO4) | | 772 | if (ifp->if_capenable & IFCAP_TSO4) |
770 | ifp->if_hwassist |= CSUM_TSO; | | 773 | ifp->if_hwassist |= CSUM_TSO; |
771 | if (ifp->if_capenable & IFCAP_TXCSUM) { | | 774 | if (ifp->if_capenable & IFCAP_TXCSUM) { |
772 | ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); | | 775 | ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); |
773 | #if __FreeBSD_version >= 800000 | | 776 | #if __FreeBSD_version >= 800000 |
774 | ifp->if_hwassist |= CSUM_SCTP; | | 777 | ifp->if_hwassist |= CSUM_SCTP; |
775 | #endif | | 778 | #endif |
776 | } | | 779 | } |
777 | #endif | | 780 | #endif |
778 | | | 781 | |
779 | /* Set up VLAN offload and filter */ | | 782 | /* Set up VLAN offload and filter */ |
780 | ixv_setup_vlan_support(adapter); | | 783 | ixv_setup_vlan_support(adapter); |
781 | | | 784 | |
782 | /* Set up MSI-X routing */ | | 785 | /* Set up MSI-X routing */ |
783 | ixv_configure_ivars(adapter); | | 786 | ixv_configure_ivars(adapter); |
784 | | | 787 | |
785 | /* Set up auto-mask */ | | 788 | /* Set up auto-mask */ |
786 | mask = (1 << adapter->vector); | | 789 | mask = (1 << adapter->vector); |
787 | for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) | | 790 | for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) |
788 | mask |= (1 << que->msix); | | 791 | mask |= (1 << que->msix); |
789 | IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask); | | 792 | IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask); |
790 | | | 793 | |
791 | /* Set moderation on the Link interrupt */ | | 794 | /* Set moderation on the Link interrupt */ |
792 | ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); | | 795 | ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); |
793 | | | 796 | |
794 | /* Stats init */ | | 797 | /* Stats init */ |
795 | ixv_init_stats(adapter); | | 798 | ixv_init_stats(adapter); |
796 | | | 799 | |
797 | /* Config/Enable Link */ | | 800 | /* Config/Enable Link */ |
798 | hw->mac.get_link_status = TRUE; | | 801 | hw->mac.get_link_status = TRUE; |
799 | hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, | | 802 | hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, |
800 | FALSE); | | 803 | FALSE); |
801 | | | 804 | |
802 | /* Start watchdog */ | | 805 | /* Start watchdog */ |
803 | callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); | | 806 | callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); |
804 | | | 807 | |
805 | /* And now turn on interrupts */ | | 808 | /* And now turn on interrupts */ |
806 | ixv_enable_intr(adapter); | | 809 | ixv_enable_intr(adapter); |
807 | | | 810 | |
808 | /* Update saved flags. See ixgbe_ifflags_cb() */ | | 811 | /* Update saved flags. See ixgbe_ifflags_cb() */ |
809 | adapter->if_flags = ifp->if_flags; | | 812 | adapter->if_flags = ifp->if_flags; |
810 | adapter->ec_capenable = adapter->osdep.ec.ec_capenable; | | 813 | adapter->ec_capenable = adapter->osdep.ec.ec_capenable; |
811 | | | 814 | |
812 | /* Now inform the stack we're ready */ | | 815 | /* Now inform the stack we're ready */ |
813 | ifp->if_flags |= IFF_RUNNING; | | 816 | ifp->if_flags |= IFF_RUNNING; |
814 | ifp->if_flags &= ~IFF_OACTIVE; | | 817 | ifp->if_flags &= ~IFF_OACTIVE; |
815 | | | 818 | |
816 | return; | | 819 | return; |
817 | } /* ixv_init_locked */ | | 820 | } /* ixv_init_locked */ |
818 | | | 821 | |
/************************************************************************
 * ixv_enable_queue
 *
 *   Drop one reference taken by ixv_disable_queue() and, when the
 *   last reference is released, unmask the queue's interrupt by
 *   writing VTEIMS.  The per-queue dc_mtx makes the count check and
 *   the hardware write atomic against concurrent enable/disable.
 ************************************************************************/
static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u32 queue = 1UL << vector;
	u32 mask;

	mutex_enter(&que->dc_mtx);
	/* Only touch the hardware when the last outstanding disable goes. */
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_enable_queue */
839 | | | 842 | |
/************************************************************************
 * ixv_disable_queue
 *
 *   Mask the queue's interrupt by writing VTEIMC.  Disables are
 *   counted so nested disable/enable pairs work: only the first
 *   caller writes the register; ixv_enable_queue() re-enables when
 *   the count returns to zero.
 ************************************************************************/
static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u32 queue = 1UL << vector;
	u32 mask;

	mutex_enter(&que->dc_mtx);
	/* Already masked by a previous caller; just bump the count. */
	if (que->disabled_count++ > 0)
		goto out;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
out:
	mutex_exit(&que->dc_mtx);
} /* ixv_disable_queue */
860 | | | 863 | |
#if 0
/*
 * Software-trigger (rearm) interrupts for the given queue bitmap by
 * writing VTEICS.  Currently unused and compiled out.
 */
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
} /* ixv_rearm_queues */
#endif
869 | | | 872 | |
870 | | | 873 | |
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Hard-interrupt handler for one TX/RX queue pair.  Masks the
 *   queue's vector, cleans the TX ring, and (when enable_aim is set)
 *   recomputes the adaptive interrupt moderation value from the
 *   average packet size over the last interval.  On NetBSD the RX
 *   cleanup always runs in the queue softint, never here; the vector
 *   stays masked until the softint re-enables it.
 *   Returns 1 (interrupt claimed).
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Mask this vector until RX/TX processing is complete. */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 * - Write out last calculated setting
	 * - Calculate based on average size over
	 *   the last interval.
	 */
	if (que->eitr_setting)
		ixv_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Average bytes per packet, taking the larger of TX and RX. */
	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, ITR interval must be larger than RSC_DELAY.
	 * Currently, we use 2us for RSC_DELAY. The minimum value is always
	 * greater than 2us on 100M (and 10M?(not documented)), but it's not
	 * on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/*
	 * More work pending (always true on NetBSD): defer to the
	 * softint and leave the vector masked; otherwise re-enable now.
	 */
	if (more)
		softint_schedule(que->que_si);
	else /* Re-enable this interrupt */
		ixv_enable_queue(adapter, que->msix);

	return 1;
} /* ixv_msix_que */
961 | | | 964 | |
962 | /************************************************************************ | | 965 | /************************************************************************ |
963 | * ixv_msix_mbx | | 966 | * ixv_msix_mbx |
964 | ************************************************************************/ | | 967 | ************************************************************************/ |
965 | static int | | 968 | static int |
966 | ixv_msix_mbx(void *arg) | | 969 | ixv_msix_mbx(void *arg) |
967 | { | | 970 | { |
968 | struct adapter *adapter = arg; | | 971 | struct adapter *adapter = arg; |
969 | struct ixgbe_hw *hw = &adapter->hw; | | 972 | struct ixgbe_hw *hw = &adapter->hw; |
970 | | | 973 | |
971 | ++adapter->link_irq.ev_count; | | 974 | ++adapter->link_irq.ev_count; |
972 | /* NetBSD: We use auto-clear, so it's not required to write VTEICR */ | | 975 | /* NetBSD: We use auto-clear, so it's not required to write VTEICR */ |
973 | | | 976 | |
974 | /* Link status change */ | | 977 | /* Link status change */ |
975 | hw->mac.get_link_status = TRUE; | | 978 | hw->mac.get_link_status = TRUE; |
976 | softint_schedule(adapter->link_si); | | 979 | softint_schedule(adapter->link_si); |
977 | | | 980 | |
978 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); | | 981 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); |
979 | | | 982 | |
980 | return 1; | | 983 | return 1; |
981 | } /* ixv_msix_mbx */ | | 984 | } /* ixv_msix_mbx */ |
982 | | | 985 | |
983 | static void | | 986 | static void |
984 | ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) | | 987 | ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) |
985 | { | | 988 | { |
986 | | | 989 | |
987 | /* | | 990 | /* |
988 | * Newer devices than 82598 have VF function, so this function is | | 991 | * Newer devices than 82598 have VF function, so this function is |
989 | * simple. | | 992 | * simple. |
990 | */ | | 993 | */ |
991 | itr |= IXGBE_EITR_CNT_WDIS; | | 994 | itr |= IXGBE_EITR_CNT_WDIS; |
992 | | | 995 | |
993 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr); | | 996 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr); |
994 | } | | 997 | } |
995 | | | 998 | |
996 | | | 999 | |
997 | /************************************************************************ | | 1000 | /************************************************************************ |
998 | * ixv_media_status - Media Ioctl callback | | 1001 | * ixv_media_status - Media Ioctl callback |
999 | * | | 1002 | * |
1000 | * Called whenever the user queries the status of | | 1003 | * Called whenever the user queries the status of |
1001 | * the interface using ifconfig. | | 1004 | * the interface using ifconfig. |
1002 | ************************************************************************/ | | 1005 | ************************************************************************/ |
1003 | static void | | 1006 | static void |
1004 | ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) | | 1007 | ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) |
1005 | { | | 1008 | { |
1006 | struct adapter *adapter = ifp->if_softc; | | 1009 | struct adapter *adapter = ifp->if_softc; |
1007 | | | 1010 | |
1008 | INIT_DEBUGOUT("ixv_media_status: begin"); | | 1011 | INIT_DEBUGOUT("ixv_media_status: begin"); |
1009 | IXGBE_CORE_LOCK(adapter); | | 1012 | IXGBE_CORE_LOCK(adapter); |
1010 | ixv_update_link_status(adapter); | | 1013 | ixv_update_link_status(adapter); |
1011 | | | 1014 | |
1012 | ifmr->ifm_status = IFM_AVALID; | | 1015 | ifmr->ifm_status = IFM_AVALID; |
1013 | ifmr->ifm_active = IFM_ETHER; | | 1016 | ifmr->ifm_active = IFM_ETHER; |
1014 | | | 1017 | |
1015 | if (adapter->link_active != LINK_STATE_UP) { | | 1018 | if (adapter->link_active != LINK_STATE_UP) { |
1016 | ifmr->ifm_active |= IFM_NONE; | | 1019 | ifmr->ifm_active |= IFM_NONE; |
1017 | IXGBE_CORE_UNLOCK(adapter); | | 1020 | IXGBE_CORE_UNLOCK(adapter); |
1018 | return; | | 1021 | return; |
1019 | } | | 1022 | } |
1020 | | | 1023 | |
1021 | ifmr->ifm_status |= IFM_ACTIVE; | | 1024 | ifmr->ifm_status |= IFM_ACTIVE; |
1022 | | | 1025 | |
1023 | switch (adapter->link_speed) { | | 1026 | switch (adapter->link_speed) { |
1024 | case IXGBE_LINK_SPEED_10GB_FULL: | | 1027 | case IXGBE_LINK_SPEED_10GB_FULL: |
1025 | ifmr->ifm_active |= IFM_10G_T | IFM_FDX; | | 1028 | ifmr->ifm_active |= IFM_10G_T | IFM_FDX; |
1026 | break; | | 1029 | break; |
1027 | case IXGBE_LINK_SPEED_5GB_FULL: | | 1030 | case IXGBE_LINK_SPEED_5GB_FULL: |
1028 | ifmr->ifm_active |= IFM_5000_T | IFM_FDX; | | 1031 | ifmr->ifm_active |= IFM_5000_T | IFM_FDX; |
1029 | break; | | 1032 | break; |
1030 | case IXGBE_LINK_SPEED_2_5GB_FULL: | | 1033 | case IXGBE_LINK_SPEED_2_5GB_FULL: |
1031 | ifmr->ifm_active |= IFM_2500_T | IFM_FDX; | | 1034 | ifmr->ifm_active |= IFM_2500_T | IFM_FDX; |
1032 | break; | | 1035 | break; |
1033 | case IXGBE_LINK_SPEED_1GB_FULL: | | 1036 | case IXGBE_LINK_SPEED_1GB_FULL: |
1034 | ifmr->ifm_active |= IFM_1000_T | IFM_FDX; | | 1037 | ifmr->ifm_active |= IFM_1000_T | IFM_FDX; |
1035 | break; | | 1038 | break; |
1036 | case IXGBE_LINK_SPEED_100_FULL: | | 1039 | case IXGBE_LINK_SPEED_100_FULL: |
1037 | ifmr->ifm_active |= IFM_100_TX | IFM_FDX; | | 1040 | ifmr->ifm_active |= IFM_100_TX | IFM_FDX; |
1038 | break; | | 1041 | break; |
1039 | case IXGBE_LINK_SPEED_10_FULL: | | 1042 | case IXGBE_LINK_SPEED_10_FULL: |
1040 | ifmr->ifm_active |= IFM_10_T | IFM_FDX; | | 1043 | ifmr->ifm_active |= IFM_10_T | IFM_FDX; |
1041 | break; | | 1044 | break; |
1042 | } | | 1045 | } |
1043 | | | 1046 | |
1044 | ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); | | 1047 | ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); |
1045 | | | 1048 | |
1046 | IXGBE_CORE_UNLOCK(adapter); | | 1049 | IXGBE_CORE_UNLOCK(adapter); |
1047 | } /* ixv_media_status */ | | 1050 | } /* ixv_media_status */ |
1048 | | | 1051 | |
1049 | /************************************************************************ | | 1052 | /************************************************************************ |
1050 | * ixv_media_change - Media Ioctl callback | | 1053 | * ixv_media_change - Media Ioctl callback |
1051 | * | | 1054 | * |
1052 | * Called when the user changes speed/duplex using | | 1055 | * Called when the user changes speed/duplex using |
1053 | * media/mediopt option with ifconfig. | | 1056 | * media/mediopt option with ifconfig. |
1054 | ************************************************************************/ | | 1057 | ************************************************************************/ |
1055 | static int | | 1058 | static int |
1056 | ixv_media_change(struct ifnet *ifp) | | 1059 | ixv_media_change(struct ifnet *ifp) |
1057 | { | | 1060 | { |
1058 | struct adapter *adapter = ifp->if_softc; | | 1061 | struct adapter *adapter = ifp->if_softc; |
1059 | struct ifmedia *ifm = &adapter->media; | | 1062 | struct ifmedia *ifm = &adapter->media; |
1060 | | | 1063 | |
1061 | INIT_DEBUGOUT("ixv_media_change: begin"); | | 1064 | INIT_DEBUGOUT("ixv_media_change: begin"); |
1062 | | | 1065 | |
1063 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) | | 1066 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
1064 | return (EINVAL); | | 1067 | return (EINVAL); |
1065 | | | 1068 | |
1066 | switch (IFM_SUBTYPE(ifm->ifm_media)) { | | 1069 | switch (IFM_SUBTYPE(ifm->ifm_media)) { |
1067 | case IFM_AUTO: | | 1070 | case IFM_AUTO: |
1068 | break; | | 1071 | break; |
1069 | default: | | 1072 | default: |
1070 | device_printf(adapter->dev, "Only auto media type\n"); | | 1073 | device_printf(adapter->dev, "Only auto media type\n"); |
1071 | return (EINVAL); | | 1074 | return (EINVAL); |
1072 | } | | 1075 | } |
1073 | | | 1076 | |
1074 | return (0); | | 1077 | return (0); |
1075 | } /* ixv_media_change */ | | 1078 | } /* ixv_media_change */ |
1076 | | | 1079 | |
1077 | /************************************************************************ | | 1080 | /************************************************************************ |
1078 | * ixv_negotiate_api | | 1081 | * ixv_negotiate_api |
1079 | * | | 1082 | * |
1080 | * Negotiate the Mailbox API with the PF; | | 1083 | * Negotiate the Mailbox API with the PF; |
1081 | * start with the most featured API first. | | 1084 | * start with the most featured API first. |
1082 | ************************************************************************/ | | 1085 | ************************************************************************/ |
1083 | static int | | 1086 | static int |
1084 | ixv_negotiate_api(struct adapter *adapter) | | 1087 | ixv_negotiate_api(struct adapter *adapter) |
1085 | { | | 1088 | { |
1086 | struct ixgbe_hw *hw = &adapter->hw; | | 1089 | struct ixgbe_hw *hw = &adapter->hw; |
1087 | int mbx_api[] = { ixgbe_mbox_api_13, | | 1090 | int mbx_api[] = { ixgbe_mbox_api_13, |
1088 | ixgbe_mbox_api_12, | | 1091 | ixgbe_mbox_api_12, |
1089 | ixgbe_mbox_api_11, | | 1092 | ixgbe_mbox_api_11, |
1090 | ixgbe_mbox_api_10, | | 1093 | ixgbe_mbox_api_10, |
1091 | ixgbe_mbox_api_unknown }; | | 1094 | ixgbe_mbox_api_unknown }; |
1092 | int i = 0; | | 1095 | int i = 0; |
1093 | | | 1096 | |
1094 | while (mbx_api[i] != ixgbe_mbox_api_unknown) { | | 1097 | while (mbx_api[i] != ixgbe_mbox_api_unknown) { |
1095 | if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) | | 1098 | if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) |
1096 | return (0); | | 1099 | return (0); |
1097 | i++; | | 1100 | i++; |
1098 | } | | 1101 | } |
1099 | | | 1102 | |
1100 | return (EINVAL); | | 1103 | return (EINVAL); |
1101 | } /* ixv_negotiate_api */ | | 1104 | } /* ixv_negotiate_api */ |
1102 | | | 1105 | |
1103 | | | 1106 | |
/************************************************************************
 * ixv_set_rxfilter - Receive filter (promisc/multicast) update
 *
 *   Called whenever the multicast address list or IFF_PROMISC
 *   changes.  Tries, in order: promiscuous mode (if requested),
 *   all-multicast (if the list overflows or contains ranges), then
 *   programs the exact multicast table via the shared code.  On a
 *   partial failure later steps still run so the filter is as close
 *   to correct as the PF allows.  Returns 0 or the first meaningful
 *   errno.  Must be called with the core lock held.
 ************************************************************************/
static int
ixv_set_rxfilter(struct adapter *adapter)
{
	u8 mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ifnet *ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 *update_ptr;
	int mcnt = 0;
	struct ethercom *ec = &adapter->osdep.ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	bool overflow = false;
	int error, rc = 0;

	KASSERT(mutex_owned(&adapter->core_mtx));
	IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");

	/* 1: For PROMISC */
	if (ifp->if_flags & IFF_PROMISC) {
		/* Ask the PF to put this VF in promiscuous mode. */
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_PROMISC);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support promisc mode\n");
			error = EOPNOTSUPP;
		} else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
			device_printf(adapter->dev,
			    "the PF may not in promisc mode\n");
			error = EINVAL;
		} else if (error) {
			device_printf(adapter->dev,
			    "failed to set promisc mode. error = %d\n",
			    error);
			error = EIO;
		} else
			return 0;
		/* Remember the failure but keep going with multicast. */
		rc = error;
	}

	/* 2: For ALLMULTI or normal */
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * An address range (addrlo != addrhi) or more entries
		 * than the VF mailbox carries forces all-multicast.
		 */
		if ((mcnt >= IXGBE_MAX_VF_MC) ||
		    (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			ETHER_ADDR_LEN) != 0)) {
			overflow = true;
			break;
		}
		bcopy(enm->enm_addrlo,
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	/* 3: For ALLMULTI */
	if (overflow) {
		error = hw->mac.ops.update_xcast_mode(hw,
		    IXGBEVF_XCAST_MODE_ALLMULTI);
		if (error == IXGBE_ERR_NOT_TRUSTED) {
			device_printf(adapter->dev,
			    "this interface is not trusted\n");
			error = EPERM;
		} else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
			device_printf(adapter->dev,
			    "the PF doesn't support allmulti mode\n");
			error = EOPNOTSUPP;
		} else if (error) {
			device_printf(adapter->dev,
			    "number of Ethernet multicast addresses "
			    "exceeds the limit (%d). error = %d\n",
			    IXGBE_MAX_VF_MC, error);
			error = ENOSPC;
		} else {
			ETHER_LOCK(ec);
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			return rc; /* promisc (step 1) may have failed */
		}

		if (rc == 0)
			rc = error;

		/* Continue to update the multicast table as many as we can */
	}

	/* 4: For normal operation */
	error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
	if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
		/* Normal operation */
		ETHER_LOCK(ec);
		ec->ec_flags &= ~ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		error = 0;
	} else if (error) {
		device_printf(adapter->dev,
		    "failed to set Ethernet multicast address "
		    "operation to normal. error = %d\n", error);
	}

	update_ptr = mta;

	/* Program the gathered exact addresses via the shared code. */
	error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
	if (rc == 0)
		rc = error;

	return rc;
} /* ixv_set_rxfilter */
1224 | | | 1227 | |
1225 | /************************************************************************ | | 1228 | /************************************************************************ |
1226 | * ixv_mc_array_itr | | 1229 | * ixv_mc_array_itr |
1227 | * | | 1230 | * |
1228 | * An iterator function needed by the multicast shared code. | | 1231 | * An iterator function needed by the multicast shared code. |
1229 | * It feeds the shared code routine the addresses in the | | 1232 | * It feeds the shared code routine the addresses in the |
1230 | * array of ixv_set_rxfilter() one by one. | | 1233 | * array of ixv_set_rxfilter() one by one. |
1231 | ************************************************************************/ | | 1234 | ************************************************************************/ |
1232 | static u8 * | | 1235 | static u8 * |
1233 | ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) | | 1236 | ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) |
1234 | { | | 1237 | { |
1235 | u8 *addr = *update_ptr; | | 1238 | u8 *addr = *update_ptr; |
1236 | u8 *newptr; | | 1239 | u8 *newptr; |
1237 | | | 1240 | |
1238 | *vmdq = 0; | | 1241 | *vmdq = 0; |
1239 | | | 1242 | |
1240 | newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; | | 1243 | newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; |
1241 | *update_ptr = newptr; | | 1244 | *update_ptr = newptr; |
1242 | | | 1245 | |
1243 | return addr; | | 1246 | return addr; |
1244 | } /* ixv_mc_array_itr */ | | 1247 | } /* ixv_mc_array_itr */ |
1245 | | | 1248 | |
/************************************************************************
 * ixv_local_timer - Timer routine
 *
 *   Periodic callout: acquires the core lock and defers to
 *   ixv_local_timer_locked() for the link check, statistics update,
 *   and TX watchdog.
 ************************************************************************/
static void
ixv_local_timer(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixv_local_timer_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
1261 | | | 1264 | |
1262 | static void | | 1265 | static void |
1263 | ixv_local_timer_locked(void *arg) | | 1266 | ixv_local_timer_locked(void *arg) |
1264 | { | | 1267 | { |
1265 | struct adapter *adapter = arg; | | 1268 | struct adapter *adapter = arg; |
1266 | device_t dev = adapter->dev; | | 1269 | device_t dev = adapter->dev; |
1267 | struct ix_queue *que = adapter->queues; | | 1270 | struct ix_queue *que = adapter->queues; |
1268 | u64 queues = 0; | | 1271 | u64 queues = 0; |
1269 | u64 v0, v1, v2, v3, v4, v5, v6, v7; | | 1272 | u64 v0, v1, v2, v3, v4, v5, v6, v7; |
1270 | int hung = 0; | | 1273 | int hung = 0; |
1271 | int i; | | 1274 | int i; |
1272 | | | 1275 | |
1273 | KASSERT(mutex_owned(&adapter->core_mtx)); | | 1276 | KASSERT(mutex_owned(&adapter->core_mtx)); |
1274 | | | 1277 | |
1275 | if (ixv_check_link(adapter)) { | | 1278 | if (ixv_check_link(adapter)) { |
1276 | ixv_init_locked(adapter); | | 1279 | ixv_init_locked(adapter); |
1277 | return; | | 1280 | return; |
1278 | } | | 1281 | } |
1279 | | | 1282 | |
1280 | /* Stats Update */ | | 1283 | /* Stats Update */ |
1281 | ixv_update_stats(adapter); | | 1284 | ixv_update_stats(adapter); |
1282 | | | 1285 | |
1283 | /* Update some event counters */ | | 1286 | /* Update some event counters */ |
1284 | v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0; | | 1287 | v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0; |
1285 | que = adapter->queues; | | 1288 | que = adapter->queues; |
1286 | for (i = 0; i < adapter->num_queues; i++, que++) { | | 1289 | for (i = 0; i < adapter->num_queues; i++, que++) { |
1287 | struct tx_ring *txr = que->txr; | | 1290 | struct tx_ring *txr = que->txr; |
1288 | | | 1291 | |
1289 | v0 += txr->q_efbig_tx_dma_setup; | | 1292 | v0 += txr->q_efbig_tx_dma_setup; |
1290 | v1 += txr->q_mbuf_defrag_failed; | | 1293 | v1 += txr->q_mbuf_defrag_failed; |
1291 | v2 += txr->q_efbig2_tx_dma_setup; | | 1294 | v2 += txr->q_efbig2_tx_dma_setup; |
1292 | v3 += txr->q_einval_tx_dma_setup; | | 1295 | v3 += txr->q_einval_tx_dma_setup; |
1293 | v4 += txr->q_other_tx_dma_setup; | | 1296 | v4 += txr->q_other_tx_dma_setup; |
1294 | v5 += txr->q_eagain_tx_dma_setup; | | 1297 | v5 += txr->q_eagain_tx_dma_setup; |
1295 | v6 += txr->q_enomem_tx_dma_setup; | | 1298 | v6 += txr->q_enomem_tx_dma_setup; |
1296 | v7 += txr->q_tso_err; | | 1299 | v7 += txr->q_tso_err; |
1297 | } | | 1300 | } |
1298 | adapter->efbig_tx_dma_setup.ev_count = v0; | | 1301 | adapter->efbig_tx_dma_setup.ev_count = v0; |
1299 | adapter->mbuf_defrag_failed.ev_count = v1; | | 1302 | adapter->mbuf_defrag_failed.ev_count = v1; |
1300 | adapter->efbig2_tx_dma_setup.ev_count = v2; | | 1303 | adapter->efbig2_tx_dma_setup.ev_count = v2; |
1301 | adapter->einval_tx_dma_setup.ev_count = v3; | | 1304 | adapter->einval_tx_dma_setup.ev_count = v3; |
1302 | adapter->other_tx_dma_setup.ev_count = v4; | | 1305 | adapter->other_tx_dma_setup.ev_count = v4; |
1303 | adapter->eagain_tx_dma_setup.ev_count = v5; | | 1306 | adapter->eagain_tx_dma_setup.ev_count = v5; |
1304 | adapter->enomem_tx_dma_setup.ev_count = v6; | | 1307 | adapter->enomem_tx_dma_setup.ev_count = v6; |
1305 | adapter->tso_err.ev_count = v7; | | 1308 | adapter->tso_err.ev_count = v7; |
1306 | | | 1309 | |
1307 | /* | | 1310 | /* |
1308 | * Check the TX queues status | | 1311 | * Check the TX queues status |
1309 | * - mark hung queues so we don't schedule on them | | 1312 | * - mark hung queues so we don't schedule on them |
1310 | * - watchdog only if all queues show hung | | 1313 | * - watchdog only if all queues show hung |
1311 | */ | | 1314 | */ |
1312 | que = adapter->queues; | | 1315 | que = adapter->queues; |
1313 | for (i = 0; i < adapter->num_queues; i++, que++) { | | 1316 | for (i = 0; i < adapter->num_queues; i++, que++) { |
1314 | /* Keep track of queues with work for soft irq */ | | 1317 | /* Keep track of queues with work for soft irq */ |
1315 | if (que->txr->busy) | | 1318 | if (que->txr->busy) |
1316 | queues |= ((u64)1 << que->me); | | 1319 | queues |= ((u64)1 << que->me); |
1317 | /* | | 1320 | /* |
1318 | * Each time txeof runs without cleaning, but there | | 1321 | * Each time txeof runs without cleaning, but there |
1319 | * are uncleaned descriptors it increments busy. If | | 1322 | * are uncleaned descriptors it increments busy. If |
1320 | * we get to the MAX we declare it hung. | | 1323 | * we get to the MAX we declare it hung. |
1321 | */ | | 1324 | */ |
1322 | if (que->busy == IXGBE_QUEUE_HUNG) { | | 1325 | if (que->busy == IXGBE_QUEUE_HUNG) { |
1323 | ++hung; | | 1326 | ++hung; |
1324 | /* Mark the queue as inactive */ | | 1327 | /* Mark the queue as inactive */ |
1325 | adapter->active_queues &= ~((u64)1 << que->me); | | 1328 | adapter->active_queues &= ~((u64)1 << que->me); |
1326 | continue; | | 1329 | continue; |
1327 | } else { | | 1330 | } else { |
1328 | /* Check if we've come back from hung */ | | 1331 | /* Check if we've come back from hung */ |
1329 | if ((adapter->active_queues & ((u64)1 << que->me)) == 0) | | 1332 | if ((adapter->active_queues & ((u64)1 << que->me)) == 0) |
1330 | adapter->active_queues |= ((u64)1 << que->me); | | 1333 | adapter->active_queues |= ((u64)1 << que->me); |
1331 | } | | 1334 | } |
1332 | if (que->busy >= IXGBE_MAX_TX_BUSY) { | | 1335 | if (que->busy >= IXGBE_MAX_TX_BUSY) { |
1333 | device_printf(dev, | | 1336 | device_printf(dev, |
1334 | "Warning queue %d appears to be hung!\n", i); | | 1337 | "Warning queue %d appears to be hung!\n", i); |
1335 | que->txr->busy = IXGBE_QUEUE_HUNG; | | 1338 | que->txr->busy = IXGBE_QUEUE_HUNG; |
1336 | ++hung; | | 1339 | ++hung; |
1337 | } | | 1340 | } |
1338 | } | | 1341 | } |
1339 | | | 1342 | |
1340 | /* Only truly watchdog if all queues show hung */ | | 1343 | /* Only truly watchdog if all queues show hung */ |
1341 | if (hung == adapter->num_queues) | | 1344 | if (hung == adapter->num_queues) |
1342 | goto watchdog; | | 1345 | goto watchdog; |
1343 | #if 0 | | 1346 | #if 0 |
1344 | else if (queues != 0) { /* Force an IRQ on queues with work */ | | 1347 | else if (queues != 0) { /* Force an IRQ on queues with work */ |
1345 | ixv_rearm_queues(adapter, queues); | | 1348 | ixv_rearm_queues(adapter, queues); |
1346 | } | | 1349 | } |
1347 | #endif | | 1350 | #endif |
1348 | | | 1351 | |
1349 | callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); | | 1352 | callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); |
1350 | | | 1353 | |
1351 | return; | | 1354 | return; |
1352 | | | 1355 | |
1353 | watchdog: | | 1356 | watchdog: |
1354 | | | 1357 | |
1355 | device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); | | 1358 | device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); |
1356 | adapter->ifp->if_flags &= ~IFF_RUNNING; | | 1359 | adapter->ifp->if_flags &= ~IFF_RUNNING; |
1357 | adapter->watchdog_events.ev_count++; | | 1360 | adapter->watchdog_events.ev_count++; |
1358 | ixv_init_locked(adapter); | | 1361 | ixv_init_locked(adapter); |
1359 | } /* ixv_local_timer */ | | 1362 | } /* ixv_local_timer */ |
1360 | | | 1363 | |
1361 | /************************************************************************ | | 1364 | /************************************************************************ |
1362 | * ixv_update_link_status - Update OS on link state | | 1365 | * ixv_update_link_status - Update OS on link state |
1363 | * | | 1366 | * |
1364 | * Note: Only updates the OS on the cached link state. | | 1367 | * Note: Only updates the OS on the cached link state. |
1365 | * The real check of the hardware only happens with | | 1368 | * The real check of the hardware only happens with |
1366 | * a link interrupt. | | 1369 | * a link interrupt. |
1367 | ************************************************************************/ | | 1370 | ************************************************************************/ |
1368 | static void | | 1371 | static void |
1369 | ixv_update_link_status(struct adapter *adapter) | | 1372 | ixv_update_link_status(struct adapter *adapter) |
1370 | { | | 1373 | { |
1371 | struct ifnet *ifp = adapter->ifp; | | 1374 | struct ifnet *ifp = adapter->ifp; |
1372 | device_t dev = adapter->dev; | | 1375 | device_t dev = adapter->dev; |
1373 | | | 1376 | |
1374 | KASSERT(mutex_owned(&adapter->core_mtx)); | | 1377 | KASSERT(mutex_owned(&adapter->core_mtx)); |
1375 | | | 1378 | |
1376 | if (adapter->link_up) { | | 1379 | if (adapter->link_up) { |
1377 | if (adapter->link_active != LINK_STATE_UP) { | | 1380 | if (adapter->link_active != LINK_STATE_UP) { |
1378 | if (bootverbose) { | | 1381 | if (bootverbose) { |
1379 | const char *bpsmsg; | | 1382 | const char *bpsmsg; |
1380 | | | 1383 | |
1381 | switch (adapter->link_speed) { | | 1384 | switch (adapter->link_speed) { |
1382 | case IXGBE_LINK_SPEED_10GB_FULL: | | 1385 | case IXGBE_LINK_SPEED_10GB_FULL: |
1383 | bpsmsg = "10 Gbps"; | | 1386 | bpsmsg = "10 Gbps"; |
1384 | break; | | 1387 | break; |
1385 | case IXGBE_LINK_SPEED_5GB_FULL: | | 1388 | case IXGBE_LINK_SPEED_5GB_FULL: |
1386 | bpsmsg = "5 Gbps"; | | 1389 | bpsmsg = "5 Gbps"; |
1387 | break; | | 1390 | break; |
1388 | case IXGBE_LINK_SPEED_2_5GB_FULL: | | 1391 | case IXGBE_LINK_SPEED_2_5GB_FULL: |
1389 | bpsmsg = "2.5 Gbps"; | | 1392 | bpsmsg = "2.5 Gbps"; |
1390 | break; | | 1393 | break; |
1391 | case IXGBE_LINK_SPEED_1GB_FULL: | | 1394 | case IXGBE_LINK_SPEED_1GB_FULL: |
1392 | bpsmsg = "1 Gbps"; | | 1395 | bpsmsg = "1 Gbps"; |
1393 | break; | | 1396 | break; |
1394 | case IXGBE_LINK_SPEED_100_FULL: | | 1397 | case IXGBE_LINK_SPEED_100_FULL: |
1395 | bpsmsg = "100 Mbps"; | | 1398 | bpsmsg = "100 Mbps"; |
1396 | break; | | 1399 | break; |
1397 | case IXGBE_LINK_SPEED_10_FULL: | | 1400 | case IXGBE_LINK_SPEED_10_FULL: |
1398 | bpsmsg = "10 Mbps"; | | 1401 | bpsmsg = "10 Mbps"; |
1399 | break; | | 1402 | break; |
1400 | default: | | 1403 | default: |
1401 | bpsmsg = "unknown speed"; | | 1404 | bpsmsg = "unknown speed"; |
1402 | break; | | 1405 | break; |
1403 | } | | 1406 | } |
1404 | device_printf(dev, "Link is up %s %s \n", | | 1407 | device_printf(dev, "Link is up %s %s \n", |
1405 | bpsmsg, "Full Duplex"); | | 1408 | bpsmsg, "Full Duplex"); |
1406 | } | | 1409 | } |
1407 | adapter->link_active = LINK_STATE_UP; | | 1410 | adapter->link_active = LINK_STATE_UP; |
1408 | if_link_state_change(ifp, LINK_STATE_UP); | | 1411 | if_link_state_change(ifp, LINK_STATE_UP); |
1409 | } | | 1412 | } |
1410 | } else { | | 1413 | } else { |
1411 | /* | | 1414 | /* |
1412 | * Do it when link active changes to DOWN. i.e. | | 1415 | * Do it when link active changes to DOWN. i.e. |
1413 | * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN | | 1416 | * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN |
1414 | * b) LINK_STATE_UP -> LINK_STATE_DOWN | | 1417 | * b) LINK_STATE_UP -> LINK_STATE_DOWN |
1415 | */ | | 1418 | */ |
1416 | if (adapter->link_active != LINK_STATE_DOWN) { | | 1419 | if (adapter->link_active != LINK_STATE_DOWN) { |
1417 | if (bootverbose) | | 1420 | if (bootverbose) |
1418 | device_printf(dev, "Link is Down\n"); | | 1421 | device_printf(dev, "Link is Down\n"); |
1419 | if_link_state_change(ifp, LINK_STATE_DOWN); | | 1422 | if_link_state_change(ifp, LINK_STATE_DOWN); |
1420 | adapter->link_active = LINK_STATE_DOWN; | | 1423 | adapter->link_active = LINK_STATE_DOWN; |
1421 | } | | 1424 | } |
1422 | } | | 1425 | } |
1423 | } /* ixv_update_link_status */ | | 1426 | } /* ixv_update_link_status */ |
1424 | | | 1427 | |
1425 | | | 1428 | |
1426 | /************************************************************************ | | 1429 | /************************************************************************ |
1427 | * ixv_stop - Stop the hardware | | 1430 | * ixv_stop - Stop the hardware |
1428 | * | | 1431 | * |
1429 | * Disables all traffic on the adapter by issuing a | | 1432 | * Disables all traffic on the adapter by issuing a |
1430 | * global reset on the MAC and deallocates TX/RX buffers. | | 1433 | * global reset on the MAC and deallocates TX/RX buffers. |
1431 | ************************************************************************/ | | 1434 | ************************************************************************/ |
1432 | static void | | 1435 | static void |
1433 | ixv_ifstop(struct ifnet *ifp, int disable) | | 1436 | ixv_ifstop(struct ifnet *ifp, int disable) |
1434 | { | | 1437 | { |
1435 | struct adapter *adapter = ifp->if_softc; | | 1438 | struct adapter *adapter = ifp->if_softc; |
1436 | | | 1439 | |
1437 | IXGBE_CORE_LOCK(adapter); | | 1440 | IXGBE_CORE_LOCK(adapter); |
1438 | ixv_stop(adapter); | | 1441 | ixv_stop(adapter); |
1439 | IXGBE_CORE_UNLOCK(adapter); | | 1442 | IXGBE_CORE_UNLOCK(adapter); |
1440 | } | | 1443 | } |
1441 | | | 1444 | |
/*
 * ixv_stop - Internal stop routine; core lock must be held.
 *
 * Disables interrupts, marks the interface down, resets and stops the
 * MAC, cancels the timer callout, and reprograms RAR[0].
 */
static void
ixv_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	hw->mac.ops.reset_hw(hw);
	/*
	 * Clear adapter_stopped before calling stop_adapter so the stop
	 * actually executes.  NOTE(review): presumably stop_adapter()
	 * returns early when this flag is already set, as in upstream
	 * ixgbe common code — confirm against ixgbe_vf.c.
	 */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixv_stop */
1469 | | | 1472 | |
1470 | | | 1473 | |
/************************************************************************
 * ixv_allocate_pci_resources
 *
 * Maps BAR0 (the device register window), forcing the mapping to be
 * non-prefetchable, enables PCI memory-space decoding, and records the
 * tuneable queue count.  Returns 0 on success or ENXIO on failure.
 ************************************************************************/
static int
ixv_allocate_pci_resources(struct adapter *adapter,
    const struct pci_attach_args *pa)
{
	pcireg_t memtype, csr;
	device_t dev = adapter->dev;
	bus_addr_t addr;
	int flags;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		adapter->osdep.mem_bus_space_tag = pa->pa_memt;
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
		      memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
			goto map_err;
		/* Registers must not be mapped prefetchable. */
		if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
			aprint_normal_dev(dev, "clearing prefetchable bit\n");
			flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
		}
		if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
		      adapter->osdep.mem_size, flags,
		      &adapter->osdep.mem_bus_space_handle) != 0) {
map_err:
			/* mem_size == 0 tells ixv_free_pci_resources not to unmap. */
			adapter->osdep.mem_size = 0;
			aprint_error_dev(dev, "unable to map BAR0\n");
			return ENXIO;
		}
		/*
		 * Enable address decoding for memory range in case it's not
		 * set.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		csr |= PCI_COMMAND_MEM_ENABLE;
		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
		    csr);
		break;
	default:
		aprint_error_dev(dev, "unexpected type on BAR0\n");
		return ENXIO;
	}

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;

	return (0);
} /* ixv_allocate_pci_resources */
1523 | | | 1526 | |
1524 | /************************************************************************ | | 1527 | /************************************************************************ |
1525 | * ixv_free_pci_resources | | 1528 | * ixv_free_pci_resources |
1526 | ************************************************************************/ | | 1529 | ************************************************************************/ |
1527 | static void | | 1530 | static void |
1528 | ixv_free_pci_resources(struct adapter * adapter) | | 1531 | ixv_free_pci_resources(struct adapter * adapter) |
1529 | { | | 1532 | { |
1530 | struct ix_queue *que = adapter->queues; | | 1533 | struct ix_queue *que = adapter->queues; |
1531 | int rid; | | 1534 | int rid; |
1532 | | | 1535 | |
1533 | /* | | 1536 | /* |
1534 | * Release all msix queue resources: | | 1537 | * Release all msix queue resources: |
1535 | */ | | 1538 | */ |
1536 | for (int i = 0; i < adapter->num_queues; i++, que++) { | | 1539 | for (int i = 0; i < adapter->num_queues; i++, que++) { |
1537 | if (que->res != NULL) | | 1540 | if (que->res != NULL) |
1538 | pci_intr_disestablish(adapter->osdep.pc, | | 1541 | pci_intr_disestablish(adapter->osdep.pc, |
1539 | adapter->osdep.ihs[i]); | | 1542 | adapter->osdep.ihs[i]); |
1540 | } | | 1543 | } |
1541 | | | 1544 | |
1542 | | | 1545 | |
1543 | /* Clean the Mailbox interrupt last */ | | 1546 | /* Clean the Mailbox interrupt last */ |
1544 | rid = adapter->vector; | | 1547 | rid = adapter->vector; |
1545 | | | 1548 | |
1546 | if (adapter->osdep.ihs[rid] != NULL) { | | 1549 | if (adapter->osdep.ihs[rid] != NULL) { |
1547 | pci_intr_disestablish(adapter->osdep.pc, | | 1550 | pci_intr_disestablish(adapter->osdep.pc, |
1548 | adapter->osdep.ihs[rid]); | | 1551 | adapter->osdep.ihs[rid]); |
1549 | adapter->osdep.ihs[rid] = NULL; | | 1552 | adapter->osdep.ihs[rid] = NULL; |
1550 | } | | 1553 | } |
1551 | | | 1554 | |
1552 | pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, | | 1555 | pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, |
1553 | adapter->osdep.nintrs); | | 1556 | adapter->osdep.nintrs); |
1554 | | | 1557 | |
1555 | if (adapter->osdep.mem_size != 0) { | | 1558 | if (adapter->osdep.mem_size != 0) { |
1556 | bus_space_unmap(adapter->osdep.mem_bus_space_tag, | | 1559 | bus_space_unmap(adapter->osdep.mem_bus_space_tag, |
1557 | adapter->osdep.mem_bus_space_handle, | | 1560 | adapter->osdep.mem_bus_space_handle, |
1558 | adapter->osdep.mem_size); | | 1561 | adapter->osdep.mem_size); |
1559 | } | | 1562 | } |
1560 | | | 1563 | |
1561 | return; | | 1564 | return; |
1562 | } /* ixv_free_pci_resources */ | | 1565 | } /* ixv_free_pci_resources */ |
1563 | | | 1566 | |
1564 | /************************************************************************ | | 1567 | /************************************************************************ |
1565 | * ixv_setup_interface | | 1568 | * ixv_setup_interface |
1566 | * | | 1569 | * |
1567 | * Setup networking device structure and register an interface. | | 1570 | * Setup networking device structure and register an interface. |
1568 | ************************************************************************/ | | 1571 | ************************************************************************/ |
1569 | static int | | 1572 | static int |
1570 | ixv_setup_interface(device_t dev, struct adapter *adapter) | | 1573 | ixv_setup_interface(device_t dev, struct adapter *adapter) |
1571 | { | | 1574 | { |
1572 | struct ethercom *ec = &adapter->osdep.ec; | | 1575 | struct ethercom *ec = &adapter->osdep.ec; |
1573 | struct ifnet *ifp; | | 1576 | struct ifnet *ifp; |
1574 | int rv; | | 1577 | int rv; |
1575 | | | 1578 | |
1576 | INIT_DEBUGOUT("ixv_setup_interface: begin"); | | 1579 | INIT_DEBUGOUT("ixv_setup_interface: begin"); |
1577 | | | 1580 | |
1578 | ifp = adapter->ifp = &ec->ec_if; | | 1581 | ifp = adapter->ifp = &ec->ec_if; |
1579 | strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); | | 1582 | strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); |
1580 | ifp->if_baudrate = IF_Gbps(10); | | 1583 | ifp->if_baudrate = IF_Gbps(10); |
1581 | ifp->if_init = ixv_init; | | 1584 | ifp->if_init = ixv_init; |
1582 | ifp->if_stop = ixv_ifstop; | | 1585 | ifp->if_stop = ixv_ifstop; |
1583 | ifp->if_softc = adapter; | | 1586 | ifp->if_softc = adapter; |
1584 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 1587 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
1585 | #ifdef IXGBE_MPSAFE | | 1588 | #ifdef IXGBE_MPSAFE |
1586 | ifp->if_extflags = IFEF_MPSAFE; | | 1589 | ifp->if_extflags = IFEF_MPSAFE; |
1587 | #endif | | 1590 | #endif |
1588 | ifp->if_ioctl = ixv_ioctl; | | 1591 | ifp->if_ioctl = ixv_ioctl; |
1589 | if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { | | 1592 | if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { |
1590 | #if 0 | | 1593 | #if 0 |
1591 | ixv_start_locked = ixgbe_legacy_start_locked; | | 1594 | ixv_start_locked = ixgbe_legacy_start_locked; |
1592 | #endif | | 1595 | #endif |
1593 | } else { | | 1596 | } else { |
1594 | ifp->if_transmit = ixgbe_mq_start; | | 1597 | ifp->if_transmit = ixgbe_mq_start; |
1595 | #if 0 | | 1598 | #if 0 |
1596 | ixv_start_locked = ixgbe_mq_start_locked; | | 1599 | ixv_start_locked = ixgbe_mq_start_locked; |
1597 | #endif | | 1600 | #endif |
1598 | } | | 1601 | } |
1599 | ifp->if_start = ixgbe_legacy_start; | | 1602 | ifp->if_start = ixgbe_legacy_start; |
1600 | IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); | | 1603 | IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); |
1601 | IFQ_SET_READY(&ifp->if_snd); | | 1604 | IFQ_SET_READY(&ifp->if_snd); |
1602 | | | 1605 | |
1603 | rv = if_initialize(ifp); | | 1606 | rv = if_initialize(ifp); |
1604 | if (rv != 0) { | | 1607 | if (rv != 0) { |
1605 | aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); | | 1608 | aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); |
1606 | return rv; | | 1609 | return rv; |
1607 | } | | 1610 | } |
1608 | adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); | | 1611 | adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); |
1609 | ether_ifattach(ifp, adapter->hw.mac.addr); | | 1612 | ether_ifattach(ifp, adapter->hw.mac.addr); |
1610 | /* | | 1613 | /* |
1611 | * We use per TX queue softint, so if_deferred_start_init() isn't | | 1614 | * We use per TX queue softint, so if_deferred_start_init() isn't |
1612 | * used. | | 1615 | * used. |
1613 | */ | | 1616 | */ |
1614 | ether_set_ifflags_cb(ec, ixv_ifflags_cb); | | 1617 | ether_set_ifflags_cb(ec, ixv_ifflags_cb); |
1615 | | | 1618 | |
1616 | adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; | | 1619 | adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; |
1617 | | | 1620 | |
1618 | /* | | 1621 | /* |
1619 | * Tell the upper layer(s) we support long frames. | | 1622 | * Tell the upper layer(s) we support long frames. |
1620 | */ | | 1623 | */ |
1621 | ifp->if_hdrlen = sizeof(struct ether_vlan_header); | | 1624 | ifp->if_hdrlen = sizeof(struct ether_vlan_header); |
1622 | | | 1625 | |
1623 | /* Set capability flags */ | | 1626 | /* Set capability flags */ |
1624 | ifp->if_capabilities |= IFCAP_HWCSUM | | 1627 | ifp->if_capabilities |= IFCAP_HWCSUM |
1625 | | IFCAP_TSOv4 | | 1628 | | IFCAP_TSOv4 |
1626 | | IFCAP_TSOv6; | | 1629 | | IFCAP_TSOv6; |
1627 | ifp->if_capenable = 0; | | 1630 | ifp->if_capenable = 0; |
1628 | | | 1631 | |
1629 | ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER | | 1632 | ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER |
1630 | | ETHERCAP_VLAN_HWTAGGING | | 1633 | | ETHERCAP_VLAN_HWTAGGING |
1631 | | ETHERCAP_VLAN_HWCSUM | | 1634 | | ETHERCAP_VLAN_HWCSUM |
1632 | | ETHERCAP_JUMBO_MTU | | 1635 | | ETHERCAP_JUMBO_MTU |
1633 | | ETHERCAP_VLAN_MTU; | | 1636 | | ETHERCAP_VLAN_MTU; |
1634 | | | 1637 | |
1635 | /* Enable the above capabilities by default */ | | 1638 | /* Enable the above capabilities by default */ |
1636 | ec->ec_capenable = ec->ec_capabilities; | | 1639 | ec->ec_capenable = ec->ec_capabilities; |
1637 | | | 1640 | |
1638 | /* Don't enable LRO by default */ | | 1641 | /* Don't enable LRO by default */ |
1639 | #if 0 | | 1642 | #if 0 |
1640 | /* NetBSD doesn't support LRO yet */ | | 1643 | /* NetBSD doesn't support LRO yet */ |
1641 | ifp->if_capabilities |= IFCAP_LRO; | | 1644 | ifp->if_capabilities |= IFCAP_LRO; |
1642 | #endif | | 1645 | #endif |
1643 | | | 1646 | |
1644 | /* | | 1647 | /* |
1645 | * Specify the media types supported by this adapter and register | | 1648 | * Specify the media types supported by this adapter and register |
1646 | * callbacks to update media and link information | | 1649 | * callbacks to update media and link information |
1647 | */ | | 1650 | */ |
1648 | ec->ec_ifmedia = &adapter->media; | | 1651 | ec->ec_ifmedia = &adapter->media; |
1649 | ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, | | 1652 | ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, |
1650 | ixv_media_status); | | 1653 | ixv_media_status); |
1651 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); | | 1654 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); |
1652 | ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); | | 1655 | ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); |
1653 | | | 1656 | |
1654 | if_register(ifp); | | 1657 | if_register(ifp); |
1655 | | | 1658 | |
1656 | return 0; | | 1659 | return 0; |
1657 | } /* ixv_setup_interface */ | | 1660 | } /* ixv_setup_interface */ |
1658 | | | 1661 | |
1659 | | | 1662 | |
/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 *
 * Programs each TX ring's control, head/tail, descriptor base/length,
 * and writeback-ordering registers, then enables the queue last.
 ************************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_queues; i++, txr++) {
		/* DMA base address of this ring's descriptor memory. */
		u64 tdba = txr->txdma.dma_paddr;
		u32 txctrl, txdctl;
		/* Hardware queue index for this ring. */
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->txr_no_space = false;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
		/* Clear descriptor write-relaxed-ordering enable. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */
1707 | | | 1710 | |
1708 | | | 1711 | |
1709 | /************************************************************************ | | 1712 | /************************************************************************ |
1710 | * ixv_initialize_rss_mapping | | 1713 | * ixv_initialize_rss_mapping |
1711 | ************************************************************************/ | | 1714 | ************************************************************************/ |
1712 | static void | | 1715 | static void |
1713 | ixv_initialize_rss_mapping(struct adapter *adapter) | | 1716 | ixv_initialize_rss_mapping(struct adapter *adapter) |
1714 | { | | 1717 | { |
1715 | struct ixgbe_hw *hw = &adapter->hw; | | 1718 | struct ixgbe_hw *hw = &adapter->hw; |
1716 | u32 reta = 0, mrqc, rss_key[10]; | | 1719 | u32 reta = 0, mrqc, rss_key[10]; |
1717 | int queue_id; | | 1720 | int queue_id; |
1718 | int i, j; | | 1721 | int i, j; |
1719 | u32 rss_hash_config; | | 1722 | u32 rss_hash_config; |
1720 | | | 1723 | |
1721 | /* force use default RSS key. */ | | 1724 | /* force use default RSS key. */ |
1722 | #ifdef __NetBSD__ | | 1725 | #ifdef __NetBSD__ |
1723 | rss_getkey((uint8_t *) &rss_key); | | 1726 | rss_getkey((uint8_t *) &rss_key); |
1724 | #else | | 1727 | #else |
1725 | if (adapter->feat_en & IXGBE_FEATURE_RSS) { | | 1728 | if (adapter->feat_en & IXGBE_FEATURE_RSS) { |
1726 | /* Fetch the configured RSS key */ | | 1729 | /* Fetch the configured RSS key */ |
1727 | rss_getkey((uint8_t *)&rss_key); | | 1730 | rss_getkey((uint8_t *)&rss_key); |
1728 | } else { | | 1731 | } else { |
1729 | /* set up random bits */ | | 1732 | /* set up random bits */ |
1730 | cprng_fast(&rss_key, sizeof(rss_key)); | | 1733 | cprng_fast(&rss_key, sizeof(rss_key)); |
1731 | } | | 1734 | } |
1732 | #endif | | 1735 | #endif |
1733 | | | 1736 | |
1734 | /* Now fill out hash function seeds */ | | 1737 | /* Now fill out hash function seeds */ |
1735 | for (i = 0; i < 10; i++) | | 1738 | for (i = 0; i < 10; i++) |
1736 | IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); | | 1739 | IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); |
1737 | | | 1740 | |
1738 | /* Set up the redirection table */ | | 1741 | /* Set up the redirection table */ |
1739 | for (i = 0, j = 0; i < 64; i++, j++) { | | 1742 | for (i = 0, j = 0; i < 64; i++, j++) { |
1740 | if (j == adapter->num_queues) | | 1743 | if (j == adapter->num_queues) |
1741 | j = 0; | | 1744 | j = 0; |
1742 | | | 1745 | |
1743 | if (adapter->feat_en & IXGBE_FEATURE_RSS) { | | 1746 | if (adapter->feat_en & IXGBE_FEATURE_RSS) { |
1744 | /* | | 1747 | /* |
1745 | * Fetch the RSS bucket id for the given indirection | | 1748 | * Fetch the RSS bucket id for the given indirection |
1746 | * entry. Cap it at the number of configured buckets | | 1749 | * entry. Cap it at the number of configured buckets |
1747 | * (which is num_queues.) | | 1750 | * (which is num_queues.) |
1748 | */ | | 1751 | */ |
1749 | queue_id = rss_get_indirection_to_bucket(i); | | 1752 | queue_id = rss_get_indirection_to_bucket(i); |
1750 | queue_id = queue_id % adapter->num_queues; | | 1753 | queue_id = queue_id % adapter->num_queues; |
1751 | } else | | 1754 | } else |
1752 | queue_id = j; | | 1755 | queue_id = j; |
1753 | | | 1756 | |