| @@ -1,932 +1,932 @@ | | | @@ -1,932 +1,932 @@ |
1 | /****************************************************************************** | | 1 | /****************************************************************************** |
2 | | | 2 | |
3 | Copyright (c) 2001-2017, Intel Corporation | | 3 | Copyright (c) 2001-2017, Intel Corporation |
4 | All rights reserved. | | 4 | All rights reserved. |
5 | | | 5 | |
6 | Redistribution and use in source and binary forms, with or without | | 6 | Redistribution and use in source and binary forms, with or without |
7 | modification, are permitted provided that the following conditions are met: | | 7 | modification, are permitted provided that the following conditions are met: |
8 | | | 8 | |
9 | 1. Redistributions of source code must retain the above copyright notice, | | 9 | 1. Redistributions of source code must retain the above copyright notice, |
10 | this list of conditions and the following disclaimer. | | 10 | this list of conditions and the following disclaimer. |
11 | | | 11 | |
12 | 2. Redistributions in binary form must reproduce the above copyright | | 12 | 2. Redistributions in binary form must reproduce the above copyright |
13 | notice, this list of conditions and the following disclaimer in the | | 13 | notice, this list of conditions and the following disclaimer in the |
14 | documentation and/or other materials provided with the distribution. | | 14 | documentation and/or other materials provided with the distribution. |
15 | | | 15 | |
16 | 3. Neither the name of the Intel Corporation nor the names of its | | 16 | 3. Neither the name of the Intel Corporation nor the names of its |
17 | contributors may be used to endorse or promote products derived from | | 17 | contributors may be used to endorse or promote products derived from |
18 | this software without specific prior written permission. | | 18 | this software without specific prior written permission. |
19 | | | 19 | |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
23 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | | 23 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
24 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 24 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 25 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 26 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 27 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 28 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 29 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | POSSIBILITY OF SUCH DAMAGE. | | 30 | POSSIBILITY OF SUCH DAMAGE. |
31 | | | 31 | |
32 | ******************************************************************************/ | | 32 | ******************************************************************************/ |
33 | /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/ | | 33 | /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/ |
34 | | | 34 | |
35 | #include "ixgbe.h" | | 35 | #include "ixgbe.h" |
36 | #include "ixgbe_sriov.h" | | 36 | #include "ixgbe_sriov.h" |
37 | | | 37 | |
38 | #ifdef PCI_IOV | | 38 | #ifdef PCI_IOV |
39 | | | 39 | |
40 | MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations"); | | 40 | MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations"); |
41 | | | 41 | |
42 | /************************************************************************ | | 42 | /************************************************************************ |
43 | * ixgbe_pci_iov_detach | | 43 | * ixgbe_pci_iov_detach |
44 | ************************************************************************/ | | 44 | ************************************************************************/ |
45 | int | | 45 | int |
46 | ixgbe_pci_iov_detach(device_t dev) | | 46 | ixgbe_pci_iov_detach(device_t dev) |
47 | { | | 47 | { |
48 | return pci_iov_detach(dev); | | 48 | return pci_iov_detach(dev); |
49 | } | | 49 | } |
50 | | | 50 | |
51 | /************************************************************************ | | 51 | /************************************************************************ |
52 | * ixgbe_define_iov_schemas | | 52 | * ixgbe_define_iov_schemas |
53 | ************************************************************************/ | | 53 | ************************************************************************/ |
54 | void | | 54 | void |
55 | ixgbe_define_iov_schemas(device_t dev, int *error) | | 55 | ixgbe_define_iov_schemas(device_t dev, int *error) |
56 | { | | 56 | { |
57 | nvlist_t *pf_schema, *vf_schema; | | 57 | nvlist_t *pf_schema, *vf_schema; |
58 | | | 58 | |
59 | pf_schema = pci_iov_schema_alloc_node(); | | 59 | pf_schema = pci_iov_schema_alloc_node(); |
60 | vf_schema = pci_iov_schema_alloc_node(); | | 60 | vf_schema = pci_iov_schema_alloc_node(); |
61 | pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); | | 61 | pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); |
62 | pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", | | 62 | pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", |
63 | IOV_SCHEMA_HASDEFAULT, TRUE); | | 63 | IOV_SCHEMA_HASDEFAULT, TRUE); |
64 | pci_iov_schema_add_bool(vf_schema, "allow-set-mac", | | 64 | pci_iov_schema_add_bool(vf_schema, "allow-set-mac", |
65 | IOV_SCHEMA_HASDEFAULT, FALSE); | | 65 | IOV_SCHEMA_HASDEFAULT, FALSE); |
66 | pci_iov_schema_add_bool(vf_schema, "allow-promisc", | | 66 | pci_iov_schema_add_bool(vf_schema, "allow-promisc", |
67 | IOV_SCHEMA_HASDEFAULT, FALSE); | | 67 | IOV_SCHEMA_HASDEFAULT, FALSE); |
68 | *error = pci_iov_attach(dev, pf_schema, vf_schema); | | 68 | *error = pci_iov_attach(dev, pf_schema, vf_schema); |
69 | if (*error != 0) { | | 69 | if (*error != 0) { |
70 | device_printf(dev, | | 70 | device_printf(dev, |
71 | "Error %d setting up SR-IOV\n", *error); | | 71 | "Error %d setting up SR-IOV\n", *error); |
72 | } | | 72 | } |
73 | } /* ixgbe_define_iov_schemas */ | | 73 | } /* ixgbe_define_iov_schemas */ |
74 | | | 74 | |
75 | /************************************************************************ | | 75 | /************************************************************************ |
76 | * ixgbe_align_all_queue_indices | | 76 | * ixgbe_align_all_queue_indices |
77 | ************************************************************************/ | | 77 | ************************************************************************/ |
78 | inline void | | 78 | inline void |
79 | ixgbe_align_all_queue_indices(struct adapter *adapter) | | 79 | ixgbe_align_all_queue_indices(struct adapter *adapter) |
80 | { | | 80 | { |
81 | int i; | | 81 | int i; |
82 | int index; | | 82 | int index; |
83 | | | 83 | |
84 | for (i = 0; i < adapter->num_queues; i++) { | | 84 | for (i = 0; i < adapter->num_queues; i++) { |
85 | index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); | | 85 | index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); |
86 | adapter->rx_rings[i].me = index; | | 86 | adapter->rx_rings[i].me = index; |
87 | adapter->tx_rings[i].me = index; | | 87 | adapter->tx_rings[i].me = index; |
88 | } | | 88 | } |
89 | } | | 89 | } |
90 | | | 90 | |
91 | /* Support functions for SR-IOV/VF management */ | | 91 | /* Support functions for SR-IOV/VF management */ |
92 | static inline void | | 92 | static inline void |
93 | ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) | | 93 | ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) |
94 | { | | 94 | { |
95 | if (vf->flags & IXGBE_VF_CTS) | | 95 | if (vf->flags & IXGBE_VF_CTS) |
96 | msg |= IXGBE_VT_MSGTYPE_CTS; | | 96 | msg |= IXGBE_VT_MSGTYPE_CTS; |
97 | | | 97 | |
98 | adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool); | | 98 | adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool); |
99 | } | | 99 | } |
100 | | | 100 | |
101 | static inline void | | 101 | static inline void |
102 | ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) | | 102 | ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) |
103 | { | | 103 | { |
104 | msg &= IXGBE_VT_MSG_MASK; | | 104 | msg &= IXGBE_VT_MSG_MASK; |
105 | ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK); | | 105 | ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK); |
106 | } | | 106 | } |
107 | | | 107 | |
108 | static inline void | | 108 | static inline void |
109 | ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) | | 109 | ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) |
110 | { | | 110 | { |
111 | msg &= IXGBE_VT_MSG_MASK; | | 111 | msg &= IXGBE_VT_MSG_MASK; |
112 | ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK); | | 112 | ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK); |
113 | } | | 113 | } |
114 | | | 114 | |
115 | static inline void | | 115 | static inline void |
116 | ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf) | | 116 | ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf) |
117 | { | | 117 | { |
118 | if (!(vf->flags & IXGBE_VF_CTS)) | | 118 | if (!(vf->flags & IXGBE_VF_CTS)) |
119 | ixgbe_send_vf_nack(adapter, vf, 0); | | 119 | ixgbe_send_vf_nack(adapter, vf, 0); |
120 | } | | 120 | } |
121 | | | 121 | |
122 | static inline boolean_t | | 122 | static inline bool |
123 | ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac) | | 123 | ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac) |
124 | { | | 124 | { |
125 | return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0); | | 125 | return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0); |
126 | } | | 126 | } |
127 | | | 127 | |
128 | static inline int | | 128 | static inline int |
129 | ixgbe_vf_queues(int mode) | | 129 | ixgbe_vf_queues(int mode) |
130 | { | | 130 | { |
131 | switch (mode) { | | 131 | switch (mode) { |
132 | case IXGBE_64_VM: | | 132 | case IXGBE_64_VM: |
133 | return (2); | | 133 | return (2); |
134 | case IXGBE_32_VM: | | 134 | case IXGBE_32_VM: |
135 | return (4); | | 135 | return (4); |
136 | case IXGBE_NO_VM: | | 136 | case IXGBE_NO_VM: |
137 | default: | | 137 | default: |
138 | return (0); | | 138 | return (0); |
139 | } | | 139 | } |
140 | } | | 140 | } |
141 | | | 141 | |
142 | inline int | | 142 | inline int |
143 | ixgbe_vf_que_index(int mode, int vfnum, int num) | | 143 | ixgbe_vf_que_index(int mode, int vfnum, int num) |
144 | { | | 144 | { |
145 | return ((vfnum * ixgbe_vf_queues(mode)) + num); | | 145 | return ((vfnum * ixgbe_vf_queues(mode)) + num); |
146 | } | | 146 | } |
147 | | | 147 | |
148 | static inline void | | 148 | static inline void |
149 | ixgbe_update_max_frame(struct adapter * adapter, int max_frame) | | 149 | ixgbe_update_max_frame(struct adapter * adapter, int max_frame) |
150 | { | | 150 | { |
151 | if (adapter->max_frame_size < max_frame) | | 151 | if (adapter->max_frame_size < max_frame) |
152 | adapter->max_frame_size = max_frame; | | 152 | adapter->max_frame_size = max_frame; |
153 | } | | 153 | } |
154 | | | 154 | |
155 | inline u32 | | 155 | inline u32 |
156 | ixgbe_get_mrqc(int iov_mode) | | 156 | ixgbe_get_mrqc(int iov_mode) |
157 | { | | 157 | { |
158 | u32 mrqc; | | 158 | u32 mrqc; |
159 | | | 159 | |
160 | switch (iov_mode) { | | 160 | switch (iov_mode) { |
161 | case IXGBE_64_VM: | | 161 | case IXGBE_64_VM: |
162 | mrqc = IXGBE_MRQC_VMDQRSS64EN; | | 162 | mrqc = IXGBE_MRQC_VMDQRSS64EN; |
163 | break; | | 163 | break; |
164 | case IXGBE_32_VM: | | 164 | case IXGBE_32_VM: |
165 | mrqc = IXGBE_MRQC_VMDQRSS32EN; | | 165 | mrqc = IXGBE_MRQC_VMDQRSS32EN; |
166 | break; | | 166 | break; |
167 | case IXGBE_NO_VM: | | 167 | case IXGBE_NO_VM: |
168 | mrqc = 0; | | 168 | mrqc = 0; |
169 | break; | | 169 | break; |
170 | default: | | 170 | default: |
171 | panic("Unexpected SR-IOV mode %d", iov_mode); | | 171 | panic("Unexpected SR-IOV mode %d", iov_mode); |
172 | } | | 172 | } |
173 | | | 173 | |
174 | return mrqc; | | 174 | return mrqc; |
175 | } | | 175 | } |
176 | | | 176 | |
177 | | | 177 | |
178 | inline u32 | | 178 | inline u32 |
179 | ixgbe_get_mtqc(int iov_mode) | | 179 | ixgbe_get_mtqc(int iov_mode) |
180 | { | | 180 | { |
181 | uint32_t mtqc; | | 181 | uint32_t mtqc; |
182 | | | 182 | |
183 | switch (iov_mode) { | | 183 | switch (iov_mode) { |
184 | case IXGBE_64_VM: | | 184 | case IXGBE_64_VM: |
185 | mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA; | | 185 | mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA; |
186 | break; | | 186 | break; |
187 | case IXGBE_32_VM: | | 187 | case IXGBE_32_VM: |
188 | mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA; | | 188 | mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA; |
189 | break; | | 189 | break; |
190 | case IXGBE_NO_VM: | | 190 | case IXGBE_NO_VM: |
191 | mtqc = IXGBE_MTQC_64Q_1PB; | | 191 | mtqc = IXGBE_MTQC_64Q_1PB; |
192 | break; | | 192 | break; |
193 | default: | | 193 | default: |
194 | panic("Unexpected SR-IOV mode %d", iov_mode); | | 194 | panic("Unexpected SR-IOV mode %d", iov_mode); |
195 | } | | 195 | } |
196 | | | 196 | |
197 | return mtqc; | | 197 | return mtqc; |
198 | } | | 198 | } |
199 | | | 199 | |
200 | void | | 200 | void |
201 | ixgbe_ping_all_vfs(struct adapter *adapter) | | 201 | ixgbe_ping_all_vfs(struct adapter *adapter) |
202 | { | | 202 | { |
203 | struct ixgbe_vf *vf; | | 203 | struct ixgbe_vf *vf; |
204 | | | 204 | |
205 | for (int i = 0; i < adapter->num_vfs; i++) { | | 205 | for (int i = 0; i < adapter->num_vfs; i++) { |
206 | vf = &adapter->vfs[i]; | | 206 | vf = &adapter->vfs[i]; |
207 | if (vf->flags & IXGBE_VF_ACTIVE) | | 207 | if (vf->flags & IXGBE_VF_ACTIVE) |
208 | ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); | | 208 | ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); |
209 | } | | 209 | } |
210 | } /* ixgbe_ping_all_vfs */ | | 210 | } /* ixgbe_ping_all_vfs */ |
211 | | | 211 | |
212 | | | 212 | |
213 | static void | | 213 | static void |
214 | ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf, | | 214 | ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf, |
215 | uint16_t tag) | | 215 | uint16_t tag) |
216 | { | | 216 | { |
217 | struct ixgbe_hw *hw; | | 217 | struct ixgbe_hw *hw; |
218 | uint32_t vmolr, vmvir; | | 218 | uint32_t vmolr, vmvir; |
219 | | | 219 | |
220 | hw = &adapter->hw; | | 220 | hw = &adapter->hw; |
221 | | | 221 | |
222 | vf->vlan_tag = tag; | | 222 | vf->vlan_tag = tag; |
223 | | | 223 | |
224 | vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool)); | | 224 | vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool)); |
225 | | | 225 | |
226 | /* Do not receive packets that pass inexact filters. */ | | 226 | /* Do not receive packets that pass inexact filters. */ |
227 | vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); | | 227 | vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); |
228 | | | 228 | |
229 | /* Disable Multicast Promicuous Mode. */ | | 229 | /* Disable Multicast Promicuous Mode. */ |
230 | vmolr &= ~IXGBE_VMOLR_MPE; | | 230 | vmolr &= ~IXGBE_VMOLR_MPE; |
231 | | | 231 | |
232 | /* Accept broadcasts. */ | | 232 | /* Accept broadcasts. */ |
233 | vmolr |= IXGBE_VMOLR_BAM; | | 233 | vmolr |= IXGBE_VMOLR_BAM; |
234 | | | 234 | |
235 | if (tag == 0) { | | 235 | if (tag == 0) { |
236 | /* Accept non-vlan tagged traffic. */ | | 236 | /* Accept non-vlan tagged traffic. */ |
237 | vmolr |= IXGBE_VMOLR_AUPE; | | 237 | vmolr |= IXGBE_VMOLR_AUPE; |
238 | | | 238 | |
239 | /* Allow VM to tag outgoing traffic; no default tag. */ | | 239 | /* Allow VM to tag outgoing traffic; no default tag. */ |
240 | vmvir = 0; | | 240 | vmvir = 0; |
241 | } else { | | 241 | } else { |
242 | /* Require vlan-tagged traffic. */ | | 242 | /* Require vlan-tagged traffic. */ |
243 | vmolr &= ~IXGBE_VMOLR_AUPE; | | 243 | vmolr &= ~IXGBE_VMOLR_AUPE; |
244 | | | 244 | |
245 | /* Tag all traffic with provided vlan tag. */ | | 245 | /* Tag all traffic with provided vlan tag. */ |
246 | vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); | | 246 | vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); |
247 | } | | 247 | } |
248 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); | | 248 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); |
249 | IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); | | 249 | IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); |
250 | } /* ixgbe_vf_set_default_vlan */ | | 250 | } /* ixgbe_vf_set_default_vlan */ |
251 | | | 251 | |
252 | | | 252 | |
253 | static void | | 253 | static void |
254 | ixgbe_clear_vfmbmem(struct ixgbe_hw *hw, struct ixgbe_vf *vf) | | 254 | ixgbe_clear_vfmbmem(struct ixgbe_hw *hw, struct ixgbe_vf *vf) |
255 | { | | 255 | { |
256 | uint32_t vf_index = IXGBE_VF_INDEX(vf->pool); | | 256 | uint32_t vf_index = IXGBE_VF_INDEX(vf->pool); |
257 | uint16_t mbx_size = hw->mbx.size; | | 257 | uint16_t mbx_size = hw->mbx.size; |
258 | uint16_t i; | | 258 | uint16_t i; |
259 | | | 259 | |
260 | IXGBE_CORE_LOCK_ASSERT(adapter); | | 260 | IXGBE_CORE_LOCK_ASSERT(adapter); |
261 | | | 261 | |
262 | for (i = 0; i < mbx_size; ++i) | | 262 | for (i = 0; i < mbx_size; ++i) |
263 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0); | | 263 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0); |
264 | } /* ixgbe_clear_vfmbmem */ | | 264 | } /* ixgbe_clear_vfmbmem */ |
265 | | | 265 | |
266 | | | 266 | |
267 | static boolean_t | | 267 | static bool |
268 | ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) | | 268 | ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) |
269 | { | | 269 | { |
270 | | | 270 | |
271 | /* | | 271 | /* |
272 | * Frame size compatibility between PF and VF is only a problem on | | 272 | * Frame size compatibility between PF and VF is only a problem on |
273 | * 82599-based cards. X540 and later support any combination of jumbo | | 273 | * 82599-based cards. X540 and later support any combination of jumbo |
274 | * frames on PFs and VFs. | | 274 | * frames on PFs and VFs. |
275 | */ | | 275 | */ |
276 | if (adapter->hw.mac.type != ixgbe_mac_82599EB) | | 276 | if (adapter->hw.mac.type != ixgbe_mac_82599EB) |
277 | return (TRUE); | | 277 | return (TRUE); |
278 | | | 278 | |
279 | switch (vf->api_ver) { | | 279 | switch (vf->api_ver) { |
280 | case IXGBE_API_VER_1_0: | | 280 | case IXGBE_API_VER_1_0: |
281 | case IXGBE_API_VER_UNKNOWN: | | 281 | case IXGBE_API_VER_UNKNOWN: |
282 | /* | | 282 | /* |
283 | * On legacy (1.0 and older) VF versions, we don't support jumbo | | 283 | * On legacy (1.0 and older) VF versions, we don't support jumbo |
284 | * frames on either the PF or the VF. | | 284 | * frames on either the PF or the VF. |
285 | */ | | 285 | */ |
286 | if (adapter->max_frame_size > ETHER_MAX_LEN || | | 286 | if (adapter->max_frame_size > ETHER_MAX_LEN || |
287 | vf->max_frame_size > ETHER_MAX_LEN) | | 287 | vf->max_frame_size > ETHER_MAX_LEN) |
288 | return (FALSE); | | 288 | return (FALSE); |
289 | | | 289 | |
290 | return (TRUE); | | 290 | return (TRUE); |
291 | | | 291 | |
292 | break; | | 292 | break; |
293 | case IXGBE_API_VER_1_1: | | 293 | case IXGBE_API_VER_1_1: |
294 | default: | | 294 | default: |
295 | /* | | 295 | /* |
296 | * 1.1 or later VF versions always work if they aren't using | | 296 | * 1.1 or later VF versions always work if they aren't using |
297 | * jumbo frames. | | 297 | * jumbo frames. |
298 | */ | | 298 | */ |
299 | if (vf->max_frame_size <= ETHER_MAX_LEN) | | 299 | if (vf->max_frame_size <= ETHER_MAX_LEN) |
300 | return (TRUE); | | 300 | return (TRUE); |
301 | | | 301 | |
302 | /* | | 302 | /* |
303 | * Jumbo frames only work with VFs if the PF is also using jumbo | | 303 | * Jumbo frames only work with VFs if the PF is also using jumbo |
304 | * frames. | | 304 | * frames. |
305 | */ | | 305 | */ |
306 | if (adapter->max_frame_size <= ETHER_MAX_LEN) | | 306 | if (adapter->max_frame_size <= ETHER_MAX_LEN) |
307 | return (TRUE); | | 307 | return (TRUE); |
308 | | | 308 | |
309 | return (FALSE); | | 309 | return (FALSE); |
310 | } | | 310 | } |
311 | } /* ixgbe_vf_frame_size_compatible */ | | 311 | } /* ixgbe_vf_frame_size_compatible */ |
312 | | | 312 | |
313 | | | 313 | |
314 | static void | | 314 | static void |
315 | ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) | | 315 | ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) |
316 | { | | 316 | { |
317 | ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); | | 317 | ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); |
318 | | | 318 | |
319 | // XXX clear multicast addresses | | 319 | // XXX clear multicast addresses |
320 | | | 320 | |
321 | ixgbe_clear_rar(&adapter->hw, vf->rar_index); | | 321 | ixgbe_clear_rar(&adapter->hw, vf->rar_index); |
322 | ixgbe_clear_vfmbmem(&adapter->hw, vf); | | 322 | ixgbe_clear_vfmbmem(&adapter->hw, vf); |
323 | ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool)); | | 323 | ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool)); |
324 | | | 324 | |
325 | vf->api_ver = IXGBE_API_VER_UNKNOWN; | | 325 | vf->api_ver = IXGBE_API_VER_UNKNOWN; |
326 | } /* ixgbe_process_vf_reset */ | | 326 | } /* ixgbe_process_vf_reset */ |
327 | | | 327 | |
328 | | | 328 | |
329 | static void | | 329 | static void |
330 | ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) | | 330 | ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) |
331 | { | | 331 | { |
332 | struct ixgbe_hw *hw; | | 332 | struct ixgbe_hw *hw; |
333 | uint32_t vf_index, vfte; | | 333 | uint32_t vf_index, vfte; |
334 | | | 334 | |
335 | hw = &adapter->hw; | | 335 | hw = &adapter->hw; |
336 | | | 336 | |
337 | vf_index = IXGBE_VF_INDEX(vf->pool); | | 337 | vf_index = IXGBE_VF_INDEX(vf->pool); |
338 | vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); | | 338 | vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); |
339 | vfte |= IXGBE_VF_BIT(vf->pool); | | 339 | vfte |= IXGBE_VF_BIT(vf->pool); |
340 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); | | 340 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); |
341 | } /* ixgbe_vf_enable_transmit */ | | 341 | } /* ixgbe_vf_enable_transmit */ |
342 | | | 342 | |
343 | | | 343 | |
344 | static void | | 344 | static void |
345 | ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) | | 345 | ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) |
346 | { | | 346 | { |
347 | struct ixgbe_hw *hw; | | 347 | struct ixgbe_hw *hw; |
348 | uint32_t vf_index, vfre; | | 348 | uint32_t vf_index, vfre; |
349 | | | 349 | |
350 | hw = &adapter->hw; | | 350 | hw = &adapter->hw; |
351 | | | 351 | |
352 | vf_index = IXGBE_VF_INDEX(vf->pool); | | 352 | vf_index = IXGBE_VF_INDEX(vf->pool); |
353 | vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); | | 353 | vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); |
354 | if (ixgbe_vf_frame_size_compatible(adapter, vf)) | | 354 | if (ixgbe_vf_frame_size_compatible(adapter, vf)) |
355 | vfre |= IXGBE_VF_BIT(vf->pool); | | 355 | vfre |= IXGBE_VF_BIT(vf->pool); |
356 | else | | 356 | else |
357 | vfre &= ~IXGBE_VF_BIT(vf->pool); | | 357 | vfre &= ~IXGBE_VF_BIT(vf->pool); |
358 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); | | 358 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); |
359 | } /* ixgbe_vf_enable_receive */ | | 359 | } /* ixgbe_vf_enable_receive */ |
360 | | | 360 | |
361 | | | 361 | |
362 | static void | | 362 | static void |
363 | ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | | 363 | ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) |
364 | { | | 364 | { |
365 | struct ixgbe_hw *hw; | | 365 | struct ixgbe_hw *hw; |
366 | uint32_t ack; | | 366 | uint32_t ack; |
367 | uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; | | 367 | uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; |
368 | | | 368 | |
369 | hw = &adapter->hw; | | 369 | hw = &adapter->hw; |
370 | | | 370 | |
371 | ixgbe_process_vf_reset(adapter, vf); | | 371 | ixgbe_process_vf_reset(adapter, vf); |
372 | | | 372 | |
373 | if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { | | 373 | if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { |
374 | ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, | | 374 | ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, |
375 | vf->pool, TRUE); | | 375 | vf->pool, TRUE); |
376 | ack = IXGBE_VT_MSGTYPE_ACK; | | 376 | ack = IXGBE_VT_MSGTYPE_ACK; |
377 | } else | | 377 | } else |
378 | ack = IXGBE_VT_MSGTYPE_NACK; | | 378 | ack = IXGBE_VT_MSGTYPE_NACK; |
379 | | | 379 | |
380 | ixgbe_vf_enable_transmit(adapter, vf); | | 380 | ixgbe_vf_enable_transmit(adapter, vf); |
381 | ixgbe_vf_enable_receive(adapter, vf); | | 381 | ixgbe_vf_enable_receive(adapter, vf); |
382 | | | 382 | |
383 | vf->flags |= IXGBE_VF_CTS; | | 383 | vf->flags |= IXGBE_VF_CTS; |
384 | | | 384 | |
385 | resp[0] = IXGBE_VF_RESET | ack; | | 385 | resp[0] = IXGBE_VF_RESET | ack; |
386 | bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); | | 386 | bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); |
387 | resp[3] = hw->mac.mc_filter_type; | | 387 | resp[3] = hw->mac.mc_filter_type; |
388 | hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); | | 388 | hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); |
389 | } /* ixgbe_vf_reset_msg */ | | 389 | } /* ixgbe_vf_reset_msg */ |
390 | | | 390 | |
391 | | | 391 | |
392 | static void | | 392 | static void |
393 | ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | | 393 | ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) |
394 | { | | 394 | { |
395 | uint8_t *mac; | | 395 | uint8_t *mac; |
396 | | | 396 | |
397 | mac = (uint8_t*)&msg[1]; | | 397 | mac = (uint8_t*)&msg[1]; |
398 | | | 398 | |
399 | /* Check that the VF has permission to change the MAC address. */ | | 399 | /* Check that the VF has permission to change the MAC address. */ |
400 | if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) { | | 400 | if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) { |
401 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 401 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
402 | return; | | 402 | return; |
403 | } | | 403 | } |
404 | | | 404 | |
405 | if (ixgbe_validate_mac_addr(mac) != 0) { | | 405 | if (ixgbe_validate_mac_addr(mac) != 0) { |
406 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 406 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
407 | return; | | 407 | return; |
408 | } | | 408 | } |
409 | | | 409 | |
410 | bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); | | 410 | bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); |
411 | | | 411 | |
412 | ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool, | | 412 | ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool, |
413 | TRUE); | | 413 | TRUE); |
414 | | | 414 | |
415 | ixgbe_send_vf_ack(adapter, vf, msg[0]); | | 415 | ixgbe_send_vf_ack(adapter, vf, msg[0]); |
416 | } /* ixgbe_vf_set_mac */ | | 416 | } /* ixgbe_vf_set_mac */ |
417 | | | 417 | |
418 | | | 418 | |
419 | /* | | 419 | /* |
420 | * VF multicast addresses are set by using the appropriate bit in | | 420 | * VF multicast addresses are set by using the appropriate bit in |
421 | * 1 of 128 32 bit addresses (4096 possible). | | 421 | * 1 of 128 32 bit addresses (4096 possible). |
422 | */ | | 422 | */ |
423 | static void | | 423 | static void |
424 | ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg) | | 424 | ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg) |
425 | { | | 425 | { |
426 | u16 *list = (u16*)&msg[1]; | | 426 | u16 *list = (u16*)&msg[1]; |
427 | int entries; | | 427 | int entries; |
428 | u32 vmolr, vec_bit, vec_reg, mta_reg; | | 428 | u32 vmolr, vec_bit, vec_reg, mta_reg; |
429 | | | 429 | |
430 | entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; | | 430 | entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; |
431 | entries = uimin(entries, IXGBE_MAX_VF_MC); | | 431 | entries = uimin(entries, IXGBE_MAX_VF_MC); |
432 | | | 432 | |
433 | vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool)); | | 433 | vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool)); |
434 | | | 434 | |
435 | vf->num_mc_hashes = entries; | | 435 | vf->num_mc_hashes = entries; |
436 | | | 436 | |
437 | /* Set the appropriate MTA bit */ | | 437 | /* Set the appropriate MTA bit */ |
438 | for (int i = 0; i < entries; i++) { | | 438 | for (int i = 0; i < entries; i++) { |
439 | vf->mc_hash[i] = list[i]; | | 439 | vf->mc_hash[i] = list[i]; |
440 | vec_reg = (vf->mc_hash[i] >> 5) & 0x7F; | | 440 | vec_reg = (vf->mc_hash[i] >> 5) & 0x7F; |
441 | vec_bit = vf->mc_hash[i] & 0x1F; | | 441 | vec_bit = vf->mc_hash[i] & 0x1F; |
442 | mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg)); | | 442 | mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg)); |
443 | mta_reg |= (1 << vec_bit); | | 443 | mta_reg |= (1 << vec_bit); |
444 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg); | | 444 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg); |
445 | } | | 445 | } |
446 | | | 446 | |
447 | vmolr |= IXGBE_VMOLR_ROMPE; | | 447 | vmolr |= IXGBE_VMOLR_ROMPE; |
448 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr); | | 448 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr); |
449 | ixgbe_send_vf_ack(adapter, vf, msg[0]); | | 449 | ixgbe_send_vf_ack(adapter, vf, msg[0]); |
450 | } /* ixgbe_vf_set_mc_addr */ | | 450 | } /* ixgbe_vf_set_mc_addr */ |
451 | | | 451 | |
452 | | | 452 | |
453 | static void | | 453 | static void |
454 | ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | | 454 | ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) |
455 | { | | 455 | { |
456 | struct ixgbe_hw *hw; | | 456 | struct ixgbe_hw *hw; |
457 | int enable; | | 457 | int enable; |
458 | uint16_t tag; | | 458 | uint16_t tag; |
459 | | | 459 | |
460 | hw = &adapter->hw; | | 460 | hw = &adapter->hw; |
461 | enable = IXGBE_VT_MSGINFO(msg[0]); | | 461 | enable = IXGBE_VT_MSGINFO(msg[0]); |
462 | tag = msg[1] & IXGBE_VLVF_VLANID_MASK; | | 462 | tag = msg[1] & IXGBE_VLVF_VLANID_MASK; |
463 | | | 463 | |
464 | if (!(vf->flags & IXGBE_VF_CAP_VLAN)) { | | 464 | if (!(vf->flags & IXGBE_VF_CAP_VLAN)) { |
465 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 465 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
466 | return; | | 466 | return; |
467 | } | | 467 | } |
468 | | | 468 | |
469 | /* It is illegal to enable vlan tag 0. */ | | 469 | /* It is illegal to enable vlan tag 0. */ |
470 | if (tag == 0 && enable != 0) { | | 470 | if (tag == 0 && enable != 0) { |
471 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 471 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
472 | return; | | 472 | return; |
473 | } | | 473 | } |
474 | | | 474 | |
475 | ixgbe_set_vfta(hw, tag, vf->pool, enable, false); | | 475 | ixgbe_set_vfta(hw, tag, vf->pool, enable, false); |
476 | ixgbe_send_vf_ack(adapter, vf, msg[0]); | | 476 | ixgbe_send_vf_ack(adapter, vf, msg[0]); |
477 | } /* ixgbe_vf_set_vlan */ | | 477 | } /* ixgbe_vf_set_vlan */ |
478 | | | 478 | |
479 | | | 479 | |
480 | static void | | 480 | static void |
481 | ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | | 481 | ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) |
482 | { | | 482 | { |
483 | struct ixgbe_hw *hw; | | 483 | struct ixgbe_hw *hw; |
484 | uint32_t vf_max_size, pf_max_size, mhadd; | | 484 | uint32_t vf_max_size, pf_max_size, mhadd; |
485 | | | 485 | |
486 | hw = &adapter->hw; | | 486 | hw = &adapter->hw; |
487 | vf_max_size = msg[1]; | | 487 | vf_max_size = msg[1]; |
488 | | | 488 | |
489 | if (vf_max_size < ETHER_CRC_LEN) { | | 489 | if (vf_max_size < ETHER_CRC_LEN) { |
490 | /* We intentionally ACK invalid LPE requests. */ | | 490 | /* We intentionally ACK invalid LPE requests. */ |
491 | ixgbe_send_vf_ack(adapter, vf, msg[0]); | | 491 | ixgbe_send_vf_ack(adapter, vf, msg[0]); |
492 | return; | | 492 | return; |
493 | } | | 493 | } |
494 | | | 494 | |
495 | vf_max_size -= ETHER_CRC_LEN; | | 495 | vf_max_size -= ETHER_CRC_LEN; |
496 | | | 496 | |
497 | if (vf_max_size > IXGBE_MAX_FRAME_SIZE) { | | 497 | if (vf_max_size > IXGBE_MAX_FRAME_SIZE) { |
498 | /* We intentionally ACK invalid LPE requests. */ | | 498 | /* We intentionally ACK invalid LPE requests. */ |
499 | ixgbe_send_vf_ack(adapter, vf, msg[0]); | | 499 | ixgbe_send_vf_ack(adapter, vf, msg[0]); |
500 | return; | | 500 | return; |
501 | } | | 501 | } |
502 | | | 502 | |
503 | vf->max_frame_size = vf_max_size; | | 503 | vf->max_frame_size = vf_max_size; |
504 | ixgbe_update_max_frame(adapter, vf->max_frame_size); | | 504 | ixgbe_update_max_frame(adapter, vf->max_frame_size); |
505 | | | 505 | |
506 | /* | | 506 | /* |
507 | * We might have to disable reception to this VF if the frame size is | | 507 | * We might have to disable reception to this VF if the frame size is |
508 | * not compatible with the config on the PF. | | 508 | * not compatible with the config on the PF. |
509 | */ | | 509 | */ |
510 | ixgbe_vf_enable_receive(adapter, vf); | | 510 | ixgbe_vf_enable_receive(adapter, vf); |
511 | | | 511 | |
512 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | | 512 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); |
513 | pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; | | 513 | pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; |
514 | | | 514 | |
515 | if (pf_max_size < adapter->max_frame_size) { | | 515 | if (pf_max_size < adapter->max_frame_size) { |
516 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | | 516 | mhadd &= ~IXGBE_MHADD_MFS_MASK; |
517 | mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; | | 517 | mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; |
518 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); | | 518 | IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); |
519 | } | | 519 | } |
520 | | | 520 | |
521 | ixgbe_send_vf_ack(adapter, vf, msg[0]); | | 521 | ixgbe_send_vf_ack(adapter, vf, msg[0]); |
522 | } /* ixgbe_vf_set_lpe */ | | 522 | } /* ixgbe_vf_set_lpe */ |
523 | | | 523 | |
524 | | | 524 | |
/*
 * Handle a VF MACVLAN (additional unicast MAC filter) request.
 * Not implemented yet: every request is NACKed so the VF knows the
 * filter was not programmed.
 */
static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */
532 | | | 532 | |
533 | | | 533 | |
534 | static void | | 534 | static void |
535 | ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf, | | 535 | ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf, |
536 | uint32_t *msg) | | 536 | uint32_t *msg) |
537 | { | | 537 | { |
538 | | | 538 | |
539 | switch (msg[1]) { | | 539 | switch (msg[1]) { |
540 | case IXGBE_API_VER_1_0: | | 540 | case IXGBE_API_VER_1_0: |
541 | case IXGBE_API_VER_1_1: | | 541 | case IXGBE_API_VER_1_1: |
542 | vf->api_ver = msg[1]; | | 542 | vf->api_ver = msg[1]; |
543 | ixgbe_send_vf_ack(adapter, vf, msg[0]); | | 543 | ixgbe_send_vf_ack(adapter, vf, msg[0]); |
544 | break; | | 544 | break; |
545 | default: | | 545 | default: |
546 | vf->api_ver = IXGBE_API_VER_UNKNOWN; | | 546 | vf->api_ver = IXGBE_API_VER_UNKNOWN; |
547 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 547 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
548 | break; | | 548 | break; |
549 | } | | 549 | } |
550 | } /* ixgbe_vf_api_negotiate */ | | 550 | } /* ixgbe_vf_api_negotiate */ |
551 | | | 551 | |
552 | | | 552 | |
553 | static void | | 553 | static void |
554 | ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) | | 554 | ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) |
555 | { | | 555 | { |
556 | struct ixgbe_hw *hw; | | 556 | struct ixgbe_hw *hw; |
557 | uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN]; | | 557 | uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN]; |
558 | int num_queues; | | 558 | int num_queues; |
559 | | | 559 | |
560 | hw = &adapter->hw; | | 560 | hw = &adapter->hw; |
561 | | | 561 | |
562 | /* GET_QUEUES is not supported on pre-1.1 APIs. */ | | 562 | /* GET_QUEUES is not supported on pre-1.1 APIs. */ |
563 | switch (msg[0]) { | | 563 | switch (msg[0]) { |
564 | case IXGBE_API_VER_1_0: | | 564 | case IXGBE_API_VER_1_0: |
565 | case IXGBE_API_VER_UNKNOWN: | | 565 | case IXGBE_API_VER_UNKNOWN: |
566 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 566 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
567 | return; | | 567 | return; |
568 | } | | 568 | } |
569 | | | 569 | |
570 | resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK | | | 570 | resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK | |
571 | IXGBE_VT_MSGTYPE_CTS; | | 571 | IXGBE_VT_MSGTYPE_CTS; |
572 | | | 572 | |
573 | num_queues = ixgbe_vf_queues(adapter->iov_mode); | | 573 | num_queues = ixgbe_vf_queues(adapter->iov_mode); |
574 | resp[IXGBE_VF_TX_QUEUES] = num_queues; | | 574 | resp[IXGBE_VF_TX_QUEUES] = num_queues; |
575 | resp[IXGBE_VF_RX_QUEUES] = num_queues; | | 575 | resp[IXGBE_VF_RX_QUEUES] = num_queues; |
576 | resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0); | | 576 | resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0); |
577 | resp[IXGBE_VF_DEF_QUEUE] = 0; | | 577 | resp[IXGBE_VF_DEF_QUEUE] = 0; |
578 | | | 578 | |
579 | hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool); | | 579 | hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool); |
580 | } /* ixgbe_vf_get_queues */ | | 580 | } /* ixgbe_vf_get_queues */ |
581 | | | 581 | |
582 | | | 582 | |
583 | static void | | 583 | static void |
584 | ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf) | | 584 | ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf) |
585 | { | | 585 | { |
586 | struct ixgbe_hw *hw; | | 586 | struct ixgbe_hw *hw; |
587 | uint32_t msg[IXGBE_VFMAILBOX_SIZE]; | | 587 | uint32_t msg[IXGBE_VFMAILBOX_SIZE]; |
588 | int error; | | 588 | int error; |
589 | | | 589 | |
590 | hw = &adapter->hw; | | 590 | hw = &adapter->hw; |
591 | | | 591 | |
592 | error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool); | | 592 | error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool); |
593 | | | 593 | |
594 | if (error != 0) | | 594 | if (error != 0) |
595 | return; | | 595 | return; |
596 | | | 596 | |
597 | CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname, | | 597 | CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname, |
598 | msg[0], vf->pool); | | 598 | msg[0], vf->pool); |
599 | if (msg[0] == IXGBE_VF_RESET) { | | 599 | if (msg[0] == IXGBE_VF_RESET) { |
600 | ixgbe_vf_reset_msg(adapter, vf, msg); | | 600 | ixgbe_vf_reset_msg(adapter, vf, msg); |
601 | return; | | 601 | return; |
602 | } | | 602 | } |
603 | | | 603 | |
604 | if (!(vf->flags & IXGBE_VF_CTS)) { | | 604 | if (!(vf->flags & IXGBE_VF_CTS)) { |
605 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 605 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
606 | return; | | 606 | return; |
607 | } | | 607 | } |
608 | | | 608 | |
609 | switch (msg[0] & IXGBE_VT_MSG_MASK) { | | 609 | switch (msg[0] & IXGBE_VT_MSG_MASK) { |
610 | case IXGBE_VF_SET_MAC_ADDR: | | 610 | case IXGBE_VF_SET_MAC_ADDR: |
611 | ixgbe_vf_set_mac(adapter, vf, msg); | | 611 | ixgbe_vf_set_mac(adapter, vf, msg); |
612 | break; | | 612 | break; |
613 | case IXGBE_VF_SET_MULTICAST: | | 613 | case IXGBE_VF_SET_MULTICAST: |
614 | ixgbe_vf_set_mc_addr(adapter, vf, msg); | | 614 | ixgbe_vf_set_mc_addr(adapter, vf, msg); |
615 | break; | | 615 | break; |
616 | case IXGBE_VF_SET_VLAN: | | 616 | case IXGBE_VF_SET_VLAN: |
617 | ixgbe_vf_set_vlan(adapter, vf, msg); | | 617 | ixgbe_vf_set_vlan(adapter, vf, msg); |
618 | break; | | 618 | break; |
619 | case IXGBE_VF_SET_LPE: | | 619 | case IXGBE_VF_SET_LPE: |
620 | ixgbe_vf_set_lpe(adapter, vf, msg); | | 620 | ixgbe_vf_set_lpe(adapter, vf, msg); |
621 | break; | | 621 | break; |
622 | case IXGBE_VF_SET_MACVLAN: | | 622 | case IXGBE_VF_SET_MACVLAN: |
623 | ixgbe_vf_set_macvlan(adapter, vf, msg); | | 623 | ixgbe_vf_set_macvlan(adapter, vf, msg); |
624 | break; | | 624 | break; |
625 | case IXGBE_VF_API_NEGOTIATE: | | 625 | case IXGBE_VF_API_NEGOTIATE: |
626 | ixgbe_vf_api_negotiate(adapter, vf, msg); | | 626 | ixgbe_vf_api_negotiate(adapter, vf, msg); |
627 | break; | | 627 | break; |
628 | case IXGBE_VF_GET_QUEUES: | | 628 | case IXGBE_VF_GET_QUEUES: |
629 | ixgbe_vf_get_queues(adapter, vf, msg); | | 629 | ixgbe_vf_get_queues(adapter, vf, msg); |
630 | break; | | 630 | break; |
631 | default: | | 631 | default: |
632 | ixgbe_send_vf_nack(adapter, vf, msg[0]); | | 632 | ixgbe_send_vf_nack(adapter, vf, msg[0]); |
633 | } | | 633 | } |
634 | } /* ixgbe_process_vf_msg */ | | 634 | } /* ixgbe_process_vf_msg */ |
635 | | | 635 | |
636 | | | 636 | |
637 | /* Tasklet for handling VF -> PF mailbox messages */ | | 637 | /* Tasklet for handling VF -> PF mailbox messages */ |
638 | void | | 638 | void |
639 | ixgbe_handle_mbx(void *context, int pending) | | 639 | ixgbe_handle_mbx(void *context, int pending) |
640 | { | | 640 | { |
641 | struct adapter *adapter = context; | | 641 | struct adapter *adapter = context; |
642 | struct ixgbe_hw *hw; | | 642 | struct ixgbe_hw *hw; |
643 | struct ixgbe_vf *vf; | | 643 | struct ixgbe_vf *vf; |
644 | int i; | | 644 | int i; |
645 | | | 645 | |
646 | KASSERT(mutex_owned(&adapter->core_mtx)); | | 646 | KASSERT(mutex_owned(&adapter->core_mtx)); |
647 | | | 647 | |
648 | hw = &adapter->hw; | | 648 | hw = &adapter->hw; |
649 | | | 649 | |
650 | for (i = 0; i < adapter->num_vfs; i++) { | | 650 | for (i = 0; i < adapter->num_vfs; i++) { |
651 | vf = &adapter->vfs[i]; | | 651 | vf = &adapter->vfs[i]; |
652 | | | 652 | |
653 | if (vf->flags & IXGBE_VF_ACTIVE) { | | 653 | if (vf->flags & IXGBE_VF_ACTIVE) { |
654 | if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0) | | 654 | if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0) |
655 | ixgbe_process_vf_reset(adapter, vf); | | 655 | ixgbe_process_vf_reset(adapter, vf); |
656 | | | 656 | |
657 | if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0) | | 657 | if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0) |
658 | ixgbe_process_vf_msg(adapter, vf); | | 658 | ixgbe_process_vf_msg(adapter, vf); |
659 | | | 659 | |
660 | if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0) | | 660 | if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0) |
661 | ixgbe_process_vf_ack(adapter, vf); | | 661 | ixgbe_process_vf_ack(adapter, vf); |
662 | } | | 662 | } |
663 | } | | 663 | } |
664 | } /* ixgbe_handle_mbx */ | | 664 | } /* ixgbe_handle_mbx */ |
665 | | | 665 | |
666 | int | | 666 | int |
667 | ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config) | | 667 | ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config) |
668 | { | | 668 | { |
669 | struct adapter *adapter; | | 669 | struct adapter *adapter; |
670 | int retval = 0; | | 670 | int retval = 0; |
671 | | | 671 | |
672 | adapter = device_get_softc(dev); | | 672 | adapter = device_get_softc(dev); |
673 | adapter->iov_mode = IXGBE_NO_VM; | | 673 | adapter->iov_mode = IXGBE_NO_VM; |
674 | | | 674 | |
675 | if (num_vfs == 0) { | | 675 | if (num_vfs == 0) { |
676 | /* Would we ever get num_vfs = 0? */ | | 676 | /* Would we ever get num_vfs = 0? */ |
677 | retval = EINVAL; | | 677 | retval = EINVAL; |
678 | goto err_init_iov; | | 678 | goto err_init_iov; |
679 | } | | 679 | } |
680 | | | 680 | |
681 | /* | | 681 | /* |
682 | * We've got to reserve a VM's worth of queues for the PF, | | 682 | * We've got to reserve a VM's worth of queues for the PF, |
683 | * thus we go into "64 VF mode" if 32+ VFs are requested. | | 683 | * thus we go into "64 VF mode" if 32+ VFs are requested. |
684 | * With 64 VFs, you can only have two queues per VF. | | 684 | * With 64 VFs, you can only have two queues per VF. |
685 | * With 32 VFs, you can have up to four queues per VF. | | 685 | * With 32 VFs, you can have up to four queues per VF. |
686 | */ | | 686 | */ |
687 | if (num_vfs >= IXGBE_32_VM) | | 687 | if (num_vfs >= IXGBE_32_VM) |
688 | adapter->iov_mode = IXGBE_64_VM; | | 688 | adapter->iov_mode = IXGBE_64_VM; |
689 | else | | 689 | else |
690 | adapter->iov_mode = IXGBE_32_VM; | | 690 | adapter->iov_mode = IXGBE_32_VM; |
691 | | | 691 | |
692 | /* Again, reserving 1 VM's worth of queues for the PF */ | | 692 | /* Again, reserving 1 VM's worth of queues for the PF */ |
693 | adapter->pool = adapter->iov_mode - 1; | | 693 | adapter->pool = adapter->iov_mode - 1; |
694 | | | 694 | |
695 | if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) { | | 695 | if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) { |
696 | retval = ENOSPC; | | 696 | retval = ENOSPC; |
697 | goto err_init_iov; | | 697 | goto err_init_iov; |
698 | } | | 698 | } |
699 | | | 699 | |
700 | IXGBE_CORE_LOCK(adapter); | | 700 | IXGBE_CORE_LOCK(adapter); |
701 | | | 701 | |
702 | adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV, | | 702 | adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV, |
703 | M_NOWAIT | M_ZERO); | | 703 | M_NOWAIT | M_ZERO); |
704 | | | 704 | |
705 | if (adapter->vfs == NULL) { | | 705 | if (adapter->vfs == NULL) { |
706 | retval = ENOMEM; | | 706 | retval = ENOMEM; |
707 | IXGBE_CORE_UNLOCK(adapter); | | 707 | IXGBE_CORE_UNLOCK(adapter); |
708 | goto err_init_iov; | | 708 | goto err_init_iov; |
709 | } | | 709 | } |
710 | | | 710 | |
711 | adapter->num_vfs = num_vfs; | | 711 | adapter->num_vfs = num_vfs; |
712 | | | 712 | |
713 | /* set the SRIOV flag now as it's needed | | 713 | /* set the SRIOV flag now as it's needed |
714 | * by ixgbe_init_locked() */ | | 714 | * by ixgbe_init_locked() */ |
715 | adapter->feat_en |= IXGBE_FEATURE_SRIOV; | | 715 | adapter->feat_en |= IXGBE_FEATURE_SRIOV; |
716 | adapter->init_locked(adapter); | | 716 | adapter->init_locked(adapter); |
717 | | | 717 | |
718 | IXGBE_CORE_UNLOCK(adapter); | | 718 | IXGBE_CORE_UNLOCK(adapter); |
719 | | | 719 | |
720 | return (retval); | | 720 | return (retval); |
721 | | | 721 | |
722 | err_init_iov: | | 722 | err_init_iov: |
723 | adapter->num_vfs = 0; | | 723 | adapter->num_vfs = 0; |
724 | adapter->pool = 0; | | 724 | adapter->pool = 0; |
725 | adapter->iov_mode = IXGBE_NO_VM; | | 725 | adapter->iov_mode = IXGBE_NO_VM; |
726 | | | 726 | |
727 | return (retval); | | 727 | return (retval); |
728 | } /* ixgbe_init_iov */ | | 728 | } /* ixgbe_init_iov */ |
729 | | | 729 | |
730 | void | | 730 | void |
731 | ixgbe_uninit_iov(device_t dev) | | 731 | ixgbe_uninit_iov(device_t dev) |
732 | { | | 732 | { |
733 | struct ixgbe_hw *hw; | | 733 | struct ixgbe_hw *hw; |
734 | struct adapter *adapter; | | 734 | struct adapter *adapter; |
735 | uint32_t pf_reg, vf_reg; | | 735 | uint32_t pf_reg, vf_reg; |
736 | | | 736 | |
737 | adapter = device_get_softc(dev); | | 737 | adapter = device_get_softc(dev); |
738 | hw = &adapter->hw; | | 738 | hw = &adapter->hw; |
739 | | | 739 | |
740 | IXGBE_CORE_LOCK(adapter); | | 740 | IXGBE_CORE_LOCK(adapter); |
741 | | | 741 | |
742 | /* Enable rx/tx for the PF and disable it for all VFs. */ | | 742 | /* Enable rx/tx for the PF and disable it for all VFs. */ |
743 | pf_reg = IXGBE_VF_INDEX(adapter->pool); | | 743 | pf_reg = IXGBE_VF_INDEX(adapter->pool); |
744 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool)); | | 744 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool)); |
745 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool)); | | 745 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool)); |
746 | | | 746 | |
747 | if (pf_reg == 0) | | 747 | if (pf_reg == 0) |
748 | vf_reg = 1; | | 748 | vf_reg = 1; |
749 | else | | 749 | else |
750 | vf_reg = 0; | | 750 | vf_reg = 0; |
751 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0); | | 751 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0); |
752 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0); | | 752 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0); |
753 | | | 753 | |
754 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); | | 754 | IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); |
755 | | | 755 | |
756 | free(adapter->vfs, M_IXGBE_SRIOV); | | 756 | free(adapter->vfs, M_IXGBE_SRIOV); |
757 | adapter->vfs = NULL; | | 757 | adapter->vfs = NULL; |
758 | adapter->num_vfs = 0; | | 758 | adapter->num_vfs = 0; |
759 | adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; | | 759 | adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; |
760 | | | 760 | |
761 | IXGBE_CORE_UNLOCK(adapter); | | 761 | IXGBE_CORE_UNLOCK(adapter); |
762 | } /* ixgbe_uninit_iov */ | | 762 | } /* ixgbe_uninit_iov */ |
763 | | | 763 | |
764 | static void | | 764 | static void |
765 | ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf) | | 765 | ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf) |
766 | { | | 766 | { |
767 | struct ixgbe_hw *hw; | | 767 | struct ixgbe_hw *hw; |
768 | uint32_t vf_index, pfmbimr; | | 768 | uint32_t vf_index, pfmbimr; |
769 | | | 769 | |
770 | IXGBE_CORE_LOCK_ASSERT(adapter); | | 770 | IXGBE_CORE_LOCK_ASSERT(adapter); |
771 | | | 771 | |
772 | hw = &adapter->hw; | | 772 | hw = &adapter->hw; |
773 | | | 773 | |
774 | if (!(vf->flags & IXGBE_VF_ACTIVE)) | | 774 | if (!(vf->flags & IXGBE_VF_ACTIVE)) |
775 | return; | | 775 | return; |
776 | | | 776 | |
777 | vf_index = IXGBE_VF_INDEX(vf->pool); | | 777 | vf_index = IXGBE_VF_INDEX(vf->pool); |
778 | pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index)); | | 778 | pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index)); |
779 | pfmbimr |= IXGBE_VF_BIT(vf->pool); | | 779 | pfmbimr |= IXGBE_VF_BIT(vf->pool); |
780 | IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr); | | 780 | IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr); |
781 | | | 781 | |
782 | ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag); | | 782 | ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag); |
783 | | | 783 | |
784 | // XXX multicast addresses | | 784 | // XXX multicast addresses |
785 | | | 785 | |
786 | if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { | | 786 | if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { |
787 | ixgbe_set_rar(&adapter->hw, vf->rar_index, | | 787 | ixgbe_set_rar(&adapter->hw, vf->rar_index, |
788 | vf->ether_addr, vf->pool, TRUE); | | 788 | vf->ether_addr, vf->pool, TRUE); |
789 | } | | 789 | } |
790 | | | 790 | |
791 | ixgbe_vf_enable_transmit(adapter, vf); | | 791 | ixgbe_vf_enable_transmit(adapter, vf); |
792 | ixgbe_vf_enable_receive(adapter, vf); | | 792 | ixgbe_vf_enable_receive(adapter, vf); |
793 | | | 793 | |
794 | ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); | | 794 | ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); |
795 | } /* ixgbe_init_vf */ | | 795 | } /* ixgbe_init_vf */ |
796 | | | 796 | |
/*
 * Program the adapter's virtualization registers for the configured
 * SR-IOV mode (32 or 64 pools), enable rx/tx for the PF pool, enable
 * VM-to-VM loopback, and initialize every VF.  The register sequence
 * is a read-modify-write so unrelated bits are preserved.
 */
void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	/* Nothing to do unless SR-IOV has been configured. */
	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;	/* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	/* Select the per-mode bits for 64- or 32-pool virtualization. */
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		/* iov_mode was validated at init; anything else is a bug. */
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* Enable virtualization; route untagged frames to the PF pool. */
	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */
857 | | | 857 | |
858 | | | 858 | |
859 | /* Check the max frame setting of all active VF's */ | | 859 | /* Check the max frame setting of all active VF's */ |
860 | void | | 860 | void |
861 | ixgbe_recalculate_max_frame(struct adapter *adapter) | | 861 | ixgbe_recalculate_max_frame(struct adapter *adapter) |
862 | { | | 862 | { |
863 | struct ixgbe_vf *vf; | | 863 | struct ixgbe_vf *vf; |
864 | | | 864 | |
865 | IXGBE_CORE_LOCK_ASSERT(adapter); | | 865 | IXGBE_CORE_LOCK_ASSERT(adapter); |
866 | | | 866 | |
867 | for (int i = 0; i < adapter->num_vfs; i++) { | | 867 | for (int i = 0; i < adapter->num_vfs; i++) { |
868 | vf = &adapter->vfs[i]; | | 868 | vf = &adapter->vfs[i]; |
869 | if (vf->flags & IXGBE_VF_ACTIVE) | | 869 | if (vf->flags & IXGBE_VF_ACTIVE) |
870 | ixgbe_update_max_frame(adapter, vf->max_frame_size); | | 870 | ixgbe_update_max_frame(adapter, vf->max_frame_size); |
871 | } | | 871 | } |
872 | } /* ixgbe_recalculate_max_frame */ | | 872 | } /* ixgbe_recalculate_max_frame */ |
873 | | | 873 | |
874 | int | | 874 | int |
875 | ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config) | | 875 | ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config) |
876 | { | | 876 | { |
877 | struct adapter *adapter; | | 877 | struct adapter *adapter; |
878 | struct ixgbe_vf *vf; | | 878 | struct ixgbe_vf *vf; |
879 | const void *mac; | | 879 | const void *mac; |
880 | | | 880 | |
881 | adapter = device_get_softc(dev); | | 881 | adapter = device_get_softc(dev); |
882 | | | 882 | |
883 | KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d", | | 883 | KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d", |
884 | vfnum, adapter->num_vfs)); | | 884 | vfnum, adapter->num_vfs)); |
885 | | | 885 | |
886 | IXGBE_CORE_LOCK(adapter); | | 886 | IXGBE_CORE_LOCK(adapter); |
887 | vf = &adapter->vfs[vfnum]; | | 887 | vf = &adapter->vfs[vfnum]; |
888 | vf->pool= vfnum; | | 888 | vf->pool= vfnum; |
889 | | | 889 | |
890 | /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */ | | 890 | /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */ |
891 | vf->rar_index = vfnum + 1; | | 891 | vf->rar_index = vfnum + 1; |
892 | vf->default_vlan = 0; | | 892 | vf->default_vlan = 0; |
893 | vf->max_frame_size = ETHER_MAX_LEN; | | 893 | vf->max_frame_size = ETHER_MAX_LEN; |
894 | ixgbe_update_max_frame(adapter, vf->max_frame_size); | | 894 | ixgbe_update_max_frame(adapter, vf->max_frame_size); |
895 | | | 895 | |
896 | if (nvlist_exists_binary(config, "mac-addr")) { | | 896 | if (nvlist_exists_binary(config, "mac-addr")) { |
897 | mac = nvlist_get_binary(config, "mac-addr", NULL); | | 897 | mac = nvlist_get_binary(config, "mac-addr", NULL); |
898 | bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); | | 898 | bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); |
899 | if (nvlist_get_bool(config, "allow-set-mac")) | | 899 | if (nvlist_get_bool(config, "allow-set-mac")) |
900 | vf->flags |= IXGBE_VF_CAP_MAC; | | 900 | vf->flags |= IXGBE_VF_CAP_MAC; |
901 | } else | | 901 | } else |
902 | /* | | 902 | /* |
903 | * If the administrator has not specified a MAC address then | | 903 | * If the administrator has not specified a MAC address then |
904 | * we must allow the VF to choose one. | | 904 | * we must allow the VF to choose one. |
905 | */ | | 905 | */ |
906 | vf->flags |= IXGBE_VF_CAP_MAC; | | 906 | vf->flags |= IXGBE_VF_CAP_MAC; |
907 | | | 907 | |
908 | vf->flags |= IXGBE_VF_ACTIVE; | | 908 | vf->flags |= IXGBE_VF_ACTIVE; |
909 | | | 909 | |
910 | ixgbe_init_vf(adapter, vf); | | 910 | ixgbe_init_vf(adapter, vf); |
911 | IXGBE_CORE_UNLOCK(adapter); | | 911 | IXGBE_CORE_UNLOCK(adapter); |
912 | | | 912 | |
913 | return (0); | | 913 | return (0); |
914 | } /* ixgbe_add_vf */ | | 914 | } /* ixgbe_add_vf */ |
915 | | | 915 | |
916 | #else | | 916 | #else |
917 | | | 917 | |
/*
 * Stub used when SR-IOV support is compiled out: the mailbox tasklet
 * does nothing.  Signature must match the SR-IOV build above.
 */
void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */
923 | | | 923 | |
/*
 * Stub used when SR-IOV support is compiled out: with no VF pools the
 * queue index is the absolute index, so just hand back `num`.
 */
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return (num);
} /* ixgbe_vf_que_index */
931 | | | 931 | |
932 | #endif | | 932 | #endif |