Thu Jun 25 06:45:10 2020 UTC
Use unsigned to avoid undefined behavior in ixgbe_fc_enable_82598().
Same as ixgbe_common.c rev. 1.24. Found by KUBSan.


(msaitoh)
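For background on the fix: hw->fc.pause_time is a u16 in struct ixgbe_fc_info. Under C's integer promotions the u16 operand is promoted to signed int before the multiply, so any pause_time of 0x8000 or above makes pause_time * 0x00010001 exceed INT_MAX; signed overflow is undefined behavior, which is what KUBSan flags. Casting one operand to u32 keeps the arithmetic unsigned, where the result is well defined. A minimal standalone sketch of the pattern, written with <stdint.h> types in place of the driver's u16/u32 typedefs (an illustration of the bug class, not driver code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t pause_time = 0xffff;	/* worst-case 16-bit value */

	/*
	 * Multiplying by 0x00010001 replicates the 16-bit value into
	 * both halves of a 32-bit word: (p << 16) | p.  Without a cast,
	 * pause_time is promoted to signed int, and for any value of
	 * 0x8000 or more the product exceeds INT_MAX, e.g.
	 *
	 *	uint32_t bad = pause_time * 0x00010001;
	 *
	 * is signed overflow (undefined behavior, reported by KUBSan).
	 */

	/* Cast first so the multiply is carried out as unsigned 32-bit. */
	uint32_t reg = (uint32_t)pause_time * 0x00010001;

	printf("reg = 0x%08x\n", reg);	/* reg = 0xffffffff */
	return 0;
}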
diff -r1.14 -r1.15 src/sys/dev/pci/ixgbe/ixgbe_82598.c

--- src/sys/dev/pci/ixgbe/ixgbe_82598.c 2020/01/03 12:59:46 1.14
+++ src/sys/dev/pci/ixgbe/ixgbe_82598.c 2020/06/25 06:45:10 1.15
@@ -1,1443 +1,1443 @@
-/* $NetBSD: ixgbe_82598.c,v 1.14 2020/01/03 12:59:46 pgoyette Exp $ */
+/* $NetBSD: ixgbe_82598.c,v 1.15 2020/06/25 06:45:10 msaitoh Exp $ */
 
 /******************************************************************************
   SPDX-License-Identifier: BSD-3-Clause
 
   Copyright (c) 2001-2017, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
 
   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
 
   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.
 
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
 /*$FreeBSD: head/sys/dev/ixgbe/ixgbe_82598.c 331224 2018-03-19 20:55:05Z erj $*/
 
 #include "ixgbe_type.h"
 #include "ixgbe_82598.h"
 #include "ixgbe_api.h"
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
 #define IXGBE_82598_MAX_TX_QUEUES 32
 #define IXGBE_82598_MAX_RX_QUEUES 64
 #define IXGBE_82598_RAR_ENTRIES   16
 #define IXGBE_82598_MC_TBL_SIZE   128
 #define IXGBE_82598_VFT_TBL_SIZE  128
 #define IXGBE_82598_RX_PB_SIZE    512
 
 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg);
 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete);
 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete);
 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy);
 static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data);
 /**
  * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
  * @hw: pointer to the HW structure
  *
  * The defaults for 82598 should be in the range of 50us to 50ms,
  * however the hardware default for these parts is 500us to 1ms which is less
  * than the 10ms recommended by the pci-e spec. To address this we need to
  * increase the value to either 10ms to 250ms for capability version 1 config,
  * or 16ms to 55ms for version 2.
  **/
 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
 {
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;
 
	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;
 
	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}
 
	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
 out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
 }
 
 /**
  * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
  * @hw: pointer to hardware structure
  *
  * Initialize the function pointers and assign the MAC type for 82598.
  * Does not touch the hardware.
  **/
 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
 {
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;
 
	DEBUGFUNC("ixgbe_init_ops_82598");
 
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);
 
	/* PHY */
	phy->ops.init = ixgbe_init_phy_ops_82598;
 
	/* MAC */
	mac->ops.start_hw = ixgbe_start_hw_82598;
	mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
	mac->ops.reset_hw = ixgbe_reset_hw_82598;
	mac->ops.get_media_type = ixgbe_get_media_type_82598;
	mac->ops.get_supported_physical_layer =
				ixgbe_get_supported_physical_layer_82598;
	mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
	mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
 
	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
	mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
	mac->ops.set_vfta = ixgbe_set_vfta_82598;
	mac->ops.set_vlvf = NULL;
	mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
 
	/* Flow Control */
	mac->ops.fc_enable = ixgbe_fc_enable_82598;
 
	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
 
	/* SFP+ Module */
	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
 
	/* Link */
	mac->ops.check_link = ixgbe_check_mac_link_82598;
	mac->ops.setup_link = ixgbe_setup_mac_link_82598;
	mac->ops.flap_tx_laser = NULL;
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
	mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
 
	/* Manageability interface */
	mac->ops.set_fw_drv_ver = NULL;
 
	mac->ops.get_rtrup2tc = NULL;
 
	return ret_val;
 }
 
 /**
  * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
  * @hw: pointer to hardware structure
  *
  * Initialize any function pointers that were not able to be
  * set during init_shared_code because the PHY/SFP type was
  * not known. Perform the SFP init if necessary.
  *
  **/
 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 {
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;
	u16 list_offset, data_offset;
 
	DEBUGFUNC("ixgbe_init_phy_ops_82598");
 
	/* Identify the PHY */
	phy->ops.identify(hw);
 
	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
				ixgbe_get_copper_link_capabilities_generic;
	}
 
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
				ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_nl:
		phy->ops.reset = ixgbe_reset_phy_nl;
 
		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
		if (ret_val != IXGBE_SUCCESS)
			goto out;
		else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
 
		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
							      &list_offset,
							      &data_offset);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
			goto out;
		}
		break;
	default:
		break;
	}
 
 out:
	return ret_val;
 }
 
 /**
  * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
  * @hw: pointer to hardware structure
  *
  * Starts the hardware using the generic start_hw function.
  * Disables relaxed ordering Then set pcie completion timeout
  *
  **/
 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
	u32 regval;
	u32 i;
	s32 ret_val = IXGBE_SUCCESS;
 
	DEBUGFUNC("ixgbe_start_hw_82598");
 
	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val)
		return ret_val;
 
	/* Disable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}
 
	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}
 
	/* set the completion timeout for interface */
	ixgbe_set_pcie_completion_timeout(hw);
 
	return ret_val;
 }
 
 /**
  * ixgbe_get_link_capabilities_82598 - Determines link capabilities
  * @hw: pointer to hardware structure
  * @speed: pointer to link speed
  * @autoneg: boolean auto-negotiation value
  *
  * Determines the link capabilities by reading the AUTOC register.
  **/
 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
					     ixgbe_link_speed *speed,
					     bool *autoneg)
 {
	s32 status = IXGBE_SUCCESS;
	u32 autoc = 0;
 
	DEBUGFUNC("ixgbe_get_link_capabilities_82598");
 
	/*
	 * Determine link capabilities based on the stored value of AUTOC,
	 * which represents EEPROM defaults. If AUTOC value has not been
	 * stored, use the current register value.
	 */
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = FALSE;
		break;
 
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = FALSE;
		break;
 
	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;
 
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = TRUE;
		break;
 
	default:
		status = IXGBE_ERR_LINK_SETUP;
		break;
	}
 
	return status;
 }
 
 /**
  * ixgbe_get_media_type_82598 - Determines media type
  * @hw: pointer to hardware structure
  *
  * Returns the media type (fiber, copper, backplane)
  **/
 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 {
	enum ixgbe_media_type media_type;
 
	DEBUGFUNC("ixgbe_get_media_type_82598");
 
	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
		media_type = ixgbe_media_type_copper;
		goto out;
	default:
		break;
	}
 
	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82598_BX:
		/* Default device ID is mezzanine card KX/KX4 */
		media_type = ixgbe_media_type_backplane;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
	case IXGBE_DEV_ID_82598EB_XF_LR:
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
		media_type = ixgbe_media_type_fiber;
		break;
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
		media_type = ixgbe_media_type_cx4;
		break;
	case IXGBE_DEV_ID_82598AT:
	case IXGBE_DEV_ID_82598AT2:
		media_type = ixgbe_media_type_copper;
		break;
	default:
		media_type = ixgbe_media_type_unknown;
		break;
	}
 out:
	return media_type;
 }
 
 /**
  * ixgbe_fc_enable_82598 - Enable flow control
  * @hw: pointer to hardware structure
  *
  * Enable flow control according to the current settings.
  **/
 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 {
	s32 ret_val = IXGBE_SUCCESS;
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;
 
	DEBUGFUNC("ixgbe_fc_enable_82598");
 
	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}
 
	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}
 
	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}
 
	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);
 
	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
 
	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
 
	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		DEBUGOUT("Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}
 
	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}
 
	}
 
	/* Configure pause time (2 TCs per register) */
-	reg = hw->fc.pause_time * 0x00010001;
+	reg = (u32)hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
 out:
	return ret_val;
 }
 
 /**
  * ixgbe_start_mac_link_82598 - Configures MAC link settings
  * @hw: pointer to hardware structure
  * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  * Configures link settings based on values in the ixgbe_hw struct.
  * Restarts the link. Performs autonegotiation if needed.
  **/
 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
 {
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
 
	DEBUGFUNC("ixgbe_start_mac_link_82598");
 
	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 
	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}
 
	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);
 
	return status;
 }
 
 /**
  * ixgbe_validate_link_ready - Function looks for phy link
  * @hw: pointer to hardware structure
  *
  * Function indicates success when phy link is available. If phy is not ready
  * within 5 seconds of MAC indicating link, the function returns error.
  **/
 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
 {
	u32 timeout;
	u16 an_reg;
 
	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return IXGBE_SUCCESS;
 
	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
 
		if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
		    (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
			break;
 
		msec_delay(100);
	}
 
	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		DEBUGOUT("Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}
 
	return IXGBE_SUCCESS;
 }
 
 /**
  * ixgbe_check_mac_link_82598 - Get link/speed status
  * @hw: pointer to hardware structure
  * @speed: pointer to link speed
  * @link_up: TRUE is link is up, FALSE otherwise
  * @link_up_wait_to_complete: bool used to wait for link up or not
  *
  * Reads the links register to determine if link is up and the current speed
  **/
 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
 {
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;
 
	DEBUGFUNC("ixgbe_check_mac_link_82598");
 
	/*
	 * SERDES PHY requires us to read link status from undocumented
	 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
	 * indicates link down. OxC00C is read to check that the XAUI lanes
	 * are active. Bit 0 clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < hw->mac.max_link_up_time; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = TRUE;
					break;
				} else {
					*link_up = FALSE;
				}
				msec_delay(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     IXGBE_TWINAX_DEV,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     IXGBE_TWINAX_DEV,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = TRUE;
			else
				*link_up = FALSE;
		}
 
		if (*link_up == FALSE)
			goto out;
	}
 
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < hw->mac.max_link_up_time; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}
 
	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
	    (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
		*link_up = FALSE;
 
 out:
	return IXGBE_SUCCESS;
 }
725 725
726/** 726/**
727 * ixgbe_setup_mac_link_82598 - Set MAC link speed 727 * ixgbe_setup_mac_link_82598 - Set MAC link speed
728 * @hw: pointer to hardware structure 728 * @hw: pointer to hardware structure
729 * @speed: new link speed 729 * @speed: new link speed
730 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 730 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
731 * 731 *
732 * Set the link speed in the AUTOC register and restarts link. 732 * Set the link speed in the AUTOC register and restarts link.
733 **/ 733 **/
734static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, 734static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
735 ixgbe_link_speed speed, 735 ixgbe_link_speed speed,
736 bool autoneg_wait_to_complete) 736 bool autoneg_wait_to_complete)
737{ 737{
738 bool autoneg = FALSE; 738 bool autoneg = FALSE;
739 s32 status = IXGBE_SUCCESS; 739 s32 status = IXGBE_SUCCESS;
740 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 740 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
741 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 741 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
742 u32 autoc = curr_autoc; 742 u32 autoc = curr_autoc;
743 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 743 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
744 744
745 DEBUGFUNC("ixgbe_setup_mac_link_82598"); 745 DEBUGFUNC("ixgbe_setup_mac_link_82598");
746 746
747 /* Check to see if speed passed in is supported. */ 747 /* Check to see if speed passed in is supported. */
748 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); 748 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
749 speed &= link_capabilities; 749 speed &= link_capabilities;
750 750
751 if (speed == IXGBE_LINK_SPEED_UNKNOWN) 751 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
752 status = IXGBE_ERR_LINK_SETUP; 752 status = IXGBE_ERR_LINK_SETUP;
753 753
754 /* Set KX4/KX support according to speed requested */ 754 /* Set KX4/KX support according to speed requested */
755 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || 755 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
756 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { 756 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
757 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; 757 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
758 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 758 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
759 autoc |= IXGBE_AUTOC_KX4_SUPP; 759 autoc |= IXGBE_AUTOC_KX4_SUPP;
760 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 760 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
761 autoc |= IXGBE_AUTOC_KX_SUPP; 761 autoc |= IXGBE_AUTOC_KX_SUPP;
762 if (autoc != curr_autoc) 762 if (autoc != curr_autoc)
763 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 763 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
764 } 764 }
765 765
766 if (status == IXGBE_SUCCESS) { 766 if (status == IXGBE_SUCCESS) {
767 /* 767 /*
768 * Setup and restart the link based on the new values in 768 * Setup and restart the link based on the new values in
769 * ixgbe_hw This will write the AUTOC register based on the new 769 * ixgbe_hw This will write the AUTOC register based on the new
770 * stored values 770 * stored values
771 */ 771 */
772 status = ixgbe_start_mac_link_82598(hw, 772 status = ixgbe_start_mac_link_82598(hw,
773 autoneg_wait_to_complete); 773 autoneg_wait_to_complete);
774 } 774 }
775 775
776 return status; 776 return status;
777} 777}
778 778
779 779
780/** 780/**
781 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field 781 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
782 * @hw: pointer to hardware structure 782 * @hw: pointer to hardware structure
783 * @speed: new link speed 783 * @speed: new link speed
784 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete 784 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
785 * 785 *
786 * Sets the link speed in the AUTOC register in the MAC and restarts link. 786 * Sets the link speed in the AUTOC register in the MAC and restarts link.
787 **/ 787 **/
788static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 788static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
789 ixgbe_link_speed speed, 789 ixgbe_link_speed speed,
790 bool autoneg_wait_to_complete) 790 bool autoneg_wait_to_complete)
791{ 791{
792 s32 status; 792 s32 status;
793 793
794 DEBUGFUNC("ixgbe_setup_copper_link_82598"); 794 DEBUGFUNC("ixgbe_setup_copper_link_82598");
795 795
796 /* Setup the PHY according to input speed */ 796 /* Setup the PHY according to input speed */
797 status = hw->phy.ops.setup_link_speed(hw, speed, 797 status = hw->phy.ops.setup_link_speed(hw, speed,
798 autoneg_wait_to_complete); 798 autoneg_wait_to_complete);
799 /* Set up MAC */ 799 /* Set up MAC */
800 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 800 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
801 801
802 return status; 802 return status;
803} 803}
804 804
805/** 805/**
806 * ixgbe_reset_hw_82598 - Performs hardware reset 806 * ixgbe_reset_hw_82598 - Performs hardware reset
807 * @hw: pointer to hardware structure 807 * @hw: pointer to hardware structure
808 * 808 *
809 * Resets the hardware by resetting the transmit and receive units, masks and 809 * Resets the hardware by resetting the transmit and receive units, masks and
810 * clears all interrupts, performing a PHY reset, and performing a link (MAC) 810 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
811 * reset. 811 * reset.
812 **/ 812 **/
813static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) 813static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
814{ 814{
815 s32 status = IXGBE_SUCCESS; 815 s32 status = IXGBE_SUCCESS;
816 s32 phy_status = IXGBE_SUCCESS; 816 s32 phy_status = IXGBE_SUCCESS;
817 u32 ctrl; 817 u32 ctrl;
818 u32 gheccr; 818 u32 gheccr;
819 u32 i; 819 u32 i;
820 u32 autoc; 820 u32 autoc;
821 u8 analog_val; 821 u8 analog_val;
822 822
823 DEBUGFUNC("ixgbe_reset_hw_82598"); 823 DEBUGFUNC("ixgbe_reset_hw_82598");
824 824
825 /* Call adapter stop to disable tx/rx and clear interrupts */ 825 /* Call adapter stop to disable tx/rx and clear interrupts */
826 status = hw->mac.ops.stop_adapter(hw); 826 status = hw->mac.ops.stop_adapter(hw);
827 if (status != IXGBE_SUCCESS) 827 if (status != IXGBE_SUCCESS)
828 goto reset_hw_out; 828 goto reset_hw_out;
829 829
830 /* 830 /*
831 * Power up the Atlas Tx lanes if they are currently powered down. 831 * Power up the Atlas Tx lanes if they are currently powered down.
832 * Atlas Tx lanes are powered down for MAC loopback tests, but 832 * Atlas Tx lanes are powered down for MAC loopback tests, but
833 * they are not automatically restored on reset. 833 * they are not automatically restored on reset.
834 */ 834 */
835 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 835 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
836 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { 836 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
837 /* Enable Tx Atlas so packets can be transmitted again */ 837 /* Enable Tx Atlas so packets can be transmitted again */
838 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 838 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
839 &analog_val); 839 &analog_val);
840 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; 840 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
841 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 841 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
842 analog_val); 842 analog_val);
843 843
844 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 844 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
845 &analog_val); 845 &analog_val);
846 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 846 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
847 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 847 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
848 analog_val); 848 analog_val);
849 849
850 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 850 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
851 &analog_val); 851 &analog_val);
852 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 852 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
853 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 853 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
854 analog_val); 854 analog_val);
855 855
856 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 856 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
857 &analog_val); 857 &analog_val);
858 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 858 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
859 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 859 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
860 analog_val); 860 analog_val);
861 } 861 }

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if ((phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) ||
		    (phy_status == IXGBE_ERR_SFP_NOT_PRESENT))
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC. This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet. Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	if (phy_status != IXGBE_SUCCESS)
		status = phy_status;

	return status;
}

/**
 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq set index
 **/
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("ixgbe_set_vmdq_82598");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %u is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

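	/* Write the VMDq set index into the VIND field of RAH[rar] */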
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate from a VMDq index
 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rar_high;
	u32 rar_entries = hw->mac.num_rar_entries;

	UNREFERENCED_1PARAMETER(vmdq);

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT1("RAR index %u is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rar_high & IXGBE_RAH_VIND_MASK) {
		rar_high &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vfta_82598 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFTA
 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
 * @vlvf_bypass: boolean flag - unused
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			 bool vlan_on, bool vlvf_bypass)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	UNREFERENCED_1PARAMETER(vlvf_bypass);

	DEBUGFUNC("ixgbe_set_vfta_82598");

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F; /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0Fu << bitindex));	/* unsigned: bitindex can reach 28 */
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F; /* lower five bits */
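	/*
	 * Illustrative example: for VLAN 2049 (0x801), regindex is
	 * (2049 >> 5) & 0x7F = 64 and bitindex is 2049 & 0x1F = 1,
	 * so the VLAN maps to bit 1 of VFTA[64].
	 */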

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id (unsigned: bitindex can reach 31) */
		bits |= (1u << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1u << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 offset;
	u32 vlanbyte;

	DEBUGFUNC("ixgbe_clear_vfta_82598");

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
		for (offset = 0; offset < hw->mac.vft_size; offset++)
			IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
			    0);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Atlas analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 atlas_ctl;

	DEBUGFUNC("ixgbe_read_analog_reg8_82598");

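	/*
	 * Kick off the analog register access, give the Atlas block time
	 * to complete it, then sample the low byte of ATLASCTL as the
	 * returned register value.
	 */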
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
	    IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Atlas analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 atlas_ctl;

	DEBUGFUNC("ixgbe_write_analog_reg8_82598");

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @dev_addr: address to read from
 * @byte_offset: byte offset to read from dev_addr
 * @eeprom_data: value read
 *
 * Performs an 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = IXGBE_SUCCESS;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	DEBUGFUNC("ixgbe_read_i2c_phy_82598");

	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
		    IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
		    IXGBE_MDIO_PMA_PMD_DEV_TYPE,
		    sfp_addr);

		/* Poll status */
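		/* (up to 100 iterations * 10ms = ~1 second before giving up) */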
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
			    IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
			    IXGBE_MDIO_PMA_PMD_DEV_TYPE,
			    &sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			msec_delay(10);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			DEBUGOUT("EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs an 8 bit read operation to SFP module's EEPROM over I2C interface.
 **/
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
	    byte_offset, eeprom_data);
}

/**
 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset at address 0xA2
 * @sff8472_data: value read
 *
 * Performs an 8 bit read operation to SFP module's SFF-8472 data over I2C
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
	    byte_offset, sff8472_data);
}

/**
 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;

	DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");

	hw->phy.ops.identify(hw);

	/* Copper PHY must be checked before AUTOC LMS to determine correct
	 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		else
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else /* XAUI */
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
		break;
	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		break;
	default:
		break;
	}

	if (hw->phy.type == ixgbe_phy_nl) {
		hw->phy.ops.identify_sfp(hw);

		switch (hw->phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
			physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
			break;
		case ixgbe_sfp_type_sr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
			break;
		case ixgbe_sfp_type_lr:
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
			break;
		default:
			physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
			break;
		}
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		break;
	case IXGBE_DEV_ID_82598EB_XF_LR:
		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 * port devices.
 * @hw: pointer to the HW structure
 *
 * Calls common function and corrects issue with some single port devices
 * that enable LAN1 but not LAN0.
 **/
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
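		/*
		 * Only follow the pointer if it looks programmed; 0 and
		 * 0xFFFF are presumably the unprogrammed/erased-EEPROM
		 * values for this word.
		 */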
		hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

		/* if LAN0 is completely disabled force function to 0 */
		if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
		    !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
			bus->func = 0;
		}
	}
}

/**
 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
 * @hw: pointer to hardware structure
 *
 **/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");

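	/*
	 * The WRO_EN bits let descriptor and data write-backs use PCIe
	 * relaxed ordering; the loops stop at IXGBE_DCA_MAX_QUEUES_82598,
	 * the number of queues the DCA control registers cover.
	 */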
	/* Enable relaxed ordering */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	    (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	    (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
		    IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}
}

/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;

	UNREFERENCED_1PARAMETER(headroom);

	if (!num_pb)
		return;

	/* Setup Rx packet buffer sizes */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
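		/*
		 * Weighted split: assuming eight packet buffers
		 * (IXGBE_MAX_PACKET_BUFFERS), this is 4 * 80KB + 4 * 48KB =
		 * 512KB, the same total as eight 64KB buffers in the equal
		 * split.
		 */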
		/* Setup the first four at 80KB */
		rxpktsize = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Setup the last four at 48KB...don't re-init i */
		rxpktsize = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case PBA_STRATEGY_EQUAL:
	default:
		/* Divide the remaining Rx packet buffer evenly among the TCs */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	}

	/* Setup Tx packet buffer sizes */
	for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}

/**
 * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit
 **/
s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
{
	DEBUGFUNC("ixgbe_enable_rx_dma_82598");

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return IXGBE_SUCCESS;
}