Thu Mar 5 15:33:13 2020 UTC
s/Intialize/Initialize/ in comment.


(msaitoh)
diff -r1.116 -r1.117 src/sys/dev/pci/if_ti.c
diff -r1.4 -r1.5 src/sys/dev/pci/qat/qat.c

cvs diff -r1.116 -r1.117 src/sys/dev/pci/if_ti.c

--- src/sys/dev/pci/if_ti.c 2020/03/03 05:41:36 1.116
+++ src/sys/dev/pci/if_ti.c 2020/03/05 15:33:13 1.117
@@ -1,1767 +1,1767 @@ @@ -1,1767 +1,1767 @@
1/* $NetBSD: if_ti.c,v 1.116 2020/03/03 05:41:36 msaitoh Exp $ */ 1/* $NetBSD: if_ti.c,v 1.117 2020/03/05 15:33:13 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997, 1998, 1999 4 * Copyright (c) 1997, 1998, 1999
5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software 15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement: 16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul. 17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors 18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software 19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission. 20 * without specific prior written permission.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE. 32 * THE POSSIBILITY OF SUCH DAMAGE.
33 * 33 *
34 * FreeBSD Id: if_ti.c,v 1.15 1999/08/14 15:45:03 wpaul Exp 34 * FreeBSD Id: if_ti.c,v 1.15 1999/08/14 15:45:03 wpaul Exp
35 */ 35 */
36 36
37/* 37/*
38 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. 38 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
39 * Manuals, sample driver and firmware source kits are available 39 * Manuals, sample driver and firmware source kits are available
40 * from http://www.alteon.com/support/openkits. 40 * from http://www.alteon.com/support/openkits.
41 * 41 *
42 * Written by Bill Paul <wpaul@ctr.columbia.edu> 42 * Written by Bill Paul <wpaul@ctr.columbia.edu>
43 * Electrical Engineering Department 43 * Electrical Engineering Department
44 * Columbia University, New York City 44 * Columbia University, New York City
45 */ 45 */
46 46
47/* 47/*
48 * The Alteon Networks Tigon chip contains an embedded R4000 CPU, 48 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
49 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs 49 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
50 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The 50 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
 51 * Tigon supports hardware IP, TCP and UDP checksumming, multicast 51 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
52 * filtering and jumbo (9014 byte) frames. The hardware is largely 52 * filtering and jumbo (9014 byte) frames. The hardware is largely
53 * controlled by firmware, which must be loaded into the NIC during 53 * controlled by firmware, which must be loaded into the NIC during
54 * initialization. 54 * initialization.
55 * 55 *
56 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware 56 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
57 * revision, which supports new features such as extended commands, 57 * revision, which supports new features such as extended commands,
 58 * extended jumbo receive ring descriptors and a mini receive ring. 58 * extended jumbo receive ring descriptors and a mini receive ring.
59 * 59 *
60 * Alteon Networks is to be commended for releasing such a vast amount 60 * Alteon Networks is to be commended for releasing such a vast amount
61 * of development material for the Tigon NIC without requiring an NDA 61 * of development material for the Tigon NIC without requiring an NDA
62 * (although they really should have done it a long time ago). With 62 * (although they really should have done it a long time ago). With
63 * any luck, the other vendors will finally wise up and follow Alteon's 63 * any luck, the other vendors will finally wise up and follow Alteon's
64 * stellar example. 64 * stellar example.
65 * 65 *
66 * The firmware for the Tigon 1 and 2 NICs is compiled directly into 66 * The firmware for the Tigon 1 and 2 NICs is compiled directly into
67 * this driver by #including it as a C header file. This bloats the 67 * this driver by #including it as a C header file. This bloats the
68 * driver somewhat, but it's the easiest method considering that the 68 * driver somewhat, but it's the easiest method considering that the
69 * driver code and firmware code need to be kept in sync. The source 69 * driver code and firmware code need to be kept in sync. The source
70 * for the firmware is not provided with the FreeBSD distribution since 70 * for the firmware is not provided with the FreeBSD distribution since
71 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3. 71 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
72 * 72 *
73 * The following people deserve special thanks: 73 * The following people deserve special thanks:
74 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board 74 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
75 * for testing 75 * for testing
76 * - Raymond Lee of Netgear, for providing a pair of Netgear 76 * - Raymond Lee of Netgear, for providing a pair of Netgear
77 * GA620 Tigon 2 boards for testing 77 * GA620 Tigon 2 boards for testing
78 * - Ulf Zimmermann, for bringing the GA620 to my attention and 78 * - Ulf Zimmermann, for bringing the GA620 to my attention and
79 * convincing me to write this driver. 79 * convincing me to write this driver.
80 * - Andrew Gallatin for providing FreeBSD/Alpha support. 80 * - Andrew Gallatin for providing FreeBSD/Alpha support.
81 */ 81 */
82 82
83#include <sys/cdefs.h> 83#include <sys/cdefs.h>
84__KERNEL_RCSID(0, "$NetBSD: if_ti.c,v 1.116 2020/03/03 05:41:36 msaitoh Exp $"); 84__KERNEL_RCSID(0, "$NetBSD: if_ti.c,v 1.117 2020/03/05 15:33:13 msaitoh Exp $");
85 85
86#include "opt_inet.h" 86#include "opt_inet.h"
87 87
88#include <sys/param.h> 88#include <sys/param.h>
89#include <sys/systm.h> 89#include <sys/systm.h>
90#include <sys/sockio.h> 90#include <sys/sockio.h>
91#include <sys/mbuf.h> 91#include <sys/mbuf.h>
92#include <sys/malloc.h> 92#include <sys/malloc.h>
93#include <sys/kernel.h> 93#include <sys/kernel.h>
94#include <sys/socket.h> 94#include <sys/socket.h>
95#include <sys/queue.h> 95#include <sys/queue.h>
96#include <sys/device.h> 96#include <sys/device.h>
97#include <sys/reboot.h> 97#include <sys/reboot.h>
98 98
99#include <net/if.h> 99#include <net/if.h>
100#include <net/if_arp.h> 100#include <net/if_arp.h>
101#include <net/if_ether.h> 101#include <net/if_ether.h>
102#include <net/if_dl.h> 102#include <net/if_dl.h>
103#include <net/if_media.h> 103#include <net/if_media.h>
104 104
105#include <net/bpf.h> 105#include <net/bpf.h>
106 106
107#ifdef INET 107#ifdef INET
108#include <netinet/in.h> 108#include <netinet/in.h>
109#include <netinet/if_inarp.h> 109#include <netinet/if_inarp.h>
110#include <netinet/in_systm.h> 110#include <netinet/in_systm.h>
111#include <netinet/ip.h> 111#include <netinet/ip.h>
112#endif 112#endif
113 113
114 114
115#include <sys/bus.h> 115#include <sys/bus.h>
116 116
117#include <dev/pci/pcireg.h> 117#include <dev/pci/pcireg.h>
118#include <dev/pci/pcivar.h> 118#include <dev/pci/pcivar.h>
119#include <dev/pci/pcidevs.h> 119#include <dev/pci/pcidevs.h>
120 120
121#include <dev/pci/if_tireg.h> 121#include <dev/pci/if_tireg.h>
122 122
123#include <dev/microcode/tigon/ti_fw.h> 123#include <dev/microcode/tigon/ti_fw.h>
124#include <dev/microcode/tigon/ti_fw2.h> 124#include <dev/microcode/tigon/ti_fw2.h>
125 125
126#define TI_HOSTADDR(x, y) \ 126#define TI_HOSTADDR(x, y) \
127 do { \ 127 do { \
128 (x).ti_addr_lo = (uint32_t)(y); \ 128 (x).ti_addr_lo = (uint32_t)(y); \
129 if (sizeof(bus_addr_t) == 8) \ 129 if (sizeof(bus_addr_t) == 8) \
130 (x).ti_addr_hi = \ 130 (x).ti_addr_hi = \
131 (uint32_t)(((uint64_t)(y) >> 32)); \ 131 (uint32_t)(((uint64_t)(y) >> 32)); \
132 else \ 132 else \
133 (x).ti_addr_hi = 0; \ 133 (x).ti_addr_hi = 0; \
134 } while (/*CONSTCOND*/0) 134 } while (/*CONSTCOND*/0)
135 135
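/*
 * A minimal usage sketch of TI_HOSTADDR() (the descriptor and the
 * address value here are hypothetical): with a 64-bit bus_addr_t the
 * upper 32 bits land in ti_addr_hi, while on 32-bit platforms the
 * macro degenerates to a plain assignment with ti_addr_hi forced to
 * zero, so the NIC always receives a full 64-bit host address.
 *
 *	struct ti_rx_desc r;
 *	bus_addr_t pa = (bus_addr_t)0x123456789abcULL;
 *
 *	TI_HOSTADDR(r.ti_addr, pa);
 *	// sizeof(bus_addr_t) == 8: ti_addr_hi == 0x1234,
 *	//                          ti_addr_lo == 0x56789abc
 *	// sizeof(bus_addr_t) == 4: ti_addr_hi == 0
 */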
136/* 136/*
137 * Various supported device vendors/types and their names. 137 * Various supported device vendors/types and their names.
138 */ 138 */
139 139
140static const struct ti_type ti_devs[] = { 140static const struct ti_type ti_devs[] = {
141 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENIC, 141 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENIC,
142 "Alteon AceNIC 1000BASE-SX Ethernet" }, 142 "Alteon AceNIC 1000BASE-SX Ethernet" },
143 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENIC_COPPER, 143 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENIC_COPPER,
144 "Alteon AceNIC 1000BASE-T Ethernet" }, 144 "Alteon AceNIC 1000BASE-T Ethernet" },
145 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C985, 145 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C985,
146 "3Com 3c985-SX Gigabit Ethernet" }, 146 "3Com 3c985-SX Gigabit Ethernet" },
147 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620, 147 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620,
148 "Netgear GA620 1000BASE-SX Ethernet" }, 148 "Netgear GA620 1000BASE-SX Ethernet" },
149 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620T, 149 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620T,
150 "Netgear GA620 1000BASE-T Ethernet" }, 150 "Netgear GA620 1000BASE-T Ethernet" },
151 { PCI_VENDOR_SGI, PCI_PRODUCT_SGI_TIGON, 151 { PCI_VENDOR_SGI, PCI_PRODUCT_SGI_TIGON,
152 "Silicon Graphics Gigabit Ethernet" }, 152 "Silicon Graphics Gigabit Ethernet" },
153 { 0, 0, NULL } 153 { 0, 0, NULL }
154}; 154};
155 155
156static const struct ti_type *ti_type_match(struct pci_attach_args *); 156static const struct ti_type *ti_type_match(struct pci_attach_args *);
157static int ti_probe(device_t, cfdata_t, void *); 157static int ti_probe(device_t, cfdata_t, void *);
158static void ti_attach(device_t, device_t, void *); 158static void ti_attach(device_t, device_t, void *);
159static bool ti_shutdown(device_t, int); 159static bool ti_shutdown(device_t, int);
160static void ti_txeof_tigon1(struct ti_softc *); 160static void ti_txeof_tigon1(struct ti_softc *);
161static void ti_txeof_tigon2(struct ti_softc *); 161static void ti_txeof_tigon2(struct ti_softc *);
162static void ti_rxeof(struct ti_softc *); 162static void ti_rxeof(struct ti_softc *);
163 163
164static void ti_stats_update(struct ti_softc *); 164static void ti_stats_update(struct ti_softc *);
165static int ti_encap_tigon1(struct ti_softc *, struct mbuf *, uint32_t *); 165static int ti_encap_tigon1(struct ti_softc *, struct mbuf *, uint32_t *);
166static int ti_encap_tigon2(struct ti_softc *, struct mbuf *, uint32_t *); 166static int ti_encap_tigon2(struct ti_softc *, struct mbuf *, uint32_t *);
167 167
168static int ti_intr(void *); 168static int ti_intr(void *);
169static void ti_start(struct ifnet *); 169static void ti_start(struct ifnet *);
170static int ti_ioctl(struct ifnet *, u_long, void *); 170static int ti_ioctl(struct ifnet *, u_long, void *);
171static void ti_init(void *); 171static void ti_init(void *);
172static void ti_init2(struct ti_softc *); 172static void ti_init2(struct ti_softc *);
173static void ti_stop(struct ti_softc *); 173static void ti_stop(struct ti_softc *);
174static void ti_watchdog(struct ifnet *); 174static void ti_watchdog(struct ifnet *);
175static int ti_ifmedia_upd(struct ifnet *); 175static int ti_ifmedia_upd(struct ifnet *);
176static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); 176static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *);
177 177
178static uint32_t ti_eeprom_putbyte(struct ti_softc *, int); 178static uint32_t ti_eeprom_putbyte(struct ti_softc *, int);
179static uint8_t ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *); 179static uint8_t ti_eeprom_getbyte(struct ti_softc *, int, uint8_t *);
180static int ti_read_eeprom(struct ti_softc *, void *, int, int); 180static int ti_read_eeprom(struct ti_softc *, void *, int, int);
181 181
182static void ti_add_mcast(struct ti_softc *, struct ether_addr *); 182static void ti_add_mcast(struct ti_softc *, struct ether_addr *);
183static void ti_del_mcast(struct ti_softc *, struct ether_addr *); 183static void ti_del_mcast(struct ti_softc *, struct ether_addr *);
184static void ti_setmulti(struct ti_softc *); 184static void ti_setmulti(struct ti_softc *);
185 185
186static void ti_mem(struct ti_softc *, uint32_t, uint32_t, const void *); 186static void ti_mem(struct ti_softc *, uint32_t, uint32_t, const void *);
187static void ti_loadfw(struct ti_softc *); 187static void ti_loadfw(struct ti_softc *);
188static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); 188static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *);
189static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, void *, int); 189static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, void *, int);
190static void ti_handle_events(struct ti_softc *); 190static void ti_handle_events(struct ti_softc *);
191static int ti_alloc_jumbo_mem(struct ti_softc *); 191static int ti_alloc_jumbo_mem(struct ti_softc *);
192static void *ti_jalloc(struct ti_softc *); 192static void *ti_jalloc(struct ti_softc *);
193static void ti_jfree(struct mbuf *, void *, size_t, void *); 193static void ti_jfree(struct mbuf *, void *, size_t, void *);
194static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t); 194static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
195static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t); 195static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t);
196static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); 196static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *);
197static int ti_init_rx_ring_std(struct ti_softc *); 197static int ti_init_rx_ring_std(struct ti_softc *);
198static void ti_free_rx_ring_std(struct ti_softc *); 198static void ti_free_rx_ring_std(struct ti_softc *);
199static int ti_init_rx_ring_jumbo(struct ti_softc *); 199static int ti_init_rx_ring_jumbo(struct ti_softc *);
200static void ti_free_rx_ring_jumbo(struct ti_softc *); 200static void ti_free_rx_ring_jumbo(struct ti_softc *);
201static int ti_init_rx_ring_mini(struct ti_softc *); 201static int ti_init_rx_ring_mini(struct ti_softc *);
202static void ti_free_rx_ring_mini(struct ti_softc *); 202static void ti_free_rx_ring_mini(struct ti_softc *);
203static void ti_free_tx_ring(struct ti_softc *); 203static void ti_free_tx_ring(struct ti_softc *);
204static int ti_init_tx_ring(struct ti_softc *); 204static int ti_init_tx_ring(struct ti_softc *);
205 205
206static int ti_64bitslot_war(struct ti_softc *); 206static int ti_64bitslot_war(struct ti_softc *);
207static int ti_chipinit(struct ti_softc *); 207static int ti_chipinit(struct ti_softc *);
208static int ti_gibinit(struct ti_softc *); 208static int ti_gibinit(struct ti_softc *);
209 209
210static int ti_ether_ioctl(struct ifnet *, u_long, void *); 210static int ti_ether_ioctl(struct ifnet *, u_long, void *);
211 211
212CFATTACH_DECL_NEW(ti, sizeof(struct ti_softc), 212CFATTACH_DECL_NEW(ti, sizeof(struct ti_softc),
213 ti_probe, ti_attach, NULL, NULL); 213 ti_probe, ti_attach, NULL, NULL);
214 214
215/* 215/*
216 * Send an instruction or address to the EEPROM, check for ACK. 216 * Send an instruction or address to the EEPROM, check for ACK.
217 */ 217 */
218static uint32_t 218static uint32_t
219ti_eeprom_putbyte(struct ti_softc *sc, int byte) 219ti_eeprom_putbyte(struct ti_softc *sc, int byte)
220{ 220{
221 int i, ack = 0; 221 int i, ack = 0;
222 222
223 /* 223 /*
224 * Make sure we're in TX mode. 224 * Make sure we're in TX mode.
225 */ 225 */
226 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 226 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
227 227
228 /* 228 /*
229 * Feed in each bit and stobe the clock. 229 * Feed in each bit and stobe the clock.
230 */ 230 */
231 for (i = 0x80; i; i >>= 1) { 231 for (i = 0x80; i; i >>= 1) {
232 if (byte & i) { 232 if (byte & i) {
233 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 233 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
234 } else { 234 } else {
235 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 235 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
236 } 236 }
237 DELAY(1); 237 DELAY(1);
238 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 238 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
239 DELAY(1); 239 DELAY(1);
240 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 240 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
241 } 241 }
242 242
243 /* 243 /*
244 * Turn off TX mode. 244 * Turn off TX mode.
245 */ 245 */
246 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 246 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
247 247
248 /* 248 /*
249 * Check for ack. 249 * Check for ack.
250 */ 250 */
251 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 251 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
252 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; 252 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
253 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 253 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
254 254
255 return (ack); 255 return (ack);
256} 256}
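/*
 * The loop above clocks the byte out MSB-first: the mask walks 0x80,
 * 0x40, ... 0x01, setting or clearing the data line before each clock
 * strobe.  For example, byte = 0xA3 (binary 1010 0011) produces the
 * DOUT sequence 1, 0, 1, 0, 0, 0, 1, 1.
 */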
257 257
258/* 258/*
 259 * Read a byte of data stored in the EEPROM at address 'addr'. 259 * Read a byte of data stored in the EEPROM at address 'addr'.
260 * We have to send two address bytes since the EEPROM can hold 260 * We have to send two address bytes since the EEPROM can hold
261 * more than 256 bytes of data. 261 * more than 256 bytes of data.
262 */ 262 */
263static uint8_t 263static uint8_t
264ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest) 264ti_eeprom_getbyte(struct ti_softc *sc, int addr, uint8_t *dest)
265{ 265{
266 int i; 266 int i;
267 uint8_t byte = 0; 267 uint8_t byte = 0;
268 268
269 EEPROM_START(); 269 EEPROM_START();
270 270
271 /* 271 /*
272 * Send write control code to EEPROM. 272 * Send write control code to EEPROM.
273 */ 273 */
274 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 274 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
275 printf("%s: failed to send write command, status: %x\n", 275 printf("%s: failed to send write command, status: %x\n",
276 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 276 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
277 return (1); 277 return (1);
278 } 278 }
279 279
280 /* 280 /*
281 * Send first byte of address of byte we want to read. 281 * Send first byte of address of byte we want to read.
282 */ 282 */
283 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { 283 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
284 printf("%s: failed to send address, status: %x\n", 284 printf("%s: failed to send address, status: %x\n",
285 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 285 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
286 return (1); 286 return (1);
287 } 287 }
288 /* 288 /*
289 * Send second byte address of byte we want to read. 289 * Send second byte address of byte we want to read.
290 */ 290 */
291 if (ti_eeprom_putbyte(sc, addr & 0xFF)) { 291 if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
292 printf("%s: failed to send address, status: %x\n", 292 printf("%s: failed to send address, status: %x\n",
293 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 293 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
294 return (1); 294 return (1);
295 } 295 }
296 296
297 EEPROM_STOP(); 297 EEPROM_STOP();
298 EEPROM_START(); 298 EEPROM_START();
299 /* 299 /*
300 * Send read control code to EEPROM. 300 * Send read control code to EEPROM.
301 */ 301 */
302 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 302 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
303 printf("%s: failed to send read command, status: %x\n", 303 printf("%s: failed to send read command, status: %x\n",
304 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 304 device_xname(sc->sc_dev), CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
305 return (1); 305 return (1);
306 } 306 }
307 307
308 /* 308 /*
309 * Start reading bits from EEPROM. 309 * Start reading bits from EEPROM.
310 */ 310 */
311 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 311 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
312 for (i = 0x80; i; i >>= 1) { 312 for (i = 0x80; i; i >>= 1) {
313 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 313 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
314 DELAY(1); 314 DELAY(1);
315 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) 315 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
316 byte |= i; 316 byte |= i;
317 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 317 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
318 DELAY(1); 318 DELAY(1);
319 } 319 }
320 320
321 EEPROM_STOP(); 321 EEPROM_STOP();
322 322
323 /* 323 /*
324 * No ACK generated for read, so just return byte. 324 * No ACK generated for read, so just return byte.
325 */ 325 */
326 326
327 *dest = byte; 327 *dest = byte;
328 328
329 return (0); 329 return (0);
330} 330}
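/*
 * The two-byte addressing above, as a quick worked example (the
 * address is hypothetical): for addr = 0x1A2,
 *
 *	(addr >> 8) & 0xFF == 0x01	// first address byte sent
 *	 addr       & 0xFF == 0xA2	// second address byte sent
 *
 * which is why EEPROMs holding more than 256 bytes need both writes
 * before the read control code is issued.
 */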
331 331
332/* 332/*
333 * Read a sequence of bytes from the EEPROM. 333 * Read a sequence of bytes from the EEPROM.
334 */ 334 */
335static int 335static int
336ti_read_eeprom(struct ti_softc *sc, void *destv, int off, int cnt) 336ti_read_eeprom(struct ti_softc *sc, void *destv, int off, int cnt)
337{ 337{
338 char *dest = destv; 338 char *dest = destv;
339 int err = 0, i; 339 int err = 0, i;
340 uint8_t byte = 0; 340 uint8_t byte = 0;
341 341
342 for (i = 0; i < cnt; i++) { 342 for (i = 0; i < cnt; i++) {
343 err = ti_eeprom_getbyte(sc, off + i, &byte); 343 err = ti_eeprom_getbyte(sc, off + i, &byte);
344 if (err) 344 if (err)
345 break; 345 break;
346 *(dest + i) = byte; 346 *(dest + i) = byte;
347 } 347 }
348 348
349 return (err ? 1 : 0); 349 return (err ? 1 : 0);
350} 350}
351 351
352/* 352/*
353 * NIC memory access function. Can be used to either clear a section 353 * NIC memory access function. Can be used to either clear a section
 354 * of NIC local memory or (if xbuf is non-NULL) copy data into it. 354 * of NIC local memory or (if xbuf is non-NULL) copy data into it.
355 */ 355 */
356static void 356static void
357ti_mem(struct ti_softc *sc, uint32_t addr, uint32_t len, const void *xbuf) 357ti_mem(struct ti_softc *sc, uint32_t addr, uint32_t len, const void *xbuf)
358{ 358{
359 int segptr, segsize, cnt; 359 int segptr, segsize, cnt;
360 const void *ptr; 360 const void *ptr;
361 361
362 segptr = addr; 362 segptr = addr;
363 cnt = len; 363 cnt = len;
364 ptr = xbuf; 364 ptr = xbuf;
365 365
366 while (cnt) { 366 while (cnt) {
367 if (cnt < TI_WINLEN) 367 if (cnt < TI_WINLEN)
368 segsize = cnt; 368 segsize = cnt;
369 else 369 else
370 segsize = TI_WINLEN - (segptr % TI_WINLEN); 370 segsize = TI_WINLEN - (segptr % TI_WINLEN);
371 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 371 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
372 if (xbuf == NULL) { 372 if (xbuf == NULL) {
373 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle, 373 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
374 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, 374 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0,
375 segsize / 4); 375 segsize / 4);
376 } else { 376 } else {
377#ifdef __BUS_SPACE_HAS_STREAM_METHODS 377#ifdef __BUS_SPACE_HAS_STREAM_METHODS
378 bus_space_write_region_stream_4(sc->ti_btag, 378 bus_space_write_region_stream_4(sc->ti_btag,
379 sc->ti_bhandle, 379 sc->ti_bhandle,
380 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 380 TI_WINDOW + (segptr & (TI_WINLEN - 1)),
381 (const uint32_t *)ptr, segsize / 4); 381 (const uint32_t *)ptr, segsize / 4);
382#else 382#else
383 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, 383 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
384 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 384 TI_WINDOW + (segptr & (TI_WINLEN - 1)),
385 (const uint32_t *)ptr, segsize / 4); 385 (const uint32_t *)ptr, segsize / 4);
386#endif 386#endif
387 ptr = (const char *)ptr + segsize; 387 ptr = (const char *)ptr + segsize;
388 } 388 }
389 segptr += segsize; 389 segptr += segsize;
390 cnt -= segsize; 390 cnt -= segsize;
391 } 391 }
392 392
393 return; 393 return;
394} 394}
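/*
 * The windowing arithmetic in ti_mem(), spelled out as a sketch
 * (TI_WINLEN is a power of two, so mask arithmetic applies; 'addr'
 * here stands for any NIC-local address):
 *
 *	uint32_t base = addr & ~(TI_WINLEN - 1);  // window-aligned base
 *	uint32_t off  = addr &  (TI_WINLEN - 1);  // offset inside window
 *
 *	CSR_WRITE_4(sc, TI_WINBASE, base);
 *	// TI_WINLEN - off bytes are now reachable at TI_WINDOW + off,
 *	// which is why each loop pass above copies at most
 *	// TI_WINLEN - (segptr % TI_WINLEN) bytes before re-aiming the
 *	// window.
 */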
395 395
396/* 396/*
397 * Load firmware image into the NIC. Check that the firmware revision 397 * Load firmware image into the NIC. Check that the firmware revision
398 * is acceptable and see if we want the firmware for the Tigon 1 or 398 * is acceptable and see if we want the firmware for the Tigon 1 or
399 * Tigon 2. 399 * Tigon 2.
400 */ 400 */
401static void 401static void
402ti_loadfw(struct ti_softc *sc) 402ti_loadfw(struct ti_softc *sc)
403{ 403{
404 switch (sc->ti_hwrev) { 404 switch (sc->ti_hwrev) {
405 case TI_HWREV_TIGON: 405 case TI_HWREV_TIGON:
406 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR || 406 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
407 tigonFwReleaseMinor != TI_FIRMWARE_MINOR || 407 tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
408 tigonFwReleaseFix != TI_FIRMWARE_FIX) { 408 tigonFwReleaseFix != TI_FIRMWARE_FIX) {
409 printf("%s: firmware revision mismatch; want " 409 printf("%s: firmware revision mismatch; want "
410 "%d.%d.%d, got %d.%d.%d\n", device_xname(sc->sc_dev), 410 "%d.%d.%d, got %d.%d.%d\n", device_xname(sc->sc_dev),
411 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 411 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
412 TI_FIRMWARE_FIX, tigonFwReleaseMajor, 412 TI_FIRMWARE_FIX, tigonFwReleaseMajor,
413 tigonFwReleaseMinor, tigonFwReleaseFix); 413 tigonFwReleaseMinor, tigonFwReleaseFix);
414 return; 414 return;
415 } 415 }
416 ti_mem(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText); 416 ti_mem(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText);
417 ti_mem(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData); 417 ti_mem(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData);
418 ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen, tigonFwRodata); 418 ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen, tigonFwRodata);
419 ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL); 419 ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL);
420 ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL); 420 ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL);
421 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr); 421 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
422 break; 422 break;
423 case TI_HWREV_TIGON_II: 423 case TI_HWREV_TIGON_II:
424 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR || 424 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
425 tigon2FwReleaseMinor != TI_FIRMWARE_MINOR || 425 tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
426 tigon2FwReleaseFix != TI_FIRMWARE_FIX) { 426 tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
427 printf("%s: firmware revision mismatch; want " 427 printf("%s: firmware revision mismatch; want "
428 "%d.%d.%d, got %d.%d.%d\n", device_xname(sc->sc_dev), 428 "%d.%d.%d, got %d.%d.%d\n", device_xname(sc->sc_dev),
429 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 429 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
430 TI_FIRMWARE_FIX, tigon2FwReleaseMajor, 430 TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
431 tigon2FwReleaseMinor, tigon2FwReleaseFix); 431 tigon2FwReleaseMinor, tigon2FwReleaseFix);
432 return; 432 return;
433 } 433 }
434 ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen, tigon2FwText); 434 ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen, tigon2FwText);
435 ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen, tigon2FwData); 435 ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen, tigon2FwData);
436 ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen, 436 ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
437 tigon2FwRodata); 437 tigon2FwRodata);
438 ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL); 438 ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL);
439 ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL); 439 ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL);
440 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr); 440 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
441 break; 441 break;
442 default: 442 default:
443 printf("%s: can't load firmware: unknown hardware rev\n", 443 printf("%s: can't load firmware: unknown hardware rev\n",
444 device_xname(sc->sc_dev)); 444 device_xname(sc->sc_dev));
445 break; 445 break;
446 } 446 }
447 447
448 return; 448 return;
449} 449}
450 450
451/* 451/*
452 * Send the NIC a command via the command ring. 452 * Send the NIC a command via the command ring.
453 */ 453 */
454static void 454static void
455ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd) 455ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
456{ 456{
457 uint32_t index; 457 uint32_t index;
458 458
459 index = sc->ti_cmd_saved_prodidx; 459 index = sc->ti_cmd_saved_prodidx;
460 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); 460 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
461 TI_INC(index, TI_CMD_RING_CNT); 461 TI_INC(index, TI_CMD_RING_CNT);
462 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 462 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
463 sc->ti_cmd_saved_prodidx = index; 463 sc->ti_cmd_saved_prodidx = index;
464} 464}
465 465
466/* 466/*
467 * Send the NIC an extended command. The 'len' parameter specifies the 467 * Send the NIC an extended command. The 'len' parameter specifies the
468 * number of command slots to include after the initial command. 468 * number of command slots to include after the initial command.
469 */ 469 */
470static void 470static void
471ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, void *argv, int len) 471ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, void *argv, int len)
472{ 472{
473 char *arg = argv; 473 char *arg = argv;
474 uint32_t index; 474 uint32_t index;
475 int i; 475 int i;
476 476
477 index = sc->ti_cmd_saved_prodidx; 477 index = sc->ti_cmd_saved_prodidx;
478 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd)); 478 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(uint32_t *)(cmd));
479 TI_INC(index, TI_CMD_RING_CNT); 479 TI_INC(index, TI_CMD_RING_CNT);
480 for (i = 0; i < len; i++) { 480 for (i = 0; i < len; i++) {
481 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), 481 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
482 *(uint32_t *)(&arg[i * 4])); 482 *(uint32_t *)(&arg[i * 4]));
483 TI_INC(index, TI_CMD_RING_CNT); 483 TI_INC(index, TI_CMD_RING_CNT);
484 } 484 }
485 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 485 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
486 sc->ti_cmd_saved_prodidx = index; 486 sc->ti_cmd_saved_prodidx = index;
487} 487}
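/*
 * A sketch of the producer-index arithmetic shared by ti_cmd() and
 * ti_cmd_ext(): TI_INC() (from if_tireg.h) is, in effect, a modular
 * increment, so the index wraps at TI_CMD_RING_CNT and an extended
 * command consumes 1 + len ring slots.  A hypothetical helper with
 * the same effect:
 *
 *	static inline uint32_t
 *	ti_cmd_advance(uint32_t idx)
 *	{
 *		return (idx + 1) % TI_CMD_RING_CNT;
 *	}
 *
 * Note that the NIC only sees the queued slots once TI_MB_CMDPROD_IDX
 * is written, so all slots of an extended command are staged before
 * the single mailbox update makes them visible.
 */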
488 488
489/* 489/*
490 * Handle events that have triggered interrupts. 490 * Handle events that have triggered interrupts.
491 */ 491 */
492static void 492static void
493ti_handle_events(struct ti_softc *sc) 493ti_handle_events(struct ti_softc *sc)
494{ 494{
495 struct ti_event_desc *e; 495 struct ti_event_desc *e;
496 496
497 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) { 497 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
498 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx]; 498 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
499 switch (TI_EVENT_EVENT(e)) { 499 switch (TI_EVENT_EVENT(e)) {
500 case TI_EV_LINKSTAT_CHANGED: 500 case TI_EV_LINKSTAT_CHANGED:
501 sc->ti_linkstat = TI_EVENT_CODE(e); 501 sc->ti_linkstat = TI_EVENT_CODE(e);
502 if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) 502 if (sc->ti_linkstat == TI_EV_CODE_LINK_UP)
503 printf("%s: 10/100 link up\n", 503 printf("%s: 10/100 link up\n",
504 device_xname(sc->sc_dev)); 504 device_xname(sc->sc_dev));
505 else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) 505 else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP)
506 printf("%s: gigabit link up\n", 506 printf("%s: gigabit link up\n",
507 device_xname(sc->sc_dev)); 507 device_xname(sc->sc_dev));
508 else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) 508 else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
509 printf("%s: link down\n", 509 printf("%s: link down\n",
510 device_xname(sc->sc_dev)); 510 device_xname(sc->sc_dev));
511 break; 511 break;
512 case TI_EV_ERROR: 512 case TI_EV_ERROR:
513 if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD) 513 if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
514 printf("%s: invalid command\n", 514 printf("%s: invalid command\n",
515 device_xname(sc->sc_dev)); 515 device_xname(sc->sc_dev));
516 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD) 516 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
517 printf("%s: unknown command\n", 517 printf("%s: unknown command\n",
518 device_xname(sc->sc_dev)); 518 device_xname(sc->sc_dev));
519 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG) 519 else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
520 printf("%s: bad config data\n", 520 printf("%s: bad config data\n",
521 device_xname(sc->sc_dev)); 521 device_xname(sc->sc_dev));
522 break; 522 break;
523 case TI_EV_FIRMWARE_UP: 523 case TI_EV_FIRMWARE_UP:
524 ti_init2(sc); 524 ti_init2(sc);
525 break; 525 break;
526 case TI_EV_STATS_UPDATED: 526 case TI_EV_STATS_UPDATED:
527 ti_stats_update(sc); 527 ti_stats_update(sc);
528 break; 528 break;
529 case TI_EV_RESET_JUMBO_RING: 529 case TI_EV_RESET_JUMBO_RING:
530 case TI_EV_MCAST_UPDATED: 530 case TI_EV_MCAST_UPDATED:
531 /* Who cares. */ 531 /* Who cares. */
532 break; 532 break;
533 default: 533 default:
534 printf("%s: unknown event: %d\n", 534 printf("%s: unknown event: %d\n",
535 device_xname(sc->sc_dev), TI_EVENT_EVENT(e)); 535 device_xname(sc->sc_dev), TI_EVENT_EVENT(e));
536 break; 536 break;
537 } 537 }
538 /* Advance the consumer index. */ 538 /* Advance the consumer index. */
539 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT); 539 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
540 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx); 540 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
541 } 541 }
542 542
543 return; 543 return;
544} 544}
545 545
546/* 546/*
547 * Memory management for the jumbo receive ring is a pain in the 547 * Memory management for the jumbo receive ring is a pain in the
548 * butt. We need to allocate at least 9018 bytes of space per frame, 548 * butt. We need to allocate at least 9018 bytes of space per frame,
549 * _and_ it has to be contiguous (unless you use the extended 549 * _and_ it has to be contiguous (unless you use the extended
550 * jumbo descriptor format). Using malloc() all the time won't 550 * jumbo descriptor format). Using malloc() all the time won't
551 * work: malloc() allocates memory in powers of two, which means we 551 * work: malloc() allocates memory in powers of two, which means we
552 * would end up wasting a considerable amount of space by allocating 552 * would end up wasting a considerable amount of space by allocating
553 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have 553 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
554 * to do our own memory management. 554 * to do our own memory management.
555 * 555 *
556 * The driver needs to allocate a contiguous chunk of memory at boot 556 * The driver needs to allocate a contiguous chunk of memory at boot
557 * time. We then chop this up ourselves into 9K pieces and use them 557 * time. We then chop this up ourselves into 9K pieces and use them
558 * as external mbuf storage. 558 * as external mbuf storage.
559 * 559 *
560 * One issue here is how much memory to allocate. The jumbo ring has 560 * One issue here is how much memory to allocate. The jumbo ring has
 561 * 256 slots in it, but at 9K per slot that can consume over 2MB of 561 * 256 slots in it, but at 9K per slot that can consume over 2MB of
562 * RAM. This is a bit much, especially considering we also need 562 * RAM. This is a bit much, especially considering we also need
563 * RAM for the standard ring and mini ring (on the Tigon 2). To 563 * RAM for the standard ring and mini ring (on the Tigon 2). To
564 * save space, we only actually allocate enough memory for 64 slots 564 * save space, we only actually allocate enough memory for 64 slots
565 * by default, which works out to between 500 and 600K. This can 565 * by default, which works out to between 500 and 600K. This can
566 * be tuned by changing a #define in if_tireg.h. 566 * be tuned by changing a #define in if_tireg.h.
567 */ 567 */
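/*
 * The sizing described above, worked out (TI_JLEN, TI_JSLOTS and
 * TI_JMEM are set in if_tireg.h; the figures assume the 64-slot
 * default mentioned in the comment):
 *
 *	TI_JMEM ~= TI_JSLOTS * TI_JLEN	// one contiguous allocation
 *	 64 slots * ~9KB ~= 576KB	// the "between 500 and 600K"
 *	256 slots * ~9KB ~= 2.25MB	// a fully populated jumbo ring
 */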
568 568
569static int 569static int
570ti_alloc_jumbo_mem(struct ti_softc *sc) 570ti_alloc_jumbo_mem(struct ti_softc *sc)
571{ 571{
572 char *ptr; 572 char *ptr;
573 int i; 573 int i;
574 struct ti_jpool_entry *entry; 574 struct ti_jpool_entry *entry;
575 bus_dma_segment_t dmaseg; 575 bus_dma_segment_t dmaseg;
576 int error, dmanseg; 576 int error, dmanseg;
577 577
578 /* Grab a big chunk o' storage. */ 578 /* Grab a big chunk o' storage. */
579 if ((error = bus_dmamem_alloc(sc->sc_dmat, 579 if ((error = bus_dmamem_alloc(sc->sc_dmat,
580 TI_JMEM, PAGE_SIZE, 0, &dmaseg, 1, &dmanseg, 580 TI_JMEM, PAGE_SIZE, 0, &dmaseg, 1, &dmanseg,
581 BUS_DMA_NOWAIT)) != 0) { 581 BUS_DMA_NOWAIT)) != 0) {
582 aprint_error_dev(sc->sc_dev, 582 aprint_error_dev(sc->sc_dev,
583 "can't allocate jumbo buffer, error = %d\n", error); 583 "can't allocate jumbo buffer, error = %d\n", error);
584 return (error); 584 return (error);
585 } 585 }
586 586
587 if ((error = bus_dmamem_map(sc->sc_dmat, &dmaseg, dmanseg, 587 if ((error = bus_dmamem_map(sc->sc_dmat, &dmaseg, dmanseg,
588 TI_JMEM, (void **)&sc->ti_cdata.ti_jumbo_buf, 588 TI_JMEM, (void **)&sc->ti_cdata.ti_jumbo_buf,
589 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 589 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
590 aprint_error_dev(sc->sc_dev, 590 aprint_error_dev(sc->sc_dev,
591 "can't map jumbo buffer, error = %d\n", error); 591 "can't map jumbo buffer, error = %d\n", error);
592 return (error); 592 return (error);
593 } 593 }
594 594
595 if ((error = bus_dmamap_create(sc->sc_dmat, 595 if ((error = bus_dmamap_create(sc->sc_dmat,
596 TI_JMEM, 1, 596 TI_JMEM, 1,
597 TI_JMEM, 0, BUS_DMA_NOWAIT, 597 TI_JMEM, 0, BUS_DMA_NOWAIT,
598 &sc->jumbo_dmamap)) != 0) { 598 &sc->jumbo_dmamap)) != 0) {
599 aprint_error_dev(sc->sc_dev, 599 aprint_error_dev(sc->sc_dev,
600 "can't create jumbo buffer DMA map, error = %d\n", error); 600 "can't create jumbo buffer DMA map, error = %d\n", error);
601 return (error); 601 return (error);
602 } 602 }
603 603
604 if ((error = bus_dmamap_load(sc->sc_dmat, sc->jumbo_dmamap, 604 if ((error = bus_dmamap_load(sc->sc_dmat, sc->jumbo_dmamap,
605 sc->ti_cdata.ti_jumbo_buf, TI_JMEM, NULL, 605 sc->ti_cdata.ti_jumbo_buf, TI_JMEM, NULL,
606 BUS_DMA_NOWAIT)) != 0) { 606 BUS_DMA_NOWAIT)) != 0) {
607 aprint_error_dev(sc->sc_dev, 607 aprint_error_dev(sc->sc_dev,
608 "can't load jumbo buffer DMA map, error = %d\n", error); 608 "can't load jumbo buffer DMA map, error = %d\n", error);
609 return (error); 609 return (error);
610 } 610 }
611 sc->jumbo_dmaaddr = sc->jumbo_dmamap->dm_segs[0].ds_addr; 611 sc->jumbo_dmaaddr = sc->jumbo_dmamap->dm_segs[0].ds_addr;
612 612
613 SIMPLEQ_INIT(&sc->ti_jfree_listhead); 613 SIMPLEQ_INIT(&sc->ti_jfree_listhead);
614 SIMPLEQ_INIT(&sc->ti_jinuse_listhead); 614 SIMPLEQ_INIT(&sc->ti_jinuse_listhead);
615 615
616 /* 616 /*
617 * Now divide it up into 9K pieces and save the addresses 617 * Now divide it up into 9K pieces and save the addresses
618 * in an array. 618 * in an array.
619 */ 619 */
620 ptr = sc->ti_cdata.ti_jumbo_buf; 620 ptr = sc->ti_cdata.ti_jumbo_buf;
621 for (i = 0; i < TI_JSLOTS; i++) { 621 for (i = 0; i < TI_JSLOTS; i++) {
622 sc->ti_cdata.ti_jslots[i] = ptr; 622 sc->ti_cdata.ti_jslots[i] = ptr;
623 ptr += TI_JLEN; 623 ptr += TI_JLEN;
624 entry = malloc(sizeof(struct ti_jpool_entry), 624 entry = malloc(sizeof(struct ti_jpool_entry),
625 M_DEVBUF, M_WAITOK); 625 M_DEVBUF, M_WAITOK);
626 entry->slot = i; 626 entry->slot = i;
627 SIMPLEQ_INSERT_HEAD(&sc->ti_jfree_listhead, entry, 627 SIMPLEQ_INSERT_HEAD(&sc->ti_jfree_listhead, entry,
628 jpool_entries); 628 jpool_entries);
629 } 629 }
630 630
631 return (0); 631 return (0);
632} 632}
633 633
634/* 634/*
635 * Allocate a jumbo buffer. 635 * Allocate a jumbo buffer.
636 */ 636 */
637static void * 637static void *
638ti_jalloc(struct ti_softc *sc) 638ti_jalloc(struct ti_softc *sc)
639{ 639{
640 struct ti_jpool_entry *entry; 640 struct ti_jpool_entry *entry;
641 641
642 entry = SIMPLEQ_FIRST(&sc->ti_jfree_listhead); 642 entry = SIMPLEQ_FIRST(&sc->ti_jfree_listhead);
643 643
644 if (entry == NULL) { 644 if (entry == NULL) {
645 printf("%s: no free jumbo buffers\n", device_xname(sc->sc_dev)); 645 printf("%s: no free jumbo buffers\n", device_xname(sc->sc_dev));
646 return (NULL); 646 return (NULL);
647 } 647 }
648 648
649 SIMPLEQ_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries); 649 SIMPLEQ_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
650 SIMPLEQ_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries); 650 SIMPLEQ_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
651 651
652 return (sc->ti_cdata.ti_jslots[entry->slot]); 652 return (sc->ti_cdata.ti_jslots[entry->slot]);
653} 653}
654 654
655/* 655/*
656 * Release a jumbo buffer. 656 * Release a jumbo buffer.
657 */ 657 */
658static void 658static void
659ti_jfree(struct mbuf *m, void *tbuf, size_t size, void *arg) 659ti_jfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
660{ 660{
661 struct ti_softc *sc; 661 struct ti_softc *sc;
662 int i, s; 662 int i, s;
663 struct ti_jpool_entry *entry; 663 struct ti_jpool_entry *entry;
664 664
665 /* Extract the softc struct pointer. */ 665 /* Extract the softc struct pointer. */
666 sc = (struct ti_softc *)arg; 666 sc = (struct ti_softc *)arg;
667 667
668 if (sc == NULL) 668 if (sc == NULL)
669 panic("ti_jfree: didn't get softc pointer!"); 669 panic("ti_jfree: didn't get softc pointer!");
670 670
671 /* calculate the slot this buffer belongs to */ 671 /* calculate the slot this buffer belongs to */
672 672
673 i = ((char *)tbuf 673 i = ((char *)tbuf
674 - (char *)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN; 674 - (char *)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
675 675
676 if ((i < 0) || (i >= TI_JSLOTS)) 676 if ((i < 0) || (i >= TI_JSLOTS))
677 panic("ti_jfree: asked to free buffer that we don't manage!"); 677 panic("ti_jfree: asked to free buffer that we don't manage!");
678 678
679 s = splvm(); 679 s = splvm();
680 entry = SIMPLEQ_FIRST(&sc->ti_jinuse_listhead); 680 entry = SIMPLEQ_FIRST(&sc->ti_jinuse_listhead);
681 if (entry == NULL) 681 if (entry == NULL)
682 panic("ti_jfree: buffer not in use!"); 682 panic("ti_jfree: buffer not in use!");
683 entry->slot = i; 683 entry->slot = i;
684 SIMPLEQ_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries); 684 SIMPLEQ_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
685 SIMPLEQ_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries); 685 SIMPLEQ_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
686 686
687 if (__predict_true(m != NULL)) 687 if (__predict_true(m != NULL))
688 pool_cache_put(mb_cache, m); 688 pool_cache_put(mb_cache, m);
689 splx(s); 689 splx(s);
690} 690}
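/*
 * The slot calculation in ti_jfree() as a worked example (stride
 * TI_JLEN, addresses hypothetical): a buffer handed out from slot 3
 * sits at ti_jumbo_buf + 3 * TI_JLEN, so
 *
 *	i = ((char *)tbuf - (char *)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;
 *	// => i == 3
 *
 * recovers the slot in O(1) from the buffer address alone.  Any entry
 * from the in-use list can then be recycled to record it, since the
 * entries carry no identity beyond the slot number assigned here.
 */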
691 691
692 692
693/* 693/*
694 * Initialize a standard receive ring descriptor. 694 * Initialize a standard receive ring descriptor.
695 */ 695 */
696static int 696static int
697ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap) 697ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap)
698{ 698{
699 struct mbuf *m_new = NULL; 699 struct mbuf *m_new = NULL;
700 struct ti_rx_desc *r; 700 struct ti_rx_desc *r;
701 int error; 701 int error;
702 702
703 if (dmamap == NULL) { 703 if (dmamap == NULL) {
704 /* if (m) panic() */ 704 /* if (m) panic() */
705 705
706 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 706 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
707 MCLBYTES, 0, BUS_DMA_NOWAIT, 707 MCLBYTES, 0, BUS_DMA_NOWAIT,
708 &dmamap)) != 0) { 708 &dmamap)) != 0) {
709 aprint_error_dev(sc->sc_dev, 709 aprint_error_dev(sc->sc_dev,
710 "can't create recv map, error = %d\n", error); 710 "can't create recv map, error = %d\n", error);
711 return (ENOMEM); 711 return (ENOMEM);
712 } 712 }
713 } 713 }
714 sc->std_dmamap[i] = dmamap; 714 sc->std_dmamap[i] = dmamap;
715 715
716 if (m == NULL) { 716 if (m == NULL) {
717 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 717 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
718 if (m_new == NULL) { 718 if (m_new == NULL) {
719 aprint_error_dev(sc->sc_dev, 719 aprint_error_dev(sc->sc_dev,
720 "mbuf allocation failed -- packet dropped!\n"); 720 "mbuf allocation failed -- packet dropped!\n");
721 return (ENOBUFS); 721 return (ENOBUFS);
722 } 722 }
723 723
724 MCLGET(m_new, M_DONTWAIT); 724 MCLGET(m_new, M_DONTWAIT);
725 if (!(m_new->m_flags & M_EXT)) { 725 if (!(m_new->m_flags & M_EXT)) {
726 aprint_error_dev(sc->sc_dev, 726 aprint_error_dev(sc->sc_dev,
727 "cluster allocation failed -- packet dropped!\n"); 727 "cluster allocation failed -- packet dropped!\n");
728 m_freem(m_new); 728 m_freem(m_new);
729 return (ENOBUFS); 729 return (ENOBUFS);
730 } 730 }
731 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 731 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
732 m_adj(m_new, ETHER_ALIGN); 732 m_adj(m_new, ETHER_ALIGN);
733 733
734 if ((error = bus_dmamap_load(sc->sc_dmat, dmamap, 734 if ((error = bus_dmamap_load(sc->sc_dmat, dmamap,
735 mtod(m_new, void *), m_new->m_len, NULL, 735 mtod(m_new, void *), m_new->m_len, NULL,
736 BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) { 736 BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
737 aprint_error_dev(sc->sc_dev, 737 aprint_error_dev(sc->sc_dev,
738 "can't load recv map, error = %d\n", error); 738 "can't load recv map, error = %d\n", error);
739 m_freem(m_new); 739 m_freem(m_new);
740 return (ENOMEM); 740 return (ENOMEM);
741 } 741 }
742 } else { 742 } else {
743 m_new = m; 743 m_new = m;
744 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 744 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
745 m_new->m_data = m_new->m_ext.ext_buf; 745 m_new->m_data = m_new->m_ext.ext_buf;
746 m_adj(m_new, ETHER_ALIGN); 746 m_adj(m_new, ETHER_ALIGN);
747 747
748 /* reuse the dmamap */ 748 /* reuse the dmamap */
749 } 749 }
750 750
751 sc->ti_cdata.ti_rx_std_chain[i] = m_new; 751 sc->ti_cdata.ti_rx_std_chain[i] = m_new;
752 r = &sc->ti_rdata->ti_rx_std_ring[i]; 752 r = &sc->ti_rdata->ti_rx_std_ring[i];
753 TI_HOSTADDR(r->ti_addr, dmamap->dm_segs[0].ds_addr); 753 TI_HOSTADDR(r->ti_addr, dmamap->dm_segs[0].ds_addr);
754 r->ti_type = TI_BDTYPE_RECV_BD; 754 r->ti_type = TI_BDTYPE_RECV_BD;
755 r->ti_flags = 0; 755 r->ti_flags = 0;
756 if (sc->ethercom.ec_if.if_capenable & IFCAP_CSUM_IPv4_Rx) 756 if (sc->ethercom.ec_if.if_capenable & IFCAP_CSUM_IPv4_Rx)
757 r->ti_flags |= TI_BDFLAG_IP_CKSUM; 757 r->ti_flags |= TI_BDFLAG_IP_CKSUM;
758 if (sc->ethercom.ec_if.if_capenable & 758 if (sc->ethercom.ec_if.if_capenable &
759 (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 759 (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
760 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM; 760 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
761 r->ti_len = m_new->m_len; /* == ds_len */ 761 r->ti_len = m_new->m_len; /* == ds_len */
762 r->ti_idx = i; 762 r->ti_idx = i;
763 763
764 return (0); 764 return (0);
765} 765}
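/*
 * Why m_adj(m_new, ETHER_ALIGN) above: ETHER_ALIGN is 2, so skipping
 * two bytes at the front of the fresh cluster makes the 14-byte
 * Ethernet header end on a 4-byte boundary, leaving the IP header
 * naturally aligned for the stack.  Sketch, for a cluster starting at
 * a 4-byte-aligned address A:
 *
 *	A + 0  .. A + 1		skipped by m_adj()
 *	A + 2  .. A + 15	Ethernet header (14 bytes)
 *	A + 16 ..		IP header, 32-bit aligned
 */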
766 766
767/* 767/*
768 * Intialize a mini receive ring descriptor. This only applies to 768 * Initialize a mini receive ring descriptor. This only applies to
769 * the Tigon 2. 769 * the Tigon 2.
770 */ 770 */
771static int 771static int
772ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap) 772ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap)
773{ 773{
774 struct mbuf *m_new = NULL; 774 struct mbuf *m_new = NULL;
775 struct ti_rx_desc *r; 775 struct ti_rx_desc *r;
776 int error; 776 int error;
777 777
778 if (dmamap == NULL) { 778 if (dmamap == NULL) {
779 /* if (m) panic() */ 779 /* if (m) panic() */
780 780
781 if ((error = bus_dmamap_create(sc->sc_dmat, MHLEN, 1, 781 if ((error = bus_dmamap_create(sc->sc_dmat, MHLEN, 1,
782 MHLEN, 0, BUS_DMA_NOWAIT, 782 MHLEN, 0, BUS_DMA_NOWAIT,
783 &dmamap)) != 0) { 783 &dmamap)) != 0) {
784 aprint_error_dev(sc->sc_dev, 784 aprint_error_dev(sc->sc_dev,
785 "can't create recv map, error = %d\n", error); 785 "can't create recv map, error = %d\n", error);
786 return (ENOMEM); 786 return (ENOMEM);
787 } 787 }
788 } 788 }
789 sc->mini_dmamap[i] = dmamap; 789 sc->mini_dmamap[i] = dmamap;
790 790
791 if (m == NULL) { 791 if (m == NULL) {
792 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 792 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
793 if (m_new == NULL) { 793 if (m_new == NULL) {
794 aprint_error_dev(sc->sc_dev, 794 aprint_error_dev(sc->sc_dev,
795 "mbuf allocation failed -- packet dropped!\n"); 795 "mbuf allocation failed -- packet dropped!\n");
796 return (ENOBUFS); 796 return (ENOBUFS);
797 } 797 }
798 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 798 m_new->m_len = m_new->m_pkthdr.len = MHLEN;
799 m_adj(m_new, ETHER_ALIGN); 799 m_adj(m_new, ETHER_ALIGN);
800 800
801 if ((error = bus_dmamap_load(sc->sc_dmat, dmamap, 801 if ((error = bus_dmamap_load(sc->sc_dmat, dmamap,
802 mtod(m_new, void *), m_new->m_len, NULL, 802 mtod(m_new, void *), m_new->m_len, NULL,
803 BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) { 803 BUS_DMA_READ | BUS_DMA_NOWAIT)) != 0) {
804 aprint_error_dev(sc->sc_dev, 804 aprint_error_dev(sc->sc_dev,
805 "can't load recv map, error = %d\n", error); 805 "can't load recv map, error = %d\n", error);
806 m_freem(m_new); 806 m_freem(m_new);
807 return (ENOMEM); 807 return (ENOMEM);
808 } 808 }
809 } else { 809 } else {
810 m_new = m; 810 m_new = m;
811 m_new->m_data = m_new->m_pktdat; 811 m_new->m_data = m_new->m_pktdat;
812 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 812 m_new->m_len = m_new->m_pkthdr.len = MHLEN;
813 m_adj(m_new, ETHER_ALIGN); 813 m_adj(m_new, ETHER_ALIGN);
814 814
815 /* reuse the dmamap */ 815 /* reuse the dmamap */
816 } 816 }
817 817
818 r = &sc->ti_rdata->ti_rx_mini_ring[i]; 818 r = &sc->ti_rdata->ti_rx_mini_ring[i];
819 sc->ti_cdata.ti_rx_mini_chain[i] = m_new; 819 sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
820 TI_HOSTADDR(r->ti_addr, dmamap->dm_segs[0].ds_addr); 820 TI_HOSTADDR(r->ti_addr, dmamap->dm_segs[0].ds_addr);
821 r->ti_type = TI_BDTYPE_RECV_BD; 821 r->ti_type = TI_BDTYPE_RECV_BD;
822 r->ti_flags = TI_BDFLAG_MINI_RING; 822 r->ti_flags = TI_BDFLAG_MINI_RING;
823 if (sc->ethercom.ec_if.if_capenable & IFCAP_CSUM_IPv4_Rx) 823 if (sc->ethercom.ec_if.if_capenable & IFCAP_CSUM_IPv4_Rx)
824 r->ti_flags |= TI_BDFLAG_IP_CKSUM; 824 r->ti_flags |= TI_BDFLAG_IP_CKSUM;
825 if (sc->ethercom.ec_if.if_capenable & 825 if (sc->ethercom.ec_if.if_capenable &
826 (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 826 (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
827 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM; 827 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
828 r->ti_len = m_new->m_len; /* == ds_len */ 828 r->ti_len = m_new->m_len; /* == ds_len */
829 r->ti_idx = i; 829 r->ti_idx = i;
830 830
831 return (0); 831 return (0);
832} 832}
833 833
834/* 834/*
835 * Initialize a jumbo receive ring descriptor. This allocates 835 * Initialize a jumbo receive ring descriptor. This allocates
836 * a jumbo buffer from the pool managed internally by the driver. 836 * a jumbo buffer from the pool managed internally by the driver.
837 */ 837 */
838static int 838static int
839ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m) 839ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m)
840{ 840{
841 struct mbuf *m_new = NULL; 841 struct mbuf *m_new = NULL;
842 struct ti_rx_desc *r; 842 struct ti_rx_desc *r;
843 843
844 if (m == NULL) { 844 if (m == NULL) {
845 void * tbuf = NULL; 845 void * tbuf = NULL;
846 846
847 /* Allocate the mbuf. */ 847 /* Allocate the mbuf. */
848 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 848 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
849 if (m_new == NULL) { 849 if (m_new == NULL) {
850 aprint_error_dev(sc->sc_dev, 850 aprint_error_dev(sc->sc_dev,
851 "mbuf allocation failed -- packet dropped!\n"); 851 "mbuf allocation failed -- packet dropped!\n");
852 return (ENOBUFS); 852 return (ENOBUFS);
853 } 853 }
854 854
855 /* Allocate the jumbo buffer */ 855 /* Allocate the jumbo buffer */
856 tbuf = ti_jalloc(sc); 856 tbuf = ti_jalloc(sc);
857 if (tbuf == NULL) { 857 if (tbuf == NULL) {
858 m_freem(m_new); 858 m_freem(m_new);
859 aprint_error_dev(sc->sc_dev, 859 aprint_error_dev(sc->sc_dev,
860 "jumbo allocation failed -- packet dropped!\n"); 860 "jumbo allocation failed -- packet dropped!\n");
861 return (ENOBUFS); 861 return (ENOBUFS);
862 } 862 }
863 863
864 /* Attach the buffer to the mbuf. */ 864 /* Attach the buffer to the mbuf. */
865 MEXTADD(m_new, tbuf, ETHER_MAX_LEN_JUMBO, 865 MEXTADD(m_new, tbuf, ETHER_MAX_LEN_JUMBO,
866 M_DEVBUF, ti_jfree, sc); 866 M_DEVBUF, ti_jfree, sc);
867 m_new->m_flags |= M_EXT_RW; 867 m_new->m_flags |= M_EXT_RW;
868 m_new->m_len = m_new->m_pkthdr.len = ETHER_MAX_LEN_JUMBO; 868 m_new->m_len = m_new->m_pkthdr.len = ETHER_MAX_LEN_JUMBO;
869 } else { 869 } else {
870 m_new = m; 870 m_new = m;
871 m_new->m_data = m_new->m_ext.ext_buf; 871 m_new->m_data = m_new->m_ext.ext_buf;
872 m_new->m_ext.ext_size = ETHER_MAX_LEN_JUMBO; 872 m_new->m_ext.ext_size = ETHER_MAX_LEN_JUMBO;
873 } 873 }
874 874
875 m_adj(m_new, ETHER_ALIGN); 875 m_adj(m_new, ETHER_ALIGN);
876 /* Set up the descriptor. */ 876 /* Set up the descriptor. */
877 r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; 877 r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
878 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; 878 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
879 TI_HOSTADDR(r->ti_addr, sc->jumbo_dmaaddr + 879 TI_HOSTADDR(r->ti_addr, sc->jumbo_dmaaddr +
880 (mtod(m_new, char *) - (char *)sc->ti_cdata.ti_jumbo_buf)); 880 (mtod(m_new, char *) - (char *)sc->ti_cdata.ti_jumbo_buf));
881 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 881 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
882 r->ti_flags = TI_BDFLAG_JUMBO_RING; 882 r->ti_flags = TI_BDFLAG_JUMBO_RING;
883 if (sc->ethercom.ec_if.if_capenable & IFCAP_CSUM_IPv4_Rx) 883 if (sc->ethercom.ec_if.if_capenable & IFCAP_CSUM_IPv4_Rx)
884 r->ti_flags |= TI_BDFLAG_IP_CKSUM; 884 r->ti_flags |= TI_BDFLAG_IP_CKSUM;
885 if (sc->ethercom.ec_if.if_capenable & 885 if (sc->ethercom.ec_if.if_capenable &
886 (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 886 (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
887 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM; 887 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
888 r->ti_len = m_new->m_len; 888 r->ti_len = m_new->m_len;
889 r->ti_idx = i; 889 r->ti_idx = i;
890 890
891 return (0); 891 return (0);
892} 892}
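The TI_HOSTADDR() computation in ti_newbuf_jumbo() derives each jumbo buffer's device-visible address from the driver's single DMA-loaded pool: the buffer's byte offset within the pool is the same in the kernel virtual and bus address spaces, so adding that offset to the pool's bus address yields the buffer's bus address. A minimal sketch of that arithmetic, with illustrative names (pool_vaddr/pool_busaddr stand in for ti_jumbo_buf/jumbo_dmaaddr; they are not driver identifiers):

	#include <stdint.h>

	/*
	 * Sketch: bus address of a buffer carved out of one contiguously
	 * mapped pool.  Valid only because the pool was loaded as a single
	 * DMA segment, so offsets agree across both address spaces.
	 */
	static uint64_t
	buf_busaddr(const char *pool_vaddr, uint64_t pool_busaddr,
	    const char *buf_vaddr)
	{
		return pool_busaddr + (uint64_t)(buf_vaddr - pool_vaddr);
	}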
893 893
894/* 894/*
895 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 895 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
896 * that's 1MB of memory, which is a lot. For now, we fill only the first 896 * that's 1MB of memory, which is a lot. For now, we fill only the first
897 * 256 ring entries and hope that our CPU is fast enough to keep up with 897 * 256 ring entries and hope that our CPU is fast enough to keep up with
898 * the NIC. 898 * the NIC.
899 */ 899 */
900static int 900static int
901ti_init_rx_ring_std(struct ti_softc *sc) 901ti_init_rx_ring_std(struct ti_softc *sc)
902{ 902{
903 int i; 903 int i;
904 struct ti_cmd_desc cmd; 904 struct ti_cmd_desc cmd;
905 905
906 for (i = 0; i < TI_SSLOTS; i++) { 906 for (i = 0; i < TI_SSLOTS; i++) {
907 if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 907 if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
908 return (ENOBUFS); 908 return (ENOBUFS);
909 } 909 }
910 910
911 TI_UPDATE_STDPROD(sc, i - 1); 911 TI_UPDATE_STDPROD(sc, i - 1);
912 sc->ti_std = i - 1; 912 sc->ti_std = i - 1;
913 913
914 return (0); 914 return (0);
915} 915}
916 916
917static void 917static void
918ti_free_rx_ring_std(struct ti_softc *sc) 918ti_free_rx_ring_std(struct ti_softc *sc)
919{ 919{
920 int i; 920 int i;
921 921
922 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 922 for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
923 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 923 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
924 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 924 m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
925 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 925 sc->ti_cdata.ti_rx_std_chain[i] = NULL;
926 926
927 /* if (sc->std_dmamap[i] == 0) panic() */ 927 /* if (sc->std_dmamap[i] == 0) panic() */
928 bus_dmamap_destroy(sc->sc_dmat, sc->std_dmamap[i]); 928 bus_dmamap_destroy(sc->sc_dmat, sc->std_dmamap[i]);
929 sc->std_dmamap[i] = 0; 929 sc->std_dmamap[i] = 0;
930 } 930 }
931 memset((char *)&sc->ti_rdata->ti_rx_std_ring[i], 0, 931 memset((char *)&sc->ti_rdata->ti_rx_std_ring[i], 0,
932 sizeof(struct ti_rx_desc)); 932 sizeof(struct ti_rx_desc));
933 } 933 }
934 934
935 return; 935 return;
936} 936}
937 937
938static int 938static int
939ti_init_rx_ring_jumbo(struct ti_softc *sc) 939ti_init_rx_ring_jumbo(struct ti_softc *sc)
940{ 940{
941 int i; 941 int i;
942 struct ti_cmd_desc cmd; 942 struct ti_cmd_desc cmd;
943 943
944 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 944 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
945 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 945 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
946 return (ENOBUFS); 946 return (ENOBUFS);
947 } 947 }
948 948
949 TI_UPDATE_JUMBOPROD(sc, i - 1); 949 TI_UPDATE_JUMBOPROD(sc, i - 1);
950 sc->ti_jumbo = i - 1; 950 sc->ti_jumbo = i - 1;
951 951
952 return (0); 952 return (0);
953} 953}
954 954
955static void 955static void
956ti_free_rx_ring_jumbo(struct ti_softc *sc) 956ti_free_rx_ring_jumbo(struct ti_softc *sc)
957{ 957{
958 int i; 958 int i;
959 959
960 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 960 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
961 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { 961 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
962 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); 962 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
963 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; 963 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
964 } 964 }
965 memset((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i], 0, 965 memset((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i], 0,
966 sizeof(struct ti_rx_desc)); 966 sizeof(struct ti_rx_desc));
967 } 967 }
968 968
969 return; 969 return;
970} 970}
971 971
972static int 972static int
973ti_init_rx_ring_mini(struct ti_softc *sc) 973ti_init_rx_ring_mini(struct ti_softc *sc)
974{ 974{
975 int i; 975 int i;
976 976
977 for (i = 0; i < TI_MSLOTS; i++) { 977 for (i = 0; i < TI_MSLOTS; i++) {
978 if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS) 978 if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS)
979 return (ENOBUFS); 979 return (ENOBUFS);
980 } 980 }
981 981
982 TI_UPDATE_MINIPROD(sc, i - 1); 982 TI_UPDATE_MINIPROD(sc, i - 1);
983 sc->ti_mini = i - 1; 983 sc->ti_mini = i - 1;
984 984
985 return (0); 985 return (0);
986} 986}
987 987
988static void 988static void
989ti_free_rx_ring_mini(struct ti_softc *sc) 989ti_free_rx_ring_mini(struct ti_softc *sc)
990{ 990{
991 int i; 991 int i;
992 992
993 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { 993 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
994 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { 994 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
995 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); 995 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
996 sc->ti_cdata.ti_rx_mini_chain[i] = NULL; 996 sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
997 997
998 /* if (sc->mini_dmamap[i] == 0) panic() */ 998 /* if (sc->mini_dmamap[i] == 0) panic() */
999 bus_dmamap_destroy(sc->sc_dmat, sc->mini_dmamap[i]); 999 bus_dmamap_destroy(sc->sc_dmat, sc->mini_dmamap[i]);
1000 sc->mini_dmamap[i] = 0; 1000 sc->mini_dmamap[i] = 0;
1001 } 1001 }
1002 memset((char *)&sc->ti_rdata->ti_rx_mini_ring[i], 0, 1002 memset((char *)&sc->ti_rdata->ti_rx_mini_ring[i], 0,
1003 sizeof(struct ti_rx_desc)); 1003 sizeof(struct ti_rx_desc));
1004 } 1004 }
1005 1005
1006 return; 1006 return;
1007} 1007}
1008 1008
1009static void 1009static void
1010ti_free_tx_ring(struct ti_softc *sc) 1010ti_free_tx_ring(struct ti_softc *sc)
1011{ 1011{
1012 int i; 1012 int i;
1013 struct txdmamap_pool_entry *dma; 1013 struct txdmamap_pool_entry *dma;
1014 1014
1015 for (i = 0; i < TI_TX_RING_CNT; i++) { 1015 for (i = 0; i < TI_TX_RING_CNT; i++) {
1016 if (sc->ti_cdata.ti_tx_chain[i] != NULL) { 1016 if (sc->ti_cdata.ti_tx_chain[i] != NULL) {
1017 m_freem(sc->ti_cdata.ti_tx_chain[i]); 1017 m_freem(sc->ti_cdata.ti_tx_chain[i]);
1018 sc->ti_cdata.ti_tx_chain[i] = NULL; 1018 sc->ti_cdata.ti_tx_chain[i] = NULL;
1019 1019
1020 /* if (sc->txdma[i] == 0) panic() */ 1020 /* if (sc->txdma[i] == 0) panic() */
1021 SIMPLEQ_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1021 SIMPLEQ_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1022 link); 1022 link);
1023 sc->txdma[i] = 0; 1023 sc->txdma[i] = 0;
1024 } 1024 }
1025 memset((char *)&sc->ti_rdata->ti_tx_ring[i], 0, 1025 memset((char *)&sc->ti_rdata->ti_tx_ring[i], 0,
1026 sizeof(struct ti_tx_desc)); 1026 sizeof(struct ti_tx_desc));
1027 } 1027 }
1028 1028
1029 while ((dma = SIMPLEQ_FIRST(&sc->txdma_list))) { 1029 while ((dma = SIMPLEQ_FIRST(&sc->txdma_list))) {
1030 SIMPLEQ_REMOVE_HEAD(&sc->txdma_list, link); 1030 SIMPLEQ_REMOVE_HEAD(&sc->txdma_list, link);
1031 bus_dmamap_destroy(sc->sc_dmat, dma->dmamap); 1031 bus_dmamap_destroy(sc->sc_dmat, dma->dmamap);
1032 free(dma, M_DEVBUF); 1032 free(dma, M_DEVBUF);
1033 } 1033 }
1034 1034
1035 return; 1035 return;
1036} 1036}
1037 1037
1038static int 1038static int
1039ti_init_tx_ring(struct ti_softc *sc) 1039ti_init_tx_ring(struct ti_softc *sc)
1040{ 1040{
1041 int i, error; 1041 int i, error;
1042 bus_dmamap_t dmamap; 1042 bus_dmamap_t dmamap;
1043 struct txdmamap_pool_entry *dma; 1043 struct txdmamap_pool_entry *dma;
1044 1044
1045 sc->ti_txcnt = 0; 1045 sc->ti_txcnt = 0;
1046 sc->ti_tx_saved_considx = 0; 1046 sc->ti_tx_saved_considx = 0;
1047 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 1047 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
1048 1048
1049 SIMPLEQ_INIT(&sc->txdma_list); 1049 SIMPLEQ_INIT(&sc->txdma_list);
1050 for (i = 0; i < TI_RSLOTS; i++) { 1050 for (i = 0; i < TI_RSLOTS; i++) {
1051 /* I've seen mbufs with 30 fragments. */ 1051 /* I've seen mbufs with 30 fragments. */
1052 if ((error = bus_dmamap_create(sc->sc_dmat, 1052 if ((error = bus_dmamap_create(sc->sc_dmat,
1053 ETHER_MAX_LEN_JUMBO, 40, ETHER_MAX_LEN_JUMBO, 0, 1053 ETHER_MAX_LEN_JUMBO, 40, ETHER_MAX_LEN_JUMBO, 0,
1054 BUS_DMA_NOWAIT, &dmamap)) != 0) { 1054 BUS_DMA_NOWAIT, &dmamap)) != 0) {
1055 aprint_error_dev(sc->sc_dev, 1055 aprint_error_dev(sc->sc_dev,
1056 "can't create tx map, error = %d\n", error); 1056 "can't create tx map, error = %d\n", error);
1057 return (ENOMEM); 1057 return (ENOMEM);
1058 } 1058 }
1059 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1059 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1060 if (!dma) { 1060 if (!dma) {
1061 aprint_error_dev(sc->sc_dev, 1061 aprint_error_dev(sc->sc_dev,
1062 "can't alloc txdmamap_pool_entry\n"); 1062 "can't alloc txdmamap_pool_entry\n");
1063 bus_dmamap_destroy(sc->sc_dmat, dmamap); 1063 bus_dmamap_destroy(sc->sc_dmat, dmamap);
1064 return (ENOMEM); 1064 return (ENOMEM);
1065 } 1065 }
1066 dma->dmamap = dmamap; 1066 dma->dmamap = dmamap;
1067 SIMPLEQ_INSERT_HEAD(&sc->txdma_list, dma, link); 1067 SIMPLEQ_INSERT_HEAD(&sc->txdma_list, dma, link);
1068 } 1068 }
1069 1069
1070 return (0); 1070 return (0);
1071} 1071}
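ti_init_tx_ring() pre-creates one DMA map per send slot and parks them all on a free list, so the transmit path never has to call bus_dmamap_create() at interrupt time; ti_free_tx_ring() above returns in-flight maps to the same list before destroying them. A hedged sketch of the take/return discipline using the queue(3) macros (the entry type and field names here are illustrative, not the driver's):

	#include <sys/queue.h>
	#include <stddef.h>

	struct map_entry {
		SIMPLEQ_ENTRY(map_entry) link;
		/* bus_dmamap_t dmamap; -- the pooled resource */
	};
	SIMPLEQ_HEAD(map_list, map_entry);

	/* Take a pre-created map; NULL means the pool is exhausted. */
	static struct map_entry *
	map_take(struct map_list *list)
	{
		struct map_entry *e = SIMPLEQ_FIRST(list);

		if (e != NULL)
			SIMPLEQ_REMOVE_HEAD(list, link);
		return e;
	}

	/* Return a map once its transmit has completed. */
	static void
	map_return(struct map_list *list, struct map_entry *e)
	{
		SIMPLEQ_INSERT_HEAD(list, e, link);
	}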
1072 1072
1073/* 1073/*
1074 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 1074 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
1075 * but we have to support the old way too so that Tigon 1 cards will 1075 * but we have to support the old way too so that Tigon 1 cards will
1076 * work. 1076 * work.
1077 */ 1077 */
1078static void 1078static void
1079ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr) 1079ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr)
1080{ 1080{
1081 struct ti_cmd_desc cmd; 1081 struct ti_cmd_desc cmd;
1082 uint16_t *m; 1082 uint16_t *m;
1083 uint32_t ext[2] = {0, 0}; 1083 uint32_t ext[2] = {0, 0};
1084 1084
1085 m = (uint16_t *)&addr->ether_addr_octet[0]; /* XXX */ 1085 m = (uint16_t *)&addr->ether_addr_octet[0]; /* XXX */
1086 1086
1087 switch (sc->ti_hwrev) { 1087 switch (sc->ti_hwrev) {
1088 case TI_HWREV_TIGON: 1088 case TI_HWREV_TIGON:
1089 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1089 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1090 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1090 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1091 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 1091 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
1092 break; 1092 break;
1093 case TI_HWREV_TIGON_II: 1093 case TI_HWREV_TIGON_II:
1094 ext[0] = htons(m[0]); 1094 ext[0] = htons(m[0]);
1095 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1095 ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1096 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (void *)&ext, 2); 1096 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (void *)&ext, 2);
1097 break; 1097 break;
1098 default: 1098 default:
1099 printf("%s: unknown hwrev\n", device_xname(sc->sc_dev)); 1099 printf("%s: unknown hwrev\n", device_xname(sc->sc_dev));
1100 break; 1100 break;
1101 } 1101 }
1102 1102
1103 return; 1103 return;
1104} 1104}
1105 1105
1106static void 1106static void
1107ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr) 1107ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr)
1108{ 1108{
1109 struct ti_cmd_desc cmd; 1109 struct ti_cmd_desc cmd;
1110 uint16_t *m; 1110 uint16_t *m;
1111 uint32_t ext[2] = {0, 0}; 1111 uint32_t ext[2] = {0, 0};
1112 1112
1113 m = (uint16_t *)&addr->ether_addr_octet[0]; /* XXX */ 1113 m = (uint16_t *)&addr->ether_addr_octet[0]; /* XXX */
1114 1114
1115 switch (sc->ti_hwrev) { 1115 switch (sc->ti_hwrev) {
1116 case TI_HWREV_TIGON: 1116 case TI_HWREV_TIGON:
1117 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1117 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
1118 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1118 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2]));
1119 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 1119 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
1120 break; 1120 break;
1121 case TI_HWREV_TIGON_II: 1121 case TI_HWREV_TIGON_II:
1122 ext[0] = htons(m[0]); 1122 ext[0] = htons(m[0]);
1123 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1123 ext[1] = (htons(m[1]) << 16) | htons(m[2]);
1124 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (void *)&ext, 2); 1124 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (void *)&ext, 2);
1125 break; 1125 break;
1126 default: 1126 default:
1127 printf("%s: unknown hwrev\n", device_xname(sc->sc_dev)); 1127 printf("%s: unknown hwrev\n", device_xname(sc->sc_dev));
1128 break; 1128 break;
1129 } 1129 }
1130 1130
1131 return; 1131 return;
1132} 1132}
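Both ti_add_mcast() and ti_del_mcast() pack the six address octets into the two 32-bit words the firmware expects: octets 0-1 in the low half of the first word, octets 2-5 in the second. A standalone worked example of that packing (using memcpy instead of the pointer cast, whose alignment risk the /* XXX */ comments flag); for 00:01:02:03:04:05 it prints ext[0]=0x00000001 ext[1]=0x02030405 on hosts of either endianness:

	#include <arpa/inet.h>	/* htons */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int
	main(void)
	{
		uint8_t addr[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
		uint16_t m[3];
		uint32_t ext[2];

		memcpy(m, addr, sizeof(m));	/* octet pairs, network order */
		ext[0] = htons(m[0]);
		ext[1] = ((uint32_t)htons(m[1]) << 16) | htons(m[2]);
		printf("ext[0]=0x%08x ext[1]=0x%08x\n",
		    (unsigned)ext[0], (unsigned)ext[1]);
		return 0;
	}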
1133 1133
1134/* 1134/*
1135 * Configure the Tigon's multicast address filter. 1135 * Configure the Tigon's multicast address filter.
1136 * 1136 *
1137 * The actual multicast table management is a bit of a pain, thanks to 1137 * The actual multicast table management is a bit of a pain, thanks to
1138 * slight brain damage on the part of both Alteon and us. With our 1138 * slight brain damage on the part of both Alteon and us. With our
1139 * multicast code, we are only alerted when the multicast address table 1139 * multicast code, we are only alerted when the multicast address table
1140 * changes and at that point we only have the current list of addresses: 1140 * changes and at that point we only have the current list of addresses:
1141 * we only know the current state, not the previous state, so we don't 1141 * we only know the current state, not the previous state, so we don't
1142 * actually know what addresses were removed or added. The firmware has 1142 * actually know what addresses were removed or added. The firmware has
1143 * state, but we can't get our grubby mitts on it, and there is no 'delete 1143 * state, but we can't get our grubby mitts on it, and there is no 'delete
1144 * all multicast addresses' command. Hence, we have to maintain our own 1144 * all multicast addresses' command. Hence, we have to maintain our own
1145 * state so we know what addresses have been programmed into the NIC at 1145 * state so we know what addresses have been programmed into the NIC at
1146 * any given time. 1146 * any given time.
1147 */ 1147 */
1148static void 1148static void
1149ti_setmulti(struct ti_softc *sc) 1149ti_setmulti(struct ti_softc *sc)
1150{ 1150{
1151 struct ethercom *ec = &sc->ethercom; 1151 struct ethercom *ec = &sc->ethercom;
1152 struct ifnet *ifp = &ec->ec_if; 1152 struct ifnet *ifp = &ec->ec_if;
1153 struct ti_cmd_desc cmd; 1153 struct ti_cmd_desc cmd;
1154 struct ti_mc_entry *mc; 1154 struct ti_mc_entry *mc;
1155 uint32_t intrs; 1155 uint32_t intrs;
1156 struct ether_multi *enm; 1156 struct ether_multi *enm;
1157 struct ether_multistep step; 1157 struct ether_multistep step;
1158 1158
1159 /* Disable interrupts. */ 1159 /* Disable interrupts. */
1160 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); 1160 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
1161 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1161 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1162 1162
1163 /* First, zot all the existing filters. */ 1163 /* First, zot all the existing filters. */
1164 while ((mc = SIMPLEQ_FIRST(&sc->ti_mc_listhead)) != NULL) { 1164 while ((mc = SIMPLEQ_FIRST(&sc->ti_mc_listhead)) != NULL) {
1165 ti_del_mcast(sc, &mc->mc_addr); 1165 ti_del_mcast(sc, &mc->mc_addr);
1166 SIMPLEQ_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); 1166 SIMPLEQ_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
1167 free(mc, M_DEVBUF); 1167 free(mc, M_DEVBUF);
1168 } 1168 }
1169 1169
1170 /* 1170 /*
1171 * Remember all multicast addresses so that we can delete them 1171 * Remember all multicast addresses so that we can delete them
1172 * later. Punt if there is a range of addresses or memory shortage. 1172 * later. Punt if there is a range of addresses or memory shortage.
1173 */ 1173 */
1174 ETHER_LOCK(ec); 1174 ETHER_LOCK(ec);
1175 ETHER_FIRST_MULTI(step, ec, enm); 1175 ETHER_FIRST_MULTI(step, ec, enm);
1176 while (enm != NULL) { 1176 while (enm != NULL) {
1177 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1177 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1178 ETHER_ADDR_LEN) != 0) { 1178 ETHER_ADDR_LEN) != 0) {
1179 ETHER_UNLOCK(ec); 1179 ETHER_UNLOCK(ec);
1180 goto allmulti; 1180 goto allmulti;
1181 } 1181 }
1182 if ((mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, 1182 if ((mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF,
1183 M_NOWAIT)) == NULL) { 1183 M_NOWAIT)) == NULL) {
1184 ETHER_UNLOCK(ec); 1184 ETHER_UNLOCK(ec);
1185 goto allmulti; 1185 goto allmulti;
1186 } 1186 }
1187 memcpy(&mc->mc_addr, enm->enm_addrlo, ETHER_ADDR_LEN); 1187 memcpy(&mc->mc_addr, enm->enm_addrlo, ETHER_ADDR_LEN);
1188 SIMPLEQ_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1188 SIMPLEQ_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries);
1189 ETHER_NEXT_MULTI(step, enm); 1189 ETHER_NEXT_MULTI(step, enm);
1190 } 1190 }
1191 ETHER_UNLOCK(ec); 1191 ETHER_UNLOCK(ec);
1192 1192
1193 /* Accept only programmed multicast addresses */ 1193 /* Accept only programmed multicast addresses */
1194 ifp->if_flags &= ~IFF_ALLMULTI; 1194 ifp->if_flags &= ~IFF_ALLMULTI;
1195 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); 1195 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
1196 1196
1197 /* Now program new ones. */ 1197 /* Now program new ones. */
1198 SIMPLEQ_FOREACH(mc, &sc->ti_mc_listhead, mc_entries) 1198 SIMPLEQ_FOREACH(mc, &sc->ti_mc_listhead, mc_entries)
1199 ti_add_mcast(sc, &mc->mc_addr); 1199 ti_add_mcast(sc, &mc->mc_addr);
1200 1200
1201 /* Re-enable interrupts. */ 1201 /* Re-enable interrupts. */
1202 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1202 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
1203 1203
1204 return; 1204 return;
1205 1205
1206allmulti: 1206allmulti:
1207 /* No need to keep individual multicast addresses */ 1207 /* No need to keep individual multicast addresses */
1208 while ((mc = SIMPLEQ_FIRST(&sc->ti_mc_listhead)) != NULL) { 1208 while ((mc = SIMPLEQ_FIRST(&sc->ti_mc_listhead)) != NULL) {
1209 SIMPLEQ_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); 1209 SIMPLEQ_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
1210 free(mc, M_DEVBUF); 1210 free(mc, M_DEVBUF);
1211 } 1211 }
1212 1212
1213 /* Accept all multicast addresses */ 1213 /* Accept all multicast addresses */
1214 ifp->if_flags |= IFF_ALLMULTI; 1214 ifp->if_flags |= IFF_ALLMULTI;
1215 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); 1215 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0);
1216 1216
1217 /* Re-enable interrupts. */ 1217 /* Re-enable interrupts. */
1218 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1218 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
1219} 1219}
1220 1220
1221/* 1221/*
1222 * Check to see if the BIOS has configured us for a 64 bit slot when 1222 * Check to see if the BIOS has configured us for a 64 bit slot when
1223 * we aren't actually in one. If we detect this condition, we can work 1223 * we aren't actually in one. If we detect this condition, we can work
1224 * around it on the Tigon 2 by setting a bit in the PCI state register, 1224 * around it on the Tigon 2 by setting a bit in the PCI state register,
1225 * but for the Tigon 1 we must give up and abort the interface attach. 1225 * but for the Tigon 1 we must give up and abort the interface attach.
1226 */ 1226 */
1227static int 1227static int
1228ti_64bitslot_war(struct ti_softc *sc) 1228ti_64bitslot_war(struct ti_softc *sc)
1229{ 1229{
1230 if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { 1230 if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
1231 CSR_WRITE_4(sc, 0x600, 0); 1231 CSR_WRITE_4(sc, 0x600, 0);
1232 CSR_WRITE_4(sc, 0x604, 0); 1232 CSR_WRITE_4(sc, 0x604, 0);
1233 CSR_WRITE_4(sc, 0x600, 0x5555AAAA); 1233 CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
1234 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { 1234 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
1235 if (sc->ti_hwrev == TI_HWREV_TIGON) 1235 if (sc->ti_hwrev == TI_HWREV_TIGON)
1236 return (EINVAL); 1236 return (EINVAL);
1237 else { 1237 else {
1238 TI_SETBIT(sc, TI_PCI_STATE, 1238 TI_SETBIT(sc, TI_PCI_STATE,
1239 TI_PCISTATE_32BIT_BUS); 1239 TI_PCISTATE_32BIT_BUS);
1240 return (0); 1240 return (0);
1241 } 1241 }
1242 } 1242 }
1243 } 1243 }
1244 1244
1245 return (0); 1245 return (0);
1246} 1246}
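The magic probe in ti_64bitslot_war() works by aliasing: the driver zeroes two adjacent 32-bit windows, writes a recognizable pattern into the first, and reads the second. If the pattern shows up in the neighbouring word, the chip is driving 64-bit cycles while actually sitting in a 32-bit slot. A sketch of the probe's shape, with hypothetical reg_read()/reg_write() helpers standing in for CSR_READ_4()/CSR_WRITE_4():

	#include <stdbool.h>
	#include <stdint.h>

	extern uint32_t reg_read(uint32_t off);			/* hypothetical */
	extern void reg_write(uint32_t off, uint32_t val);	/* hypothetical */

	static bool
	bus_width_misconfigured(void)
	{
		reg_write(0x600, 0);
		reg_write(0x604, 0);
		reg_write(0x600, 0x5555AAAA);
		/* The pattern leaking into the adjacent word betrays
		 * 64-bit accesses on what is really a 32-bit bus. */
		return reg_read(0x604) == 0x5555AAAA;
	}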
1247 1247
1248/* 1248/*
1249 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1249 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1250 * self-test results. 1250 * self-test results.
1251 */ 1251 */
1252static int 1252static int
1253ti_chipinit(struct ti_softc *sc) 1253ti_chipinit(struct ti_softc *sc)
1254{ 1254{
1255 uint32_t cacheline; 1255 uint32_t cacheline;
1256 uint32_t pci_writemax = 0; 1256 uint32_t pci_writemax = 0;
1257 uint32_t rev; 1257 uint32_t rev;
1258 1258
1259 /* Initialize link to down state. */ 1259 /* Initialize link to down state. */
1260 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; 1260 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;
1261 1261
1262 /* Set endianness before we access any non-PCI registers. */ 1262 /* Set endianness before we access any non-PCI registers. */
1263#if BYTE_ORDER == BIG_ENDIAN 1263#if BYTE_ORDER == BIG_ENDIAN
1264 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1264 CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
1265 TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24)); 1265 TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
1266#else 1266#else
1267 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1267 CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
1268 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); 1268 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
1269#endif 1269#endif
1270 1270
1271 /* Check the ROM failed bit to see if self-tests passed. */ 1271 /* Check the ROM failed bit to see if self-tests passed. */
1272 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { 1272 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
1273 printf("%s: board self-diagnostics failed!\n", 1273 printf("%s: board self-diagnostics failed!\n",
1274 device_xname(sc->sc_dev)); 1274 device_xname(sc->sc_dev));
1275 return (ENODEV); 1275 return (ENODEV);
1276 } 1276 }
1277 1277
1278 /* Halt the CPU. */ 1278 /* Halt the CPU. */
1279 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); 1279 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);
1280 1280
1281 /* Figure out the hardware revision. */ 1281 /* Figure out the hardware revision. */
1282 rev = CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK; 1282 rev = CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK;
1283 switch (rev) { 1283 switch (rev) {
1284 case TI_REV_TIGON_I: 1284 case TI_REV_TIGON_I:
1285 sc->ti_hwrev = TI_HWREV_TIGON; 1285 sc->ti_hwrev = TI_HWREV_TIGON;
1286 break; 1286 break;
1287 case TI_REV_TIGON_II: 1287 case TI_REV_TIGON_II:
1288 sc->ti_hwrev = TI_HWREV_TIGON_II; 1288 sc->ti_hwrev = TI_HWREV_TIGON_II;
1289 break; 1289 break;
1290 default: 1290 default:
1291 printf("%s: unsupported chip revision 0x%x\n", 1291 printf("%s: unsupported chip revision 0x%x\n",
1292 device_xname(sc->sc_dev), rev); 1292 device_xname(sc->sc_dev), rev);
1293 return (ENODEV); 1293 return (ENODEV);
1294 } 1294 }
1295 1295
1296 /* Do special setup for Tigon 2. */ 1296 /* Do special setup for Tigon 2. */
1297 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1297 if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
1298 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); 1298 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
1299 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_256K); 1299 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_256K);
1300 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); 1300 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
1301 } 1301 }
1302 1302
1303 /* Set up the PCI state register. */ 1303 /* Set up the PCI state register. */
1304 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD); 1304 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD);
1305 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1305 if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
1306 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); 1306 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
1307 } 1307 }
1308 1308
1309 /* Clear the read/write max DMA parameters. */ 1309 /* Clear the read/write max DMA parameters. */
1310 TI_CLRBIT(sc, TI_PCI_STATE, 1310 TI_CLRBIT(sc, TI_PCI_STATE,
1311 (TI_PCISTATE_WRITE_MAXDMA | TI_PCISTATE_READ_MAXDMA)); 1311 (TI_PCISTATE_WRITE_MAXDMA | TI_PCISTATE_READ_MAXDMA));
1312 1312
1313 /* Get cache line size. */ 1313 /* Get cache line size. */
1314 cacheline = PCI_CACHELINE(CSR_READ_4(sc, PCI_BHLC_REG)); 1314 cacheline = PCI_CACHELINE(CSR_READ_4(sc, PCI_BHLC_REG));
1315 1315
1316 /* 1316 /*
1317 * If the system has enabled the PCI memory write 1317 * If the system has enabled the PCI memory write
1318 * and invalidate command in the command register, set 1318 * and invalidate command in the command register, set
1319 * the write max parameter accordingly. This is necessary 1319 * the write max parameter accordingly. This is necessary
1320 * to use MWI with the Tigon 2. 1320 * to use MWI with the Tigon 2.
1321 */ 1321 */
1322 if (CSR_READ_4(sc, PCI_COMMAND_STATUS_REG) 1322 if (CSR_READ_4(sc, PCI_COMMAND_STATUS_REG)
1323 & PCI_COMMAND_INVALIDATE_ENABLE) { 1323 & PCI_COMMAND_INVALIDATE_ENABLE) {
1324 switch (cacheline) { 1324 switch (cacheline) {
1325 case 1: 1325 case 1:
1326 case 4: 1326 case 4:
1327 case 8: 1327 case 8:
1328 case 16: 1328 case 16:
1329 case 32: 1329 case 32:
1330 case 64: 1330 case 64:
1331 break; 1331 break;
1332 default: 1332 default:
1333 /* Disable PCI memory write and invalidate. */ 1333 /* Disable PCI memory write and invalidate. */
1334 if (bootverbose) 1334 if (bootverbose)
1335 printf("%s: cache line size %d not " 1335 printf("%s: cache line size %d not "
1336 "supported; disabling PCI MWI\n", 1336 "supported; disabling PCI MWI\n",
1337 device_xname(sc->sc_dev), cacheline); 1337 device_xname(sc->sc_dev), cacheline);
1338 CSR_WRITE_4(sc, PCI_COMMAND_STATUS_REG, 1338 CSR_WRITE_4(sc, PCI_COMMAND_STATUS_REG,
1339 CSR_READ_4(sc, PCI_COMMAND_STATUS_REG) 1339 CSR_READ_4(sc, PCI_COMMAND_STATUS_REG)
1340 & ~PCI_COMMAND_INVALIDATE_ENABLE); 1340 & ~PCI_COMMAND_INVALIDATE_ENABLE);
1341 break; 1341 break;
1342 } 1342 }
1343 } 1343 }
1344 1344
1345#ifdef __brokenalpha__ 1345#ifdef __brokenalpha__
1346 /* 1346 /*
1347 * From the Alteon sample driver: 1347 * From the Alteon sample driver:
1348 * Must ensure that we do not cross an 8K (bytes) boundary 1348 * Must ensure that we do not cross an 8K (bytes) boundary
1349 * for DMA reads. Our highest limit is 1K bytes. This is a 1349 * for DMA reads. Our highest limit is 1K bytes. This is a
1350 * restriction on some ALPHA platforms with early revision 1350 * restriction on some ALPHA platforms with early revision
1351 * 21174 PCI chipsets, such as the AlphaPC 164lx 1351 * 21174 PCI chipsets, such as the AlphaPC 164lx
1352 */ 1352 */
1353 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax | TI_PCI_READMAX_1024); 1353 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax | TI_PCI_READMAX_1024);
1354#else 1354#else
1355 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); 1355 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);
1356#endif 1356#endif
1357 1357
1358 /* This sets the min dma param all the way up (0xff). */ 1358 /* This sets the min dma param all the way up (0xff). */
1359 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); 1359 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);
1360 1360
1361 /* Configure DMA variables. */ 1361 /* Configure DMA variables. */
1362#if BYTE_ORDER == BIG_ENDIAN 1362#if BYTE_ORDER == BIG_ENDIAN
1363 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD | 1363 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
1364 TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD | 1364 TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
1365 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1365 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
1366 TI_OPMODE_DONT_FRAG_JUMBO); 1366 TI_OPMODE_DONT_FRAG_JUMBO);
1367#else 1367#else
1368 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA | 1368 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA |
1369 TI_OPMODE_WORDSWAP_BD | TI_OPMODE_DONT_FRAG_JUMBO | 1369 TI_OPMODE_WORDSWAP_BD | TI_OPMODE_DONT_FRAG_JUMBO |
1370 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB); 1370 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB);
1371#endif 1371#endif
1372 1372
1373 /* 1373 /*
1374 * Only allow 1 DMA channel to be active at a time. 1374 * Only allow 1 DMA channel to be active at a time.
1375 * I don't think this is a good idea, but without it 1375 * I don't think this is a good idea, but without it
1376 * the firmware racks up lots of nicDmaReadRingFull 1376 * the firmware racks up lots of nicDmaReadRingFull
1377 * errors. 1377 * errors.
1378 * Incompatible with hardware assisted checksums. 1378 * Incompatible with hardware assisted checksums.
1379 */ 1379 */
1380 if ((sc->ethercom.ec_if.if_capenable & 1380 if ((sc->ethercom.ec_if.if_capenable &
1381 (IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 1381 (IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1382 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 1382 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1383 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx)) == 0) 1383 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx)) == 0)
1384 TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE); 1384 TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);
1385 1385
1386 /* Recommended settings from Tigon manual. */ 1386 /* Recommended settings from Tigon manual. */
1387 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); 1387 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
1388 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); 1388 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);
1389 1389
1390 if (ti_64bitslot_war(sc)) { 1390 if (ti_64bitslot_war(sc)) {
1391 printf("%s: bios thinks we're in a 64 bit slot, " 1391 printf("%s: bios thinks we're in a 64 bit slot, "
1392 "but we aren't", device_xname(sc->sc_dev)); 1392 "but we aren't", device_xname(sc->sc_dev));
1393 return (EINVAL); 1393 return (EINVAL);
1394 } 1394 }
1395 1395
1396 return (0); 1396 return (0);
1397} 1397}
1398 1398
1399/* 1399/*
1400 * Initialize the general information block and firmware, and 1400 * Initialize the general information block and firmware, and
1401 * start the CPU(s) running. 1401 * start the CPU(s) running.
1402 */ 1402 */
1403static int 1403static int
1404ti_gibinit(struct ti_softc *sc) 1404ti_gibinit(struct ti_softc *sc)
1405{ 1405{
1406 struct ti_rcb *rcb; 1406 struct ti_rcb *rcb;
1407 int i; 1407 int i;
1408 struct ifnet *ifp; 1408 struct ifnet *ifp;
1409 1409
1410 ifp = &sc->ethercom.ec_if; 1410 ifp = &sc->ethercom.ec_if;
1411 1411
1412 /* Disable interrupts for now. */ 1412 /* Disable interrupts for now. */
1413 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1413 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
1414 1414
1415 /* Tell the chip where to find the general information block. */ 1415 /* Tell the chip where to find the general information block. */
1416 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0); 1416 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
1417 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, TI_CDGIBADDR(sc)); 1417 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, TI_CDGIBADDR(sc));
1418 1418
1419 /* Load the firmware into SRAM. */ 1419 /* Load the firmware into SRAM. */
1420 ti_loadfw(sc); 1420 ti_loadfw(sc);
1421 1421
1422 /* Set up the contents of the general info and ring control blocks. */ 1422 /* Set up the contents of the general info and ring control blocks. */
1423 1423
1424 /* Set up the event ring and producer pointer. */ 1424 /* Set up the event ring and producer pointer. */
1425 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb; 1425 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;
1426 1426
1427 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDEVENTADDR(sc, 0)); 1427 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDEVENTADDR(sc, 0));
1428 rcb->ti_flags = 0; 1428 rcb->ti_flags = 0;
1429 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr, 1429 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr,
1430 TI_CDEVPRODADDR(sc)); 1430 TI_CDEVPRODADDR(sc));
1431 1431
1432 sc->ti_ev_prodidx.ti_idx = 0; 1432 sc->ti_ev_prodidx.ti_idx = 0;
1433 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); 1433 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
1434 sc->ti_ev_saved_considx = 0; 1434 sc->ti_ev_saved_considx = 0;
1435 1435
1436 /* Set up the command ring and producer mailbox. */ 1436 /* Set up the command ring and producer mailbox. */
1437 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb; 1437 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;
1438 1438
1439 TI_HOSTADDR(rcb->ti_hostaddr, TI_GCR_NIC_ADDR(TI_GCR_CMDRING)); 1439 TI_HOSTADDR(rcb->ti_hostaddr, TI_GCR_NIC_ADDR(TI_GCR_CMDRING));
1440 rcb->ti_flags = 0; 1440 rcb->ti_flags = 0;
1441 rcb->ti_max_len = 0; 1441 rcb->ti_max_len = 0;
1442 for (i = 0; i < TI_CMD_RING_CNT; i++) { 1442 for (i = 0; i < TI_CMD_RING_CNT; i++) {
1443 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); 1443 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
1444 } 1444 }
1445 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); 1445 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
1446 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); 1446 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
1447 sc->ti_cmd_saved_prodidx = 0; 1447 sc->ti_cmd_saved_prodidx = 0;
1448 1448
1449 /* 1449 /*
1450 * Assign the address of the stats refresh buffer. 1450 * Assign the address of the stats refresh buffer.
1451 * We re-use the current stats buffer for this to 1451 * We re-use the current stats buffer for this to
1452 * conserve memory. 1452 * conserve memory.
1453 */ 1453 */
1454 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr, 1454 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr,
1455 TI_CDSTATSADDR(sc)); 1455 TI_CDSTATSADDR(sc));
1456 1456
1457 /* Set up the standard receive ring. */ 1457 /* Set up the standard receive ring. */
1458 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb; 1458 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
1459 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXSTDADDR(sc, 0)); 1459 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXSTDADDR(sc, 0));
1460 rcb->ti_max_len = ETHER_MAX_LEN; 1460 rcb->ti_max_len = ETHER_MAX_LEN;
1461 rcb->ti_flags = 0; 1461 rcb->ti_flags = 0;
1462 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 1462 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
1463 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM; 1463 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM;
1464 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 1464 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
1465 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM; 1465 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM;
1466 if (VLAN_ATTACHED(&sc->ethercom)) 1466 if (VLAN_ATTACHED(&sc->ethercom))
1467 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1467 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1468 1468
1469 /* Set up the jumbo receive ring. */ 1469 /* Set up the jumbo receive ring. */
1470 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb; 1470 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
1471 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXJUMBOADDR(sc, 0)); 1471 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXJUMBOADDR(sc, 0));
1472 rcb->ti_max_len = ETHER_MAX_LEN_JUMBO; 1472 rcb->ti_max_len = ETHER_MAX_LEN_JUMBO;
1473 rcb->ti_flags = 0; 1473 rcb->ti_flags = 0;
1474 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 1474 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
1475 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM; 1475 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM;
1476 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 1476 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
1477 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM; 1477 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM;
1478 if (VLAN_ATTACHED(&sc->ethercom)) 1478 if (VLAN_ATTACHED(&sc->ethercom))
1479 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1479 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1480 1480
1481 /* 1481 /*
1482 * Set up the mini ring. Only activated on the 1482 * Set up the mini ring. Only activated on the
1483 * Tigon 2 but the slot in the config block is 1483 * Tigon 2 but the slot in the config block is
1484 * still there on the Tigon 1. 1484 * still there on the Tigon 1.
1485 */ 1485 */
1486 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb; 1486 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
1487 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXMINIADDR(sc, 0)); 1487 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXMINIADDR(sc, 0));
1488 rcb->ti_max_len = MHLEN - ETHER_ALIGN; 1488 rcb->ti_max_len = MHLEN - ETHER_ALIGN;
1489 if (sc->ti_hwrev == TI_HWREV_TIGON) 1489 if (sc->ti_hwrev == TI_HWREV_TIGON)
1490 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; 1490 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
1491 else 1491 else
1492 rcb->ti_flags = 0; 1492 rcb->ti_flags = 0;
1493 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 1493 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
1494 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM; 1494 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM;
1495 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 1495 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
1496 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM; 1496 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM;
1497 if (VLAN_ATTACHED(&sc->ethercom)) 1497 if (VLAN_ATTACHED(&sc->ethercom))
1498 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1498 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1499 1499
1500 /* 1500 /*
1501 * Set up the receive return ring. 1501 * Set up the receive return ring.
1502 */ 1502 */
1503 rcb = &sc->ti_rdata->ti_info.ti_return_rcb; 1503 rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
1504 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXRTNADDR(sc, 0)); 1504 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDRXRTNADDR(sc, 0));
1505 rcb->ti_flags = 0; 1505 rcb->ti_flags = 0;
1506 rcb->ti_max_len = TI_RETURN_RING_CNT; 1506 rcb->ti_max_len = TI_RETURN_RING_CNT;
1507 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr, 1507 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr,
1508 TI_CDRTNPRODADDR(sc)); 1508 TI_CDRTNPRODADDR(sc));
1509 1509
1510 /* 1510 /*
1511 * Set up the tx ring. Note: for the Tigon 2, we have the option 1511 * Set up the tx ring. Note: for the Tigon 2, we have the option
1512 * of putting the transmit ring in the host's address space and 1512 * of putting the transmit ring in the host's address space and
1513 * letting the chip DMA it instead of leaving the ring in the NIC's 1513 * letting the chip DMA it instead of leaving the ring in the NIC's
1514 * memory and accessing it through the shared memory region. We 1514 * memory and accessing it through the shared memory region. We
1515 * do this for the Tigon 2, but it doesn't work on the Tigon 1, 1515 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
1516 * so we have to revert to the shared memory scheme if we detect 1516 * so we have to revert to the shared memory scheme if we detect
1517 * a Tigon 1 chip. 1517 * a Tigon 1 chip.
1518 */ 1518 */
1519 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); 1519 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
1520 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1520 if (sc->ti_hwrev == TI_HWREV_TIGON) {
1521 sc->ti_tx_ring_nic = 1521 sc->ti_tx_ring_nic =
1522 (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW); 1522 (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW);
1523 } 1523 }
1524 memset((char *)sc->ti_rdata->ti_tx_ring, 0, 1524 memset((char *)sc->ti_rdata->ti_tx_ring, 0,
1525 TI_TX_RING_CNT * sizeof(struct ti_tx_desc)); 1525 TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
1526 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb; 1526 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
1527 if (sc->ti_hwrev == TI_HWREV_TIGON) 1527 if (sc->ti_hwrev == TI_HWREV_TIGON)
1528 rcb->ti_flags = 0; 1528 rcb->ti_flags = 0;
1529 else 1529 else
1530 rcb->ti_flags = TI_RCB_FLAG_HOST_RING; 1530 rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
1531 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) 1531 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx)
1532 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM; 1532 rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM;
1533 /* 1533 /*
1534 * When we get the packet, there is a pseudo-header seed already 1534 * When we get the packet, there is a pseudo-header seed already
1535 * in the th_sum or uh_sum field. Make sure the firmware doesn't 1535 * in the th_sum or uh_sum field. Make sure the firmware doesn't
1536 * compute the pseudo-header checksum again! 1536 * compute the pseudo-header checksum again!
1537 */ 1537 */
1538 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx)) 1538 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx))
1539 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | 1539 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
1540 TI_RCB_FLAG_NO_PHDR_CKSUM; 1540 TI_RCB_FLAG_NO_PHDR_CKSUM;
1541 if (VLAN_ATTACHED(&sc->ethercom)) 1541 if (VLAN_ATTACHED(&sc->ethercom))
1542 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1542 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
1543 rcb->ti_max_len = TI_TX_RING_CNT; 1543 rcb->ti_max_len = TI_TX_RING_CNT;
1544 if (sc->ti_hwrev == TI_HWREV_TIGON) 1544 if (sc->ti_hwrev == TI_HWREV_TIGON)
1545 TI_HOSTADDR(rcb->ti_hostaddr, TI_TX_RING_BASE); 1545 TI_HOSTADDR(rcb->ti_hostaddr, TI_TX_RING_BASE);
1546 else 1546 else
1547 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDTXADDR(sc, 0)); 1547 TI_HOSTADDR(rcb->ti_hostaddr, TI_CDTXADDR(sc, 0));
1548 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr, 1548 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr,
1549 TI_CDTXCONSADDR(sc)); 1549 TI_CDTXCONSADDR(sc));
1550 1550
1551 /* 1551 /*
1552 * We're done frobbing the General Information Block. Sync 1552 * We're done frobbing the General Information Block. Sync
1553 * it. Note we take care of the first stats sync here, as 1553 * it. Note we take care of the first stats sync here, as
1554 * well. 1554 * well.
1555 */ 1555 */
1556 TI_CDGIBSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1556 TI_CDGIBSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1557 1557
1558 /* Set up tuneables */ 1558 /* Set up tuneables */
1559 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN) || 1559 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN) ||
1560 (sc->ethercom.ec_capenable & ETHERCAP_VLAN_MTU)) 1560 (sc->ethercom.ec_capenable & ETHERCAP_VLAN_MTU))
1561 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, 1561 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
1562 (sc->ti_rx_coal_ticks / 10)); 1562 (sc->ti_rx_coal_ticks / 10));
1563 else 1563 else
1564 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); 1564 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
1565 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); 1565 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
1566 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 1566 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
1567 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); 1567 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
1568 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); 1568 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
1569 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); 1569 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);
1570 1570
1571 /* Turn interrupts on. */ 1571 /* Turn interrupts on. */
1572 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); 1572 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
1573 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 1573 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);
1574 1574
1575 /* Start CPU. */ 1575 /* Start CPU. */
1576 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT | TI_CPUSTATE_STEP)); 1576 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT | TI_CPUSTATE_STEP));
1577 1577
1578 return (0); 1578 return (0);
1579} 1579}
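The TI_RCB_FLAG_NO_PHDR_CKSUM handling above exists because the stack pre-seeds th_sum/uh_sum with the IPv4 pseudo-header sum before handing the packet down, leaving only the payload for the NIC to fold in. A conceptual sketch of that seed follows: the one's-complement sum over source/destination address, protocol, and TCP/UDP length. Byte-order handling is simplified here (all inputs are assumed already in big-endian register form), so treat it as an illustration rather than the stack's actual routine:

	#include <stdint.h>

	/*
	 * Conceptual sketch of the pseudo-header seed: one's-complement
	 * sum of src/dst IPv4 address, protocol and TCP/UDP length, with
	 * the carries folded back in.
	 */
	static uint16_t
	phdr_seed(uint32_t src, uint32_t dst, uint8_t proto, uint16_t len)
	{
		uint32_t sum;

		sum = (src >> 16) + (src & 0xffff);
		sum += (dst >> 16) + (dst & 0xffff);
		sum += proto + len;
		while (sum >> 16)		/* fold carries */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}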
1580 1580
1581/* 1581/*
1582 * look for id in the device list, returning the first match 1582 * look for id in the device list, returning the first match
1583 */ 1583 */
1584static const struct ti_type * 1584static const struct ti_type *
1585ti_type_match(struct pci_attach_args *pa) 1585ti_type_match(struct pci_attach_args *pa)
1586{ 1586{
1587 const struct ti_type *t; 1587 const struct ti_type *t;
1588 1588
1589 t = ti_devs; 1589 t = ti_devs;
1590 while (t->ti_name != NULL) { 1590 while (t->ti_name != NULL) {
1591 if ((PCI_VENDOR(pa->pa_id) == t->ti_vid) && 1591 if ((PCI_VENDOR(pa->pa_id) == t->ti_vid) &&
1592 (PCI_PRODUCT(pa->pa_id) == t->ti_did)) { 1592 (PCI_PRODUCT(pa->pa_id) == t->ti_did)) {
1593 return (t); 1593 return (t);
1594 } 1594 }
1595 t++; 1595 t++;
1596 } 1596 }
1597 1597
1598 return (NULL); 1598 return (NULL);
1599} 1599}
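ti_type_match() walks a table terminated by a sentinel entry whose ti_name is NULL, the usual idiom for static PCI ID tables. A minimal self-contained sketch of the same idiom (the struct layout and table contents here are illustrative, not the driver's):

	#include <stddef.h>
	#include <stdint.h>

	struct dev_type {
		uint16_t vid, did;
		const char *name;	/* NULL terminates the table */
	};

	static const struct dev_type dev_table[] = {
		{ 0x12ae, 0x0001, "example gigabit NIC" },	/* illustrative */
		{ 0, 0, NULL },
	};

	static const struct dev_type *
	type_match(uint16_t vid, uint16_t did)
	{
		for (const struct dev_type *t = dev_table; t->name != NULL; t++)
			if (t->vid == vid && t->did == did)
				return t;
		return NULL;
	}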
1600 1600
1601/* 1601/*
1602 * Probe for a Tigon chip. Check the PCI vendor and device IDs 1602 * Probe for a Tigon chip. Check the PCI vendor and device IDs
1603 * against our list and return its name if we find a match. 1603 * against our list and return its name if we find a match.
1604 */ 1604 */
1605static int 1605static int
1606ti_probe(device_t parent, cfdata_t match, void *aux) 1606ti_probe(device_t parent, cfdata_t match, void *aux)
1607{ 1607{
1608 struct pci_attach_args *pa = aux; 1608 struct pci_attach_args *pa = aux;
1609 const struct ti_type *t; 1609 const struct ti_type *t;
1610 1610
1611 t = ti_type_match(pa); 1611 t = ti_type_match(pa);
1612 1612
1613 return ((t == NULL) ? 0 : 1); 1613 return ((t == NULL) ? 0 : 1);
1614} 1614}
1615 1615
1616static void 1616static void
1617ti_attach(device_t parent, device_t self, void *aux) 1617ti_attach(device_t parent, device_t self, void *aux)
1618{ 1618{
1619 uint32_t command; 1619 uint32_t command;
1620 struct ifnet *ifp; 1620 struct ifnet *ifp;
1621 struct ti_softc *sc; 1621 struct ti_softc *sc;
1622 uint8_t eaddr[ETHER_ADDR_LEN]; 1622 uint8_t eaddr[ETHER_ADDR_LEN];
1623 struct pci_attach_args *pa = aux; 1623 struct pci_attach_args *pa = aux;
1624 pci_chipset_tag_t pc = pa->pa_pc; 1624 pci_chipset_tag_t pc = pa->pa_pc;
1625 pci_intr_handle_t ih; 1625 pci_intr_handle_t ih;
1626 const char *intrstr = NULL; 1626 const char *intrstr = NULL;
1627 bus_dma_segment_t dmaseg; 1627 bus_dma_segment_t dmaseg;
1628 int error, dmanseg, nolinear; 1628 int error, dmanseg, nolinear;
1629 const struct ti_type *t; 1629 const struct ti_type *t;
1630 char intrbuf[PCI_INTRSTR_LEN]; 1630 char intrbuf[PCI_INTRSTR_LEN];
1631 1631
1632 t = ti_type_match(pa); 1632 t = ti_type_match(pa);
1633 if (t == NULL) { 1633 if (t == NULL) {
1634 aprint_error("ti_attach: were did the card go ?\n"); 1634 aprint_error("ti_attach: were did the card go ?\n");
1635 return; 1635 return;
1636 } 1636 }
1637 1637
1638 aprint_normal(": %s (rev. 0x%02x)\n", t->ti_name, 1638 aprint_normal(": %s (rev. 0x%02x)\n", t->ti_name,
1639 PCI_REVISION(pa->pa_class)); 1639 PCI_REVISION(pa->pa_class));
1640 1640
1641 sc = device_private(self); 1641 sc = device_private(self);
1642 sc->sc_dev = self; 1642 sc->sc_dev = self;
1643 1643
1644 /* 1644 /*
1645 * Map control/status registers. 1645 * Map control/status registers.
1646 */ 1646 */
1647 nolinear = 0; 1647 nolinear = 0;
1648 if (pci_mapreg_map(pa, 0x10, 1648 if (pci_mapreg_map(pa, 0x10,
1649 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 1649 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
1650 BUS_SPACE_MAP_LINEAR , &sc->ti_btag, &sc->ti_bhandle, 1650 BUS_SPACE_MAP_LINEAR , &sc->ti_btag, &sc->ti_bhandle,
1651 NULL, NULL)) { 1651 NULL, NULL)) {
1652 nolinear = 1; 1652 nolinear = 1;
1653 if (pci_mapreg_map(pa, 0x10, 1653 if (pci_mapreg_map(pa, 0x10,
1654 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 1654 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
1655 0 , &sc->ti_btag, &sc->ti_bhandle, NULL, NULL)) { 1655 0 , &sc->ti_btag, &sc->ti_bhandle, NULL, NULL)) {
1656 aprint_error_dev(self, "can't map memory space\n"); 1656 aprint_error_dev(self, "can't map memory space\n");
1657 return; 1657 return;
1658 } 1658 }
1659 } 1659 }
1660 if (nolinear == 0) 1660 if (nolinear == 0)
1661 sc->ti_vhandle = bus_space_vaddr(sc->ti_btag, sc->ti_bhandle); 1661 sc->ti_vhandle = bus_space_vaddr(sc->ti_btag, sc->ti_bhandle);
1662 else 1662 else
1663 sc->ti_vhandle = NULL; 1663 sc->ti_vhandle = NULL;
1664 1664
1665 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1665 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1666 command |= PCI_COMMAND_MASTER_ENABLE; 1666 command |= PCI_COMMAND_MASTER_ENABLE;
1667 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 1667 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
1668 1668
1669 /* Allocate interrupt */ 1669 /* Allocate interrupt */
1670 if (pci_intr_map(pa, &ih)) { 1670 if (pci_intr_map(pa, &ih)) {
1671 aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n"); 1671 aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
1672 return; 1672 return;
1673 } 1673 }
1674 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 1674 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
1675 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, ti_intr, sc, 1675 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, ti_intr, sc,
1676 device_xname(self)); 1676 device_xname(self));
1677 if (sc->sc_ih == NULL) { 1677 if (sc->sc_ih == NULL) {
1678 aprint_error_dev(sc->sc_dev, "couldn't establish interrupt"); 1678 aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
1679 if (intrstr != NULL) 1679 if (intrstr != NULL)
1680 aprint_error(" at %s", intrstr); 1680 aprint_error(" at %s", intrstr);
1681 aprint_error("\n"); 1681 aprint_error("\n");
1682 return; 1682 return;
1683 } 1683 }
1684 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 1684 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1685 1685
1686 if (ti_chipinit(sc)) { 1686 if (ti_chipinit(sc)) {
1687 aprint_error_dev(self, "chip initialization failed\n"); 1687 aprint_error_dev(self, "chip initialization failed\n");
1688 goto fail2; 1688 goto fail2;
1689 } 1689 }
1690 1690
1691 /* 1691 /*
1692 * Deal with some chip differences. 1692 * Deal with some chip differences.
1693 */ 1693 */
1694 switch (sc->ti_hwrev) { 1694 switch (sc->ti_hwrev) {
1695 case TI_HWREV_TIGON: 1695 case TI_HWREV_TIGON:
1696 sc->sc_tx_encap = ti_encap_tigon1; 1696 sc->sc_tx_encap = ti_encap_tigon1;
1697 sc->sc_tx_eof = ti_txeof_tigon1; 1697 sc->sc_tx_eof = ti_txeof_tigon1;
1698 if (nolinear == 1) 1698 if (nolinear == 1)
1699 aprint_error_dev(self, 1699 aprint_error_dev(self,
1700 "memory space not mapped linear\n"); 1700 "memory space not mapped linear\n");
1701 break; 1701 break;
1702 1702
1703 case TI_HWREV_TIGON_II: 1703 case TI_HWREV_TIGON_II:
1704 sc->sc_tx_encap = ti_encap_tigon2; 1704 sc->sc_tx_encap = ti_encap_tigon2;
1705 sc->sc_tx_eof = ti_txeof_tigon2; 1705 sc->sc_tx_eof = ti_txeof_tigon2;
1706 break; 1706 break;
1707 1707
1708 default: 1708 default:
1709 aprint_error_dev(self, "Unknown chip version: %d\n", 1709 aprint_error_dev(self, "Unknown chip version: %d\n",
1710 sc->ti_hwrev); 1710 sc->ti_hwrev);
1711 goto fail2; 1711 goto fail2;
1712 } 1712 }
1713 1713
1714 /* Zero out the NIC's on-board SRAM. */ 1714 /* Zero out the NIC's on-board SRAM. */
1715 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 1715 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL);
1716 1716
1717 /* Init again -- zeroing memory may have clobbered some registers. */ 1717 /* Init again -- zeroing memory may have clobbered some registers. */
1718 if (ti_chipinit(sc)) { 1718 if (ti_chipinit(sc)) {
1719 aprint_error_dev(self, "chip initialization failed\n"); 1719 aprint_error_dev(self, "chip initialization failed\n");
1720 goto fail2; 1720 goto fail2;
1721 } 1721 }
1722 1722
1723 /* 1723 /*
1724 * Get station address from the EEPROM. Note: the manual states 1724 * Get station address from the EEPROM. Note: the manual states
1725 * that the MAC address is at offset 0x8c, however the data is 1725 * that the MAC address is at offset 0x8c, however the data is
1726 * stored as two longwords (since that's how it's loaded into 1726 * stored as two longwords (since that's how it's loaded into
1727 * the NIC). This means the MAC address is actually preceded 1727 * the NIC). This means the MAC address is actually preceded
1728 * by two zero bytes. We need to skip over those. 1728 * by two zero bytes. We need to skip over those.
1729 */ 1729 */
1730 if (ti_read_eeprom(sc, (void *)&eaddr, 1730 if (ti_read_eeprom(sc, (void *)&eaddr,
1731 TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1731 TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1732 aprint_error_dev(self, "failed to read station address\n"); 1732 aprint_error_dev(self, "failed to read station address\n");
1733 goto fail2; 1733 goto fail2;
1734 } 1734 }
1735 1735
1736 /* 1736 /*
1737 * A Tigon chip was detected. Inform the world. 1737 * A Tigon chip was detected. Inform the world.
1738 */ 1738 */
1739 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr)); 1739 aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(eaddr));
1740 1740
1741 if (pci_dma64_available(pa)) 1741 if (pci_dma64_available(pa))
1742 sc->sc_dmat = pa->pa_dmat64; 1742 sc->sc_dmat = pa->pa_dmat64;
1743 else 1743 else
1744 sc->sc_dmat = pa->pa_dmat; 1744 sc->sc_dmat = pa->pa_dmat;
1745 1745
1746 /* Allocate the general information block and ring buffers. */ 1746 /* Allocate the general information block and ring buffers. */
1747 if ((error = bus_dmamem_alloc(sc->sc_dmat, 1747 if ((error = bus_dmamem_alloc(sc->sc_dmat,
1748 sizeof(struct ti_ring_data), PAGE_SIZE, 0, &dmaseg, 1, &dmanseg, 1748 sizeof(struct ti_ring_data), PAGE_SIZE, 0, &dmaseg, 1, &dmanseg,
1749 BUS_DMA_NOWAIT)) != 0) { 1749 BUS_DMA_NOWAIT)) != 0) {
1750 aprint_error_dev(self, 1750 aprint_error_dev(self,
1751 "can't allocate ring buffer, error = %d\n", error); 1751 "can't allocate ring buffer, error = %d\n", error);
1752 goto fail2; 1752 goto fail2;
1753 } 1753 }
1754 1754
1755 if ((error = bus_dmamem_map(sc->sc_dmat, &dmaseg, dmanseg, 1755 if ((error = bus_dmamem_map(sc->sc_dmat, &dmaseg, dmanseg,
1756 sizeof(struct ti_ring_data), (void **)&sc->ti_rdata, 1756 sizeof(struct ti_ring_data), (void **)&sc->ti_rdata,
1757 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 1757 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
1758 aprint_error_dev(self, 1758 aprint_error_dev(self,
1759 "can't map ring buffer, error = %d\n", error); 1759 "can't map ring buffer, error = %d\n", error);
1760 goto fail2; 1760 goto fail2;
1761 } 1761 }
1762 1762
1763 if ((error = bus_dmamap_create(sc->sc_dmat, 1763 if ((error = bus_dmamap_create(sc->sc_dmat,
1764 sizeof(struct ti_ring_data), 1, 1764 sizeof(struct ti_ring_data), 1,
1765 sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT, 1765 sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT,
1766 &sc->info_dmamap)) != 0) { 1766 &sc->info_dmamap)) != 0) {
1767 aprint_error_dev(self, 1767 aprint_error_dev(self,

cvs diff -r1.4 -r1.5 src/sys/dev/pci/qat/qat.c

--- src/sys/dev/pci/qat/qat.c 2020/02/01 13:48:18 1.4
+++ src/sys/dev/pci/qat/qat.c 2020/03/05 15:33:13 1.5
@@ -1,2029 +1,2029 @@ @@ -1,2029 +1,2029 @@
1/* $NetBSD: qat.c,v 1.4 2020/02/01 13:48:18 riastradh Exp $ */ 1/* $NetBSD: qat.c,v 1.5 2020/03/05 15:33:13 msaitoh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2019 Internet Initiative Japan, Inc. 4 * Copyright (c) 2019 Internet Initiative Japan, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29/* 29/*
30 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved. 30 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
31 * 31 *
32 * Redistribution and use in source and binary forms, with or without 32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions 33 * modification, are permitted provided that the following conditions
34 * are met: 34 * are met:
35 * 35 *
36 * * Redistributions of source code must retain the above copyright 36 * * Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer. 37 * notice, this list of conditions and the following disclaimer.
38 * * Redistributions in binary form must reproduce the above copyright 38 * * Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in 39 * notice, this list of conditions and the following disclaimer in
40 * the documentation and/or other materials provided with the 40 * the documentation and/or other materials provided with the
41 * distribution. 41 * distribution.
42 * * Neither the name of Intel Corporation nor the names of its 42 * * Neither the name of Intel Corporation nor the names of its
43 * contributors may be used to endorse or promote products derived 43 * contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission. 44 * from this software without specific prior written permission.
45 * 45 *
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
47 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 47 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
48 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 48 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
49 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 49 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
50 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 50 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
51 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 51 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
52 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 52 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
56 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 56 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */ 57 */
58 58
59#include <sys/cdefs.h> 59#include <sys/cdefs.h>
60__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.4 2020/02/01 13:48:18 riastradh Exp $"); 60__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.5 2020/03/05 15:33:13 msaitoh Exp $");
61 61
62#include <sys/param.h> 62#include <sys/param.h>
63#include <sys/systm.h> 63#include <sys/systm.h>
64#include <sys/kernel.h> 64#include <sys/kernel.h>
65#include <sys/device.h> 65#include <sys/device.h>
66#include <sys/module.h> 66#include <sys/module.h>
67#include <sys/kmem.h> 67#include <sys/kmem.h>
68#include <sys/mutex.h> 68#include <sys/mutex.h>
69#include <sys/bitops.h> 69#include <sys/bitops.h>
70#include <sys/atomic.h> 70#include <sys/atomic.h>
71#include <sys/mbuf.h> 71#include <sys/mbuf.h>
72#include <sys/cprng.h> 72#include <sys/cprng.h>
73#include <sys/cpu.h> 73#include <sys/cpu.h>
74#include <sys/interrupt.h> 74#include <sys/interrupt.h>
75#include <sys/md5.h> 75#include <sys/md5.h>
76#include <sys/sha1.h> 76#include <sys/sha1.h>
77#include <sys/sha2.h> 77#include <sys/sha2.h>
78 78
79#include <opencrypto/cryptodev.h> 79#include <opencrypto/cryptodev.h>
80#include <opencrypto/cryptosoft.h> 80#include <opencrypto/cryptosoft.h>
81#include <opencrypto/xform.h> 81#include <opencrypto/xform.h>
82 82
83/* XXX same as sys/arch/x86/x86/via_padlock.c */ 83/* XXX same as sys/arch/x86/x86/via_padlock.c */
84#include <opencrypto/cryptosoft_xform.c> 84#include <opencrypto/cryptosoft_xform.c>
85 85
86#include <dev/pci/pcireg.h> 86#include <dev/pci/pcireg.h>
87#include <dev/pci/pcivar.h> 87#include <dev/pci/pcivar.h>
88#include <dev/pci/pcidevs.h> 88#include <dev/pci/pcidevs.h>
89 89
90#include "qatreg.h" 90#include "qatreg.h"
91#include "qatvar.h" 91#include "qatvar.h"
92#include "qat_aevar.h" 92#include "qat_aevar.h"
93 93
94extern struct qat_hw qat_hw_c2xxx; 94extern struct qat_hw qat_hw_c2xxx;
95extern struct qat_hw qat_hw_c3xxx; 95extern struct qat_hw qat_hw_c3xxx;
96extern struct qat_hw qat_hw_c62x; 96extern struct qat_hw qat_hw_c62x;
97extern struct qat_hw qat_hw_d15xx; 97extern struct qat_hw qat_hw_d15xx;
98 98
99static const struct qat_product { 99static const struct qat_product {
100 pci_vendor_id_t qatp_vendor; 100 pci_vendor_id_t qatp_vendor;
101 pci_product_id_t qatp_product; 101 pci_product_id_t qatp_product;
102 const char *qatp_name; 102 const char *qatp_name;
103 enum qat_chip_type qatp_chip; 103 enum qat_chip_type qatp_chip;
104 const struct qat_hw *qatp_hw; 104 const struct qat_hw *qatp_hw;
105} qat_products[] = { 105} qat_products[] = {
106 106
107 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS, 107 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
108 "Intel C2000 QuickAssist Physical Function", 108 "Intel C2000 QuickAssist Physical Function",
109 QAT_CHIP_C2XXX, &qat_hw_c2xxx }, 109 QAT_CHIP_C2XXX, &qat_hw_c2xxx },
110 110
111 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT, 111 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
112 "Intel C3000 QuickAssist Physical Function", 112 "Intel C3000 QuickAssist Physical Function",
113 QAT_CHIP_C3XXX, &qat_hw_c3xxx }, 113 QAT_CHIP_C3XXX, &qat_hw_c3xxx },
114#ifdef notyet 114#ifdef notyet
115 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT_VF, 115 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT_VF,
116 "Intel C3000 QuickAssist Virtual Function", 116 "Intel C3000 QuickAssist Virtual Function",
117 QAT_CHIP_C3XXX_IOV, &qat_hw_c3xxxvf }, 117 QAT_CHIP_C3XXX_IOV, &qat_hw_c3xxxvf },
118#endif 118#endif
119 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT, 119 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
120 "Intel C620/Xeon D-2100 QuickAssist Physical Function", 120 "Intel C620/Xeon D-2100 QuickAssist Physical Function",
121 QAT_CHIP_C62X, &qat_hw_c62x }, 121 QAT_CHIP_C62X, &qat_hw_c62x },
122#ifdef notyet 122#ifdef notyet
123 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT_VF, 123 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT_VF,
124 "Intel C620/Xeon D-2100 QuickAssist Virtual Function", 124 "Intel C620/Xeon D-2100 QuickAssist Virtual Function",
125 QAT_CHIP_C62X_IOV, &qat_hw_c62xvf }, 125 QAT_CHIP_C62X_IOV, &qat_hw_c62xvf },
126#endif 126#endif
127 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT, 127 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
128 "Intel Xeon D-1500 QuickAssist Physical Function", 128 "Intel Xeon D-1500 QuickAssist Physical Function",
129 QAT_CHIP_D15XX, &qat_hw_d15xx }, 129 QAT_CHIP_D15XX, &qat_hw_d15xx },
130#ifdef notyet 130#ifdef notyet
131 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT_VF, 131 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT_VF,
132 "Intel Xeon D-1500 QuickAssist Virtual Function", 132 "Intel Xeon D-1500 QuickAssist Virtual Function",
133 QAT_CHIP_D15XX_IOV, &qat_hw_d15xxvf }, 133 QAT_CHIP_D15XX_IOV, &qat_hw_d15xxvf },
134#endif 134#endif
135 { 0, 0, NULL, 0, NULL }, 135 { 0, 0, NULL, 0, NULL },
136}; 136};
137 137
138/* MD5 - 16 bytes - Initialiser state can be found in RFC 1321 */ 138/* MD5 - 16 bytes - Initialiser state can be found in RFC 1321 */
139static const uint8_t md5_initial_state[QAT_HASH_MD5_STATE_SIZE] = { 139static const uint8_t md5_initial_state[QAT_HASH_MD5_STATE_SIZE] = {
140 0x01, 0x23, 0x45, 0x67, 140 0x01, 0x23, 0x45, 0x67,
141 0x89, 0xab, 0xcd, 0xef, 141 0x89, 0xab, 0xcd, 0xef,
142 0xfe, 0xdc, 0xba, 0x98, 142 0xfe, 0xdc, 0xba, 0x98,
143 0x76, 0x54, 0x32, 0x10, 143 0x76, 0x54, 0x32, 0x10,
144}; 144};
145 145
146/* SHA1 - 20 bytes - Initialiser state can be found in FIPS std 180-2 */ 146/* SHA1 - 20 bytes - Initialiser state can be found in FIPS std 180-2 */
147static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = { 147static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
148 0x67, 0x45, 0x23, 0x01, 148 0x67, 0x45, 0x23, 0x01,
149 0xef, 0xcd, 0xab, 0x89, 149 0xef, 0xcd, 0xab, 0x89,
150 0x98, 0xba, 0xdc, 0xfe, 150 0x98, 0xba, 0xdc, 0xfe,
151 0x10, 0x32, 0x54, 0x76, 151 0x10, 0x32, 0x54, 0x76,
152 0xc3, 0xd2, 0xe1, 0xf0 152 0xc3, 0xd2, 0xe1, 0xf0
153}; 153};
154 154
155/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS std 180-2 */ 155/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS std 180-2 */
156static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = { 156static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
157 0x6a, 0x09, 0xe6, 0x67, 157 0x6a, 0x09, 0xe6, 0x67,
158 0xbb, 0x67, 0xae, 0x85, 158 0xbb, 0x67, 0xae, 0x85,
159 0x3c, 0x6e, 0xf3, 0x72, 159 0x3c, 0x6e, 0xf3, 0x72,
160 0xa5, 0x4f, 0xf5, 0x3a, 160 0xa5, 0x4f, 0xf5, 0x3a,
161 0x51, 0x0e, 0x52, 0x7f, 161 0x51, 0x0e, 0x52, 0x7f,
162 0x9b, 0x05, 0x68, 0x8c, 162 0x9b, 0x05, 0x68, 0x8c,
163 0x1f, 0x83, 0xd9, 0xab, 163 0x1f, 0x83, 0xd9, 0xab,
164 0x5b, 0xe0, 0xcd, 0x19 164 0x5b, 0xe0, 0xcd, 0x19
165}; 165};
166 166
167/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS std 180-2 */ 167/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS std 180-2 */
168static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = { 168static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
169 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 169 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
170 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07, 170 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
171 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, 171 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
172 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 172 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
173 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 173 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
174 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 174 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
175 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 175 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
176 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4 176 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
177}; 177};
178 178
179/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS std 180-2 */ 179/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS std 180-2 */
180static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = { 180static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
181 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 181 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
182 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 182 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
183 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b, 183 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
184 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 184 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
185 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 185 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
186 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, 186 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
187 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 187 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
188 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 188 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
189}; 189};
190 190
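The five tables above are just the standard initial hash values from RFC 1321 and FIPS 180-2 serialized to bytes: MD5 stores its four state words little-endian (A = 0x67452301 becomes 0x01, 0x23, 0x45, 0x67), while the SHA family stores its words big-endian (SHA-1 h0 = 0x67452301 becomes 0x67, 0x45, 0x23, 0x01). A minimal, self-contained sketch of that serialization for SHA-256, illustration only, not driver code:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void)
{
	/* SHA-256 IV words as given in FIPS 180-2. */
	static const uint32_t iv[8] = {
		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
	};
	uint8_t state[32];
	size_t i;

	for (i = 0; i < 8; i++) {
		/* Big-endian byte order, matching sha256_initial_state. */
		state[4 * i + 0] = (uint8_t)(iv[i] >> 24);
		state[4 * i + 1] = (uint8_t)(iv[i] >> 16);
		state[4 * i + 2] = (uint8_t)(iv[i] >> 8);
		state[4 * i + 3] = (uint8_t)(iv[i] >> 0);
	}
	assert(state[0] == 0x6a && state[1] == 0x09);
	assert(state[30] == 0xcd && state[31] == 0x19);
	return 0;
}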
191/* Hash Algorithm specific structure */ 191/* Hash Algorithm specific structure */
192 192
193static const struct qat_sym_hash_alg_info md5_info = { 193static const struct qat_sym_hash_alg_info md5_info = {
194 QAT_HASH_MD5_DIGEST_SIZE, 194 QAT_HASH_MD5_DIGEST_SIZE,
195 QAT_HASH_MD5_BLOCK_SIZE, 195 QAT_HASH_MD5_BLOCK_SIZE,
196 md5_initial_state, 196 md5_initial_state,
197 QAT_HASH_MD5_STATE_SIZE, 197 QAT_HASH_MD5_STATE_SIZE,
198 &swcr_auth_hash_hmac_md5_96, 198 &swcr_auth_hash_hmac_md5_96,
199 offsetof(MD5_CTX, state), 199 offsetof(MD5_CTX, state),
200 4, 200 4,
201}; 201};
202 202
203static const struct qat_sym_hash_alg_info sha1_info = { 203static const struct qat_sym_hash_alg_info sha1_info = {
204 QAT_HASH_SHA1_DIGEST_SIZE, 204 QAT_HASH_SHA1_DIGEST_SIZE,
205 QAT_HASH_SHA1_BLOCK_SIZE, 205 QAT_HASH_SHA1_BLOCK_SIZE,
206 sha1_initial_state, 206 sha1_initial_state,
207 QAT_HASH_SHA1_STATE_SIZE, 207 QAT_HASH_SHA1_STATE_SIZE,
208 &swcr_auth_hash_hmac_sha1_96, 208 &swcr_auth_hash_hmac_sha1_96,
209 offsetof(SHA1_CTX, state), 209 offsetof(SHA1_CTX, state),
210 4, 210 4,
211}; 211};
212 212
213static const struct qat_sym_hash_alg_info sha256_info = { 213static const struct qat_sym_hash_alg_info sha256_info = {
214 QAT_HASH_SHA256_DIGEST_SIZE, 214 QAT_HASH_SHA256_DIGEST_SIZE,
215 QAT_HASH_SHA256_BLOCK_SIZE, 215 QAT_HASH_SHA256_BLOCK_SIZE,
216 sha256_initial_state, 216 sha256_initial_state,
217 QAT_HASH_SHA256_STATE_SIZE, 217 QAT_HASH_SHA256_STATE_SIZE,
218 &swcr_auth_hash_hmac_sha2_256, 218 &swcr_auth_hash_hmac_sha2_256,
219 offsetof(SHA256_CTX, state), 219 offsetof(SHA256_CTX, state),
220 4, 220 4,
221}; 221};
222 222
223static const struct qat_sym_hash_alg_info sha384_info = { 223static const struct qat_sym_hash_alg_info sha384_info = {
224 QAT_HASH_SHA384_DIGEST_SIZE, 224 QAT_HASH_SHA384_DIGEST_SIZE,
225 QAT_HASH_SHA384_BLOCK_SIZE, 225 QAT_HASH_SHA384_BLOCK_SIZE,
226 sha384_initial_state, 226 sha384_initial_state,
227 QAT_HASH_SHA384_STATE_SIZE, 227 QAT_HASH_SHA384_STATE_SIZE,
228 &swcr_auth_hash_hmac_sha2_384, 228 &swcr_auth_hash_hmac_sha2_384,
229 offsetof(SHA384_CTX, state), 229 offsetof(SHA384_CTX, state),
230 8, 230 8,
231}; 231};
232 232
233static const struct qat_sym_hash_alg_info sha512_info = { 233static const struct qat_sym_hash_alg_info sha512_info = {
234 QAT_HASH_SHA512_DIGEST_SIZE, 234 QAT_HASH_SHA512_DIGEST_SIZE,
235 QAT_HASH_SHA512_BLOCK_SIZE, 235 QAT_HASH_SHA512_BLOCK_SIZE,
236 sha512_initial_state, 236 sha512_initial_state,
237 QAT_HASH_SHA512_STATE_SIZE, 237 QAT_HASH_SHA512_STATE_SIZE,
238 &swcr_auth_hash_hmac_sha2_512, 238 &swcr_auth_hash_hmac_sha2_512,
239 offsetof(SHA512_CTX, state), 239 offsetof(SHA512_CTX, state),
240 8, 240 8,
241}; 241};
242 242
243static const struct qat_sym_hash_alg_info aes_gcm_info = { 243static const struct qat_sym_hash_alg_info aes_gcm_info = {
244 QAT_HASH_AES_GCM_DIGEST_SIZE, 244 QAT_HASH_AES_GCM_DIGEST_SIZE,
245 QAT_HASH_AES_GCM_BLOCK_SIZE, 245 QAT_HASH_AES_GCM_BLOCK_SIZE,
246 NULL, 0, 246 NULL, 0,
247 NULL, 0, 0, /* XXX */ 247 NULL, 0, 0, /* XXX */
248}; 248};
249 249
250/* Hash QAT specific structures */ 250/* Hash QAT specific structures */
251 251
252static const struct qat_sym_hash_qat_info md5_config = { 252static const struct qat_sym_hash_qat_info md5_config = {
253 HW_AUTH_ALGO_MD5, 253 HW_AUTH_ALGO_MD5,
254 QAT_HASH_MD5_BLOCK_SIZE, 254 QAT_HASH_MD5_BLOCK_SIZE,
255 HW_MD5_STATE1_SZ, 255 HW_MD5_STATE1_SZ,
256 HW_MD5_STATE2_SZ 256 HW_MD5_STATE2_SZ
257}; 257};
258 258
259static const struct qat_sym_hash_qat_info sha1_config = { 259static const struct qat_sym_hash_qat_info sha1_config = {
260 HW_AUTH_ALGO_SHA1, 260 HW_AUTH_ALGO_SHA1,
261 QAT_HASH_SHA1_BLOCK_SIZE, 261 QAT_HASH_SHA1_BLOCK_SIZE,
262 HW_SHA1_STATE1_SZ, 262 HW_SHA1_STATE1_SZ,
263 HW_SHA1_STATE2_SZ 263 HW_SHA1_STATE2_SZ
264}; 264};
265 265
266static const struct qat_sym_hash_qat_info sha256_config = { 266static const struct qat_sym_hash_qat_info sha256_config = {
267 HW_AUTH_ALGO_SHA256, 267 HW_AUTH_ALGO_SHA256,
268 QAT_HASH_SHA256_BLOCK_SIZE, 268 QAT_HASH_SHA256_BLOCK_SIZE,
269 HW_SHA256_STATE1_SZ, 269 HW_SHA256_STATE1_SZ,
270 HW_SHA256_STATE2_SZ 270 HW_SHA256_STATE2_SZ
271}; 271};
272 272
273static const struct qat_sym_hash_qat_info sha384_config = { 273static const struct qat_sym_hash_qat_info sha384_config = {
274 HW_AUTH_ALGO_SHA384, 274 HW_AUTH_ALGO_SHA384,
275 QAT_HASH_SHA384_BLOCK_SIZE, 275 QAT_HASH_SHA384_BLOCK_SIZE,
276 HW_SHA384_STATE1_SZ, 276 HW_SHA384_STATE1_SZ,
277 HW_SHA384_STATE2_SZ 277 HW_SHA384_STATE2_SZ
278}; 278};
279 279
280static const struct qat_sym_hash_qat_info sha512_config = { 280static const struct qat_sym_hash_qat_info sha512_config = {
281 HW_AUTH_ALGO_SHA512, 281 HW_AUTH_ALGO_SHA512,
282 QAT_HASH_SHA512_BLOCK_SIZE, 282 QAT_HASH_SHA512_BLOCK_SIZE,
283 HW_SHA512_STATE1_SZ, 283 HW_SHA512_STATE1_SZ,
284 HW_SHA512_STATE2_SZ 284 HW_SHA512_STATE2_SZ
285}; 285};
286 286
287static const struct qat_sym_hash_qat_info aes_gcm_config = { 287static const struct qat_sym_hash_qat_info aes_gcm_config = {
288 HW_AUTH_ALGO_GALOIS_128, 288 HW_AUTH_ALGO_GALOIS_128,
289 0, 289 0,
290 HW_GALOIS_128_STATE1_SZ, 290 HW_GALOIS_128_STATE1_SZ,
291 HW_GALOIS_H_SZ + 291 HW_GALOIS_H_SZ +
292 HW_GALOIS_LEN_A_SZ + 292 HW_GALOIS_LEN_A_SZ +
293 HW_GALOIS_E_CTR0_SZ 293 HW_GALOIS_E_CTR0_SZ
294}; 294};
295 295
296static const struct qat_sym_hash_def qat_sym_hash_defs[] = { 296static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
297 [QAT_SYM_HASH_MD5] = { &md5_info, &md5_config }, 297 [QAT_SYM_HASH_MD5] = { &md5_info, &md5_config },
298 [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config }, 298 [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
299 [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config }, 299 [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
300 [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config }, 300 [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
301 [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config }, 301 [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
302 [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config }, 302 [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
303}; 303};
304 304
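qat_sym_hash_defs above relies on C99 designated array initializers keyed by the QAT_SYM_HASH_* enum, so a lookup is a single array index rather than a switch. A self-contained sketch of the idiom, with hypothetical names standing in for the driver's structures:

#include <stddef.h>
#include <stdio.h>

enum halg { HALG_MD5, HALG_SHA1, HALG_SHA256, HALG_COUNT };

struct halg_def {
	const char *name;
	size_t digest_size;	/* bytes */
};

/* Designated initializers keep the table order tied to the enum. */
static const struct halg_def halg_defs[HALG_COUNT] = {
	[HALG_MD5]    = { "md5",    16 },
	[HALG_SHA1]   = { "sha1",   20 },
	[HALG_SHA256] = { "sha256", 32 },
};

int
main(void)
{
	const struct halg_def *def = &halg_defs[HALG_SHA256];

	printf("%s digest: %zu bytes\n", def->name, def->digest_size);
	return 0;
}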
305const struct qat_product * 305const struct qat_product *
306 qat_lookup(const struct pci_attach_args *); 306 qat_lookup(const struct pci_attach_args *);
307int qat_match(struct device *, struct cfdata *, void *); 307int qat_match(struct device *, struct cfdata *, void *);
308void qat_attach(struct device *, struct device *, void *); 308void qat_attach(struct device *, struct device *, void *);
309void qat_init(struct device *); 309void qat_init(struct device *);
310int qat_start(struct device *); 310int qat_start(struct device *);
311int qat_detach(struct device *, int); 311int qat_detach(struct device *, int);
312 312
313int qat_alloc_msix_intr(struct qat_softc *, 313int qat_alloc_msix_intr(struct qat_softc *,
314 struct pci_attach_args *); 314 struct pci_attach_args *);
315void * qat_establish_msix_intr(struct qat_softc *, pci_intr_handle_t, 315void * qat_establish_msix_intr(struct qat_softc *, pci_intr_handle_t,
316 int (*)(void *), void *, const char *, int); 316 int (*)(void *), void *, const char *, int);
317int qat_setup_msix_intr(struct qat_softc *); 317int qat_setup_msix_intr(struct qat_softc *);
318 318
319int qat_etr_init(struct qat_softc *); 319int qat_etr_init(struct qat_softc *);
320int qat_etr_bank_init(struct qat_softc *, int); 320int qat_etr_bank_init(struct qat_softc *, int);
321 321
322int qat_etr_ap_bank_init(struct qat_softc *); 322int qat_etr_ap_bank_init(struct qat_softc *);
323void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int); 323void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
324void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *, 324void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
325 uint32_t, int); 325 uint32_t, int);
326void qat_etr_ap_bank_setup_ring(struct qat_softc *, 326void qat_etr_ap_bank_setup_ring(struct qat_softc *,
327 struct qat_ring *); 327 struct qat_ring *);
328int qat_etr_verify_ring_size(uint32_t, uint32_t); 328int qat_etr_verify_ring_size(uint32_t, uint32_t);
329 329
330int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *, 330int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
331 struct qat_ring *); 331 struct qat_ring *);
332int qat_etr_bank_intr(void *); 332int qat_etr_bank_intr(void *);
333 333
334void qat_arb_update(struct qat_softc *, struct qat_bank *); 334void qat_arb_update(struct qat_softc *, struct qat_bank *);
335 335
336struct qat_sym_cookie * 336struct qat_sym_cookie *
337 qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *); 337 qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *);
338void qat_crypto_free_sym_cookie(struct qat_crypto_bank *, 338void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
339 struct qat_sym_cookie *); 339 struct qat_sym_cookie *);
340int qat_crypto_load_buf(struct qat_softc *, struct cryptop *, 340int qat_crypto_load_buf(struct qat_softc *, struct cryptop *,
341 struct qat_sym_cookie *, struct qat_crypto_desc const *, 341 struct qat_sym_cookie *, struct qat_crypto_desc const *,
342 uint8_t *, int, bus_addr_t *); 342 uint8_t *, int, bus_addr_t *);
343int qat_crypto_load_iv(struct qat_sym_cookie *, struct cryptop *, 343int qat_crypto_load_iv(struct qat_sym_cookie *, struct cryptop *,
344 struct cryptodesc *, struct qat_crypto_desc const *); 344 struct cryptodesc *, struct qat_crypto_desc const *);
345int qat_crypto_process(void *, struct cryptop *, int); 345int qat_crypto_process(void *, struct cryptop *, int);
346int qat_crypto_setup_ring(struct qat_softc *, 346int qat_crypto_setup_ring(struct qat_softc *,
347 struct qat_crypto_bank *); 347 struct qat_crypto_bank *);
348int qat_crypto_new_session(void *, uint32_t *, struct cryptoini *); 348int qat_crypto_new_session(void *, uint32_t *, struct cryptoini *);
349int qat_crypto_free_session0(struct qat_crypto *, 349int qat_crypto_free_session0(struct qat_crypto *,
350 struct qat_session *); 350 struct qat_session *);
351void qat_crypto_check_free_session(struct qat_crypto *, 351void qat_crypto_check_free_session(struct qat_crypto *,
352 struct qat_session *); 352 struct qat_session *);
353int qat_crypto_free_session(void *, uint64_t); 353int qat_crypto_free_session(void *, uint64_t);
354int qat_crypto_bank_init(struct qat_softc *, 354int qat_crypto_bank_init(struct qat_softc *,
355 struct qat_crypto_bank *); 355 struct qat_crypto_bank *);
356int qat_crypto_init(struct qat_softc *); 356int qat_crypto_init(struct qat_softc *);
357int qat_crypto_start(struct qat_softc *); 357int qat_crypto_start(struct qat_softc *);
358int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *); 358int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
359 359
360CFATTACH_DECL_NEW(qat, sizeof(struct qat_softc), 360CFATTACH_DECL_NEW(qat, sizeof(struct qat_softc),
361 qat_match, qat_attach, qat_detach, NULL); 361 qat_match, qat_attach, qat_detach, NULL);
362 362
363struct qat_softc *gsc = NULL; 363struct qat_softc *gsc = NULL;
364 364
365#ifdef QAT_DUMP 365#ifdef QAT_DUMP
366int qat_dump = QAT_DUMP; 366int qat_dump = QAT_DUMP;
367#endif 367#endif
368 368
369const struct qat_product * 369const struct qat_product *
370qat_lookup(const struct pci_attach_args *pa) 370qat_lookup(const struct pci_attach_args *pa)
371{ 371{
372 const struct qat_product *qatp; 372 const struct qat_product *qatp;
373 373
374 for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) { 374 for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
375 if (PCI_VENDOR(pa->pa_id) == qatp->qatp_vendor && 375 if (PCI_VENDOR(pa->pa_id) == qatp->qatp_vendor &&
376 PCI_PRODUCT(pa->pa_id) == qatp->qatp_product) 376 PCI_PRODUCT(pa->pa_id) == qatp->qatp_product)
377 return qatp; 377 return qatp;
378 } 378 }
379 return NULL; 379 return NULL;
380} 380}
381 381
382int 382int
383qat_match(struct device *parent, struct cfdata *cf, void *aux) 383qat_match(struct device *parent, struct cfdata *cf, void *aux)
384{ 384{
385 struct pci_attach_args *pa = aux; 385 struct pci_attach_args *pa = aux;
386 386
387 if (qat_lookup(pa) != NULL) 387 if (qat_lookup(pa) != NULL)
388 return 1; 388 return 1;
389 389
390 return 0; 390 return 0;
391} 391}
392 392
393void 393void
394qat_attach(struct device *parent, struct device *self, void *aux) 394qat_attach(struct device *parent, struct device *self, void *aux)
395{ 395{
396 struct qat_softc *sc = device_private(self); 396 struct qat_softc *sc = device_private(self);
397 struct pci_attach_args *pa = aux; 397 struct pci_attach_args *pa = aux;
398 pci_chipset_tag_t pc = pa->pa_pc; 398 pci_chipset_tag_t pc = pa->pa_pc;
399 const struct qat_product *qatp; 399 const struct qat_product *qatp;
400 char cap[256]; 400 char cap[256];
401 pcireg_t cmd, memtype, msixoff, fusectl; 401 pcireg_t cmd, memtype, msixoff, fusectl;
402 bus_size_t msixtbl_offset; 402 bus_size_t msixtbl_offset;
403 int i, bar, msixtbl_bar; 403 int i, bar, msixtbl_bar;
404 404
405 sc->sc_dev = self; 405 sc->sc_dev = self;
406 sc->sc_pc = pc; 406 sc->sc_pc = pc;
407 sc->sc_pcitag = pa->pa_tag; 407 sc->sc_pcitag = pa->pa_tag;
408 408
409 gsc = sc; /* for debug */ 409 gsc = sc; /* for debug */
410 410
411 qatp = qat_lookup(pa); 411 qatp = qat_lookup(pa);
412 KASSERT(qatp != NULL); 412 KASSERT(qatp != NULL);
413 413
414 if (pci_dma64_available(pa)) 414 if (pci_dma64_available(pa))
415 sc->sc_dmat = pa->pa_dmat64; 415 sc->sc_dmat = pa->pa_dmat64;
416 else 416 else
417 sc->sc_dmat = pa->pa_dmat; 417 sc->sc_dmat = pa->pa_dmat;
418 418
419 aprint_naive(": Crypto processor\n"); 419 aprint_naive(": Crypto processor\n");
420 sc->sc_rev = PCI_REVISION(pa->pa_class); 420 sc->sc_rev = PCI_REVISION(pa->pa_class);
421 aprint_normal(": %s (rev. 0x%02x)\n", qatp->qatp_name, sc->sc_rev); 421 aprint_normal(": %s (rev. 0x%02x)\n", qatp->qatp_name, sc->sc_rev);
422 422
423 memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw)); 423 memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
424 424
425 /* Determine active accelerators and engines */ 425 /* Determine active accelerators and engines */
426 sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc); 426 sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
427 sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc); 427 sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
428 428
429 sc->sc_accel_num = 0; 429 sc->sc_accel_num = 0;
430 for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) { 430 for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
431 if (sc->sc_accel_mask & (1 << i)) 431 if (sc->sc_accel_mask & (1 << i))
432 sc->sc_accel_num++; 432 sc->sc_accel_num++;
433 } 433 }
434 sc->sc_ae_num = 0; 434 sc->sc_ae_num = 0;
435 for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) { 435 for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
436 if (sc->sc_ae_mask & (1 << i)) { 436 if (sc->sc_ae_mask & (1 << i)) {
437 sc->sc_ae_num++; 437 sc->sc_ae_num++;
438 } 438 }
439 } 439 }
440 440
441 if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) { 441 if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
442 aprint_error_dev(sc->sc_dev, "couldn't find acceleration\n"); 442 aprint_error_dev(sc->sc_dev, "couldn't find acceleration\n");
443 goto fail; 443 goto fail;
444 } 444 }
445 445
446 KASSERT(sc->sc_accel_num <= MAX_NUM_ACCEL); 446 KASSERT(sc->sc_accel_num <= MAX_NUM_ACCEL);
447 KASSERT(sc->sc_ae_num <= MAX_NUM_AE); 447 KASSERT(sc->sc_ae_num <= MAX_NUM_AE);
448 448
449 /* Determine SKU and capabilities */ 449 /* Determine SKU and capabilities */
450 sc->sc_sku = sc->sc_hw.qhw_get_sku(sc); 450 sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
451 sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc); 451 sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
452 sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc); 452 sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
453 453
454 aprint_normal_dev(sc->sc_dev, 454 aprint_normal_dev(sc->sc_dev,
455 "sku %d accel %d accel_mask 0x%x ae %d ae_mask 0x%x\n", 455 "sku %d accel %d accel_mask 0x%x ae %d ae_mask 0x%x\n",
456 sc->sc_sku, sc->sc_accel_num, sc->sc_accel_mask, 456 sc->sc_sku, sc->sc_accel_num, sc->sc_accel_mask,
457 sc->sc_ae_num, sc->sc_ae_mask); 457 sc->sc_ae_num, sc->sc_ae_mask);
458 snprintb(cap, sizeof(cap), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap); 458 snprintb(cap, sizeof(cap), QAT_ACCEL_CAP_BITS, sc->sc_accel_cap);
459 aprint_normal_dev(sc->sc_dev, "accel capabilities %s\n", cap); 459 aprint_normal_dev(sc->sc_dev, "accel capabilities %s\n", cap);
460 460
461 /* Map BARs */ 461 /* Map BARs */
462 462
463 msixtbl_bar = 0; 463 msixtbl_bar = 0;
464 msixtbl_offset = 0; 464 msixtbl_offset = 0;
465 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, NULL)) { 465 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, NULL)) {
466 pcireg_t msixtbl; 466 pcireg_t msixtbl;
467 msixtbl = pci_conf_read(pc, pa->pa_tag, 467 msixtbl = pci_conf_read(pc, pa->pa_tag,
468 msixoff + PCI_MSIX_TBLOFFSET); 468 msixoff + PCI_MSIX_TBLOFFSET);
469 msixtbl_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK; 469 msixtbl_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
470 msixtbl_bar = PCI_MAPREG_START + 470 msixtbl_bar = PCI_MAPREG_START +
471 ((msixtbl & PCI_MSIX_TBLBIR_MASK) << 2); 471 ((msixtbl & PCI_MSIX_TBLBIR_MASK) << 2);
472 } 472 }
473 473
474 i = 0; 474 i = 0;
475 if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) { 475 if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
476 KASSERT(sc->sc_hw.qhw_sram_bar_id == 0); 476 KASSERT(sc->sc_hw.qhw_sram_bar_id == 0);
477 fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG); 477 fusectl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, FUSECTL_REG);
478 /* Skip SRAM BAR */ 478 /* Skip SRAM BAR */
479 i = (fusectl & FUSECTL_MASK) ? 1 : 0; 479 i = (fusectl & FUSECTL_MASK) ? 1 : 0;
480 } 480 }
481 for (bar = PCI_MAPREG_START; bar <= PCI_MAPREG_END; bar += 4) { 481 for (bar = PCI_MAPREG_START; bar <= PCI_MAPREG_END; bar += 4) {
482 bus_size_t size; 482 bus_size_t size;
483 bus_addr_t addr; 483 bus_addr_t addr;
484 484
485 if (pci_mapreg_probe(pc, pa->pa_tag, bar, &memtype) == 0) 485 if (pci_mapreg_probe(pc, pa->pa_tag, bar, &memtype) == 0)
486 continue; 486 continue;
487 487
488 if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) 488 if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM)
489 continue; 489 continue;
490 490
491 /* MSI-X table will be mapped by pci_msix_alloc_map */ 491 /* MSI-X table will be mapped by pci_msix_alloc_map */
492 if (bar == msixtbl_bar) 492 if (bar == msixtbl_bar)
493 size = msixtbl_offset; 493 size = msixtbl_offset;
494 else 494 else
495 size = 0; 495 size = 0;
496 496
497 if (pci_mapreg_submap(pa, bar, memtype, 0, size, 0, 497 if (pci_mapreg_submap(pa, bar, memtype, 0, size, 0,
498 &sc->sc_csrt[i], &sc->sc_csrh[i], &addr, &sc->sc_csrs[i])) { 498 &sc->sc_csrt[i], &sc->sc_csrh[i], &addr, &sc->sc_csrs[i])) {
499 aprint_error_dev(sc->sc_dev, 499 aprint_error_dev(sc->sc_dev,
500 "couldn't map bar 0x%02x\n", bar); 500 "couldn't map bar 0x%02x\n", bar);
501 goto fail; 501 goto fail;
502 } 502 }
503 503
504 aprint_verbose_dev(sc->sc_dev, 504 aprint_verbose_dev(sc->sc_dev,
505 "region #%d bar 0x%02x size 0x%x at 0x%llx" 505 "region #%d bar 0x%02x size 0x%x at 0x%llx"
506 " mapped to %p\n", i, bar, 506 " mapped to %p\n", i, bar,
507 (int)sc->sc_csrs[i], (unsigned long long)addr, 507 (int)sc->sc_csrs[i], (unsigned long long)addr,
508 bus_space_vaddr(sc->sc_csrt[i], sc->sc_csrh[i])); 508 bus_space_vaddr(sc->sc_csrt[i], sc->sc_csrh[i]));
509 509
510 i++; 510 i++;
511 if (PCI_MAPREG_MEM_TYPE(memtype) == PCI_MAPREG_MEM_TYPE_64BIT) 511 if (PCI_MAPREG_MEM_TYPE(memtype) == PCI_MAPREG_MEM_TYPE_64BIT)
512 bar += 4; 512 bar += 4;
513 } 513 }
514 514
515 /* XXX Enable advanced error reporting */ 515 /* XXX Enable advanced error reporting */
516 516
517 /* Enable bus mastering */ 517 /* Enable bus mastering */
518 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 518 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
519 cmd |= PCI_COMMAND_MASTER_ENABLE; 519 cmd |= PCI_COMMAND_MASTER_ENABLE;
520 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); 520 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
521 521
522 if (qat_alloc_msix_intr(sc, pa)) 522 if (qat_alloc_msix_intr(sc, pa))
523 goto fail; 523 goto fail;
524 524
525 config_mountroot(self, qat_init); 525 config_mountroot(self, qat_init);
526 526
527fail: 527fail:
528 /* XXX */ 528 /* XXX */
529 return; 529 return;
530} 530}
531 531
532void 532void
533qat_init(struct device *self) 533qat_init(struct device *self)
534{ 534{
535 int error; 535 int error;
536 struct qat_softc *sc = device_private(self); 536 struct qat_softc *sc = device_private(self);
537 537
538 aprint_verbose_dev(sc->sc_dev, "Initializing ETR\n"); 538 aprint_verbose_dev(sc->sc_dev, "Initializing ETR\n");
539 error = qat_etr_init(sc); 539 error = qat_etr_init(sc);
540 if (error) { 540 if (error) {
541 aprint_error_dev(sc->sc_dev, 541 aprint_error_dev(sc->sc_dev,
542 "Could not initialize ETR: %d\n", error); 542 "Could not initialize ETR: %d\n", error);
543 return; 543 return;
544 } 544 }
545 545
546 aprint_verbose_dev(sc->sc_dev, "Initializing admin comms\n"); 546 aprint_verbose_dev(sc->sc_dev, "Initializing admin comms\n");
547 if (sc->sc_hw.qhw_init_admin_comms != NULL && 547 if (sc->sc_hw.qhw_init_admin_comms != NULL &&
548 (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) { 548 (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
549 aprint_error_dev(sc->sc_dev, 549 aprint_error_dev(sc->sc_dev,
550 "Could not initialize admin comms: %d\n", error); 550 "Could not initialize admin comms: %d\n", error);
551 return; 551 return;
552 } 552 }
553 553
554 aprint_verbose_dev(sc->sc_dev, "Initializing hw arbiter\n"); 554 aprint_verbose_dev(sc->sc_dev, "Initializing hw arbiter\n");
555 if (sc->sc_hw.qhw_init_arb != NULL && 555 if (sc->sc_hw.qhw_init_arb != NULL &&
556 (error = sc->sc_hw.qhw_init_arb(sc)) != 0) { 556 (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
557 aprint_error_dev(sc->sc_dev, 557 aprint_error_dev(sc->sc_dev,
558 "Could not initialize hw arbiter: %d\n", error); 558 "Could not initialize hw arbiter: %d\n", error);
559 return; 559 return;
560 } 560 }
561 561
562 aprint_verbose_dev(sc->sc_dev, "Initializing acceleration engine\n"); 562 aprint_verbose_dev(sc->sc_dev, "Initializing acceleration engine\n");
563 error = qat_ae_init(sc); 563 error = qat_ae_init(sc);
564 if (error) { 564 if (error) {
565 aprint_error_dev(sc->sc_dev, 565 aprint_error_dev(sc->sc_dev,
566 "Could not initialize Acceleration Engine: %d\n", error); 566 "Could not initialize Acceleration Engine: %d\n", error);
567 return; 567 return;
568 } 568 }
569 569
570 aprint_verbose_dev(sc->sc_dev, "Loading acceleration engine firmware\n"); 570 aprint_verbose_dev(sc->sc_dev, "Loading acceleration engine firmware\n");
571 error = qat_aefw_load(sc); 571 error = qat_aefw_load(sc);
572 if (error) { 572 if (error) {
573 aprint_error_dev(sc->sc_dev, 573 aprint_error_dev(sc->sc_dev,
574 "Could not load firmware: %d\n", error); 574 "Could not load firmware: %d\n", error);
575 return; 575 return;
576 } 576 }
577 577
578 aprint_verbose_dev(sc->sc_dev, "Establishing interrupts\n"); 578 aprint_verbose_dev(sc->sc_dev, "Establishing interrupts\n");
579 error = qat_setup_msix_intr(sc); 579 error = qat_setup_msix_intr(sc);
580 if (error) { 580 if (error) {
581 aprint_error_dev(sc->sc_dev, 581 aprint_error_dev(sc->sc_dev,
582 "Could not setup interrupts: %d\n", error); 582 "Could not setup interrupts: %d\n", error);
583 return; 583 return;
584 } 584 }
585 585
586 sc->sc_hw.qhw_enable_intr(sc); 586 sc->sc_hw.qhw_enable_intr(sc);
587 587
588 error = qat_crypto_init(sc); 588 error = qat_crypto_init(sc);
589 if (error) { 589 if (error) {
590 aprint_error_dev(sc->sc_dev, 590 aprint_error_dev(sc->sc_dev,
591 "Could not initialize service: %d\n", error); 591 "Could not initialize service: %d\n", error);
592 return; 592 return;
593 } 593 }
594 594
595 aprint_verbose_dev(sc->sc_dev, "Enabling error correction\n"); 595 aprint_verbose_dev(sc->sc_dev, "Enabling error correction\n");
596 if (sc->sc_hw.qhw_enable_error_correction != NULL) 596 if (sc->sc_hw.qhw_enable_error_correction != NULL)
597 sc->sc_hw.qhw_enable_error_correction(sc); 597 sc->sc_hw.qhw_enable_error_correction(sc);
598 598
599 aprint_verbose_dev(sc->sc_dev, "Initializing watchdog timer\n"); 599 aprint_verbose_dev(sc->sc_dev, "Initializing watchdog timer\n");
600 if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL && 600 if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
601 (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) { 601 (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
602 aprint_error_dev(sc->sc_dev, 602 aprint_error_dev(sc->sc_dev,
603 "Could not initialize watchdog timer: %d\n", error); 603 "Could not initialize watchdog timer: %d\n", error);
604 return; 604 return;
605 } 605 }
606 606
607 error = qat_start(self); 607 error = qat_start(self);
608 if (error) { 608 if (error) {
609 aprint_error_dev(sc->sc_dev, 609 aprint_error_dev(sc->sc_dev,
610 "Could not start: %d\n", error); 610 "Could not start: %d\n", error);
611 return; 611 return;
612 } 612 }
613} 613}
614 614
615int 615int
616qat_start(struct device *self) 616qat_start(struct device *self)
617{ 617{
618 struct qat_softc *sc = device_private(self); 618 struct qat_softc *sc = device_private(self);
619 int error; 619 int error;
620 620
621 error = qat_ae_start(sc); 621 error = qat_ae_start(sc);
622 if (error) 622 if (error)
623 return error; 623 return error;
624  624
625 if (sc->sc_hw.qhw_send_admin_init != NULL && 625 if (sc->sc_hw.qhw_send_admin_init != NULL &&
626 (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) { 626 (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
627 return error; 627 return error;
628 } 628 }
629 629
630 error = qat_crypto_start(sc); 630 error = qat_crypto_start(sc);
631 if (error) 631 if (error)
632 return error; 632 return error;
633 633
634 return 0; 634 return 0;
635} 635}
636 636
637int 637int
638qat_detach(struct device *self, int flags) 638qat_detach(struct device *self, int flags)
639{ 639{
640 640
641 return 0; 641 return 0;
642} 642}
643 643
644void * 644void *
645qat_alloc_mem(size_t size) 645qat_alloc_mem(size_t size)
646{ 646{
647 size_t *sptr; 647 size_t *sptr;
648 sptr = kmem_zalloc(size + sizeof(size), KM_SLEEP); 648 sptr = kmem_zalloc(size + sizeof(size), KM_SLEEP);
649 *sptr = size; 649 *sptr = size;
650 return ++sptr; 650 return ++sptr;
651} 651}
652 652
653void 653void
654qat_free_mem(void *ptr) 654qat_free_mem(void *ptr)
655{ 655{
656 size_t *sptr = ptr, size; 656 size_t *sptr = ptr, size;
657 size = *(--sptr); 657 size = *(--sptr);
658 kmem_free(sptr, size + sizeof(size)); 658 kmem_free(sptr, size + sizeof(size));
659} 659}
660 660
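qat_alloc_mem()/qat_free_mem() above prepend a size_t header recording the allocation length, because kmem_free(9) requires the original size back while callers want a sizeless free. A userland sketch of the same trick, with calloc/free standing in for kmem_zalloc/kmem_free, purely for illustration:

#include <stdlib.h>
#include <string.h>

static void *
alloc_mem(size_t size)
{
	/* Reserve one extra size_t and record the length in it. */
	size_t *sptr = calloc(1, size + sizeof(size));

	if (sptr == NULL)
		return NULL;
	*sptr = size;
	return sptr + 1;		/* caller sees only the payload */
}

static void
free_mem(void *ptr)
{
	size_t *sptr = (size_t *)ptr - 1;	/* step back to the header */
	size_t size = *sptr;

	/*
	 * kmem_free(9) needs `size + sizeof(size)' here; libc free()
	 * does not, so the recorded size only drives the scrub below.
	 */
	memset(sptr, 0, size + sizeof(size));
	free(sptr);
}

int
main(void)
{
	char *p = alloc_mem(128);

	if (p != NULL)
		free_mem(p);
	return 0;
}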
661void 661void
662qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm) 662qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
663{ 663{
664 664
665 bus_dmamap_unload(sc->sc_dmat, qdm->qdm_dma_map); 665 bus_dmamap_unload(sc->sc_dmat, qdm->qdm_dma_map);
666 bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map); 666 bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
667 bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, qdm->qdm_dma_size); 667 bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, qdm->qdm_dma_size);
668 bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1); 668 bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
669 explicit_memset(qdm, 0, sizeof(*qdm)); 669 explicit_memset(qdm, 0, sizeof(*qdm));
670} 670}
671 671
672int 672int
673qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm, 673qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
674 bus_size_t size, bus_size_t alignment) 674 bus_size_t size, bus_size_t alignment)
675{ 675{
676 int error = 0, nseg; 676 int error = 0, nseg;
677 677
678 error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, 678 error = bus_dmamem_alloc(sc->sc_dmat, size, alignment,
679 0, &qdm->qdm_dma_seg, 1, &nseg, BUS_DMA_NOWAIT); 679 0, &qdm->qdm_dma_seg, 1, &nseg, BUS_DMA_NOWAIT);
680 if (error) { 680 if (error) {
681 aprint_error_dev(sc->sc_dev, 681 aprint_error_dev(sc->sc_dev,
682 "couldn't allocate dmamem, error = %d\n", error); 682 "couldn't allocate dmamem, error = %d\n", error);
683 goto fail_0; 683 goto fail_0;
684 } 684 }
685 KASSERT(nseg == 1); 685 KASSERT(nseg == 1);
686 error = bus_dmamem_map(sc->sc_dmat, &qdm->qdm_dma_seg, 686 error = bus_dmamem_map(sc->sc_dmat, &qdm->qdm_dma_seg,
687 nseg, size, &qdm->qdm_dma_vaddr, 687 nseg, size, &qdm->qdm_dma_vaddr,
688 BUS_DMA_COHERENT | BUS_DMA_NOWAIT); 688 BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
689 if (error) { 689 if (error) {
690 aprint_error_dev(sc->sc_dev, 690 aprint_error_dev(sc->sc_dev,
691 "couldn't map dmamem, error = %d\n", error); 691 "couldn't map dmamem, error = %d\n", error);
692 goto fail_1; 692 goto fail_1;
693 } 693 }
694 qdm->qdm_dma_size = size; 694 qdm->qdm_dma_size = size;
695 error = bus_dmamap_create(sc->sc_dmat, size, nseg, size, 695 error = bus_dmamap_create(sc->sc_dmat, size, nseg, size,
696 0, BUS_DMA_NOWAIT, &qdm->qdm_dma_map); 696 0, BUS_DMA_NOWAIT, &qdm->qdm_dma_map);
697 if (error) { 697 if (error) {
698 aprint_error_dev(sc->sc_dev, 698 aprint_error_dev(sc->sc_dev,
699 "couldn't create dmamem map, error = %d\n", error); 699 "couldn't create dmamem map, error = %d\n", error);
700 goto fail_2; 700 goto fail_2;
701 } 701 }
702 error = bus_dmamap_load(sc->sc_dmat, qdm->qdm_dma_map, 702 error = bus_dmamap_load(sc->sc_dmat, qdm->qdm_dma_map,
703 qdm->qdm_dma_vaddr, size, NULL, BUS_DMA_NOWAIT); 703 qdm->qdm_dma_vaddr, size, NULL, BUS_DMA_NOWAIT);
704 if (error) { 704 if (error) {
705 aprint_error_dev(sc->sc_dev, 705 aprint_error_dev(sc->sc_dev,
706 "couldn't load dmamem map, error = %d\n", error); 706 "couldn't load dmamem map, error = %d\n", error);
707 goto fail_3; 707 goto fail_3;
708 } 708 }
709 709
710 return 0; 710 return 0;
711fail_3: 711fail_3:
712 bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map); 712 bus_dmamap_destroy(sc->sc_dmat, qdm->qdm_dma_map);
713 qdm->qdm_dma_map = NULL; 713 qdm->qdm_dma_map = NULL;
714fail_2: 714fail_2:
715 bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, size); 715 bus_dmamem_unmap(sc->sc_dmat, qdm->qdm_dma_vaddr, size);
716 qdm->qdm_dma_vaddr = NULL; 716 qdm->qdm_dma_vaddr = NULL;
717 qdm->qdm_dma_size = 0; 717 qdm->qdm_dma_size = 0;
718fail_1: 718fail_1:
719 bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1); 719 bus_dmamem_free(sc->sc_dmat, &qdm->qdm_dma_seg, 1);
720fail_0: 720fail_0:
721 return error; 721 return error;
722} 722}
723 723
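A hedged usage sketch for the pair above: qat_alloc_dmamem() performs the four-step bus_dma ladder (alloc, map, create, load) with goto-based unwinding, and qat_free_dmamem() releases the same resources in reverse order. The function below is illustrative only, not driver API:

static int
example_ring_dma(struct qat_softc *sc, struct qat_dmamem *qdm)
{
	int error;

	/* One coherent segment, page-sized and page-aligned. */
	error = qat_alloc_dmamem(sc, qdm, PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	/*
	 * Device-visible address: qdm->qdm_dma_map->dm_segs[0].ds_addr
	 * CPU-visible mapping:    qdm->qdm_dma_vaddr
	 */

	qat_free_dmamem(sc, qdm);	/* unload, destroy, unmap, free */
	return 0;
}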
724int 724int
725qat_alloc_msix_intr(struct qat_softc *sc, struct pci_attach_args *pa) 725qat_alloc_msix_intr(struct qat_softc *sc, struct pci_attach_args *pa)
726{ 726{
727 u_int *ih_map, vec; 727 u_int *ih_map, vec;
728 int error, count, ihi; 728 int error, count, ihi;
729 729
730 count = sc->sc_hw.qhw_num_banks + 1; 730 count = sc->sc_hw.qhw_num_banks + 1;
731 ih_map = qat_alloc_mem(sizeof(*ih_map) * count); 731 ih_map = qat_alloc_mem(sizeof(*ih_map) * count);
732 ihi = 0; 732 ihi = 0;
733 733
734 for (vec = 0; vec < sc->sc_hw.qhw_num_banks; vec++) 734 for (vec = 0; vec < sc->sc_hw.qhw_num_banks; vec++)
735 ih_map[ihi++] = vec; 735 ih_map[ihi++] = vec;
736 736
737 vec += sc->sc_hw.qhw_msix_ae_vec_gap; 737 vec += sc->sc_hw.qhw_msix_ae_vec_gap;
738 ih_map[ihi++] = vec; 738 ih_map[ihi++] = vec;
739 739
740 error = pci_msix_alloc_map(pa, &sc->sc_ih, ih_map, count); 740 error = pci_msix_alloc_map(pa, &sc->sc_ih, ih_map, count);
741 qat_free_mem(ih_map); 741 qat_free_mem(ih_map);
742 if (error) { 742 if (error) {
743 aprint_error_dev(sc->sc_dev, "couldn't allocate msix %d: %d\n", 743 aprint_error_dev(sc->sc_dev, "couldn't allocate msix %d: %d\n",
744 count, error); 744 count, error);
745 } 745 }
746 746
747 return error; 747 return error;
748} 748}
749 749
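qat_alloc_msix_intr() above builds a vector map with one entry per ETR bank followed by a single acceleration-engine vector pushed past qhw_msix_ae_vec_gap. A standalone sketch of the resulting layout, with assumed example values (16 banks, gap of 8, giving vectors 0-15 plus 24):

#include <stdio.h>

int
main(void)
{
	/* Assumed example values, not taken from any qat_hw table. */
	enum { NUM_BANKS = 16, AE_VEC_GAP = 8 };
	unsigned map[NUM_BANKS + 1];
	unsigned vec, n = 0;

	for (vec = 0; vec < NUM_BANKS; vec++)
		map[n++] = vec;		/* one vector per ETR bank */
	vec += AE_VEC_GAP;
	map[n++] = vec;			/* the lone AE-cluster vector */

	printf("bank vectors 0..%u, ae cluster vector %u\n",
	    map[NUM_BANKS - 1], map[NUM_BANKS]);
	return 0;
}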
750void * 750void *
751qat_establish_msix_intr(struct qat_softc *sc, pci_intr_handle_t ih, 751qat_establish_msix_intr(struct qat_softc *sc, pci_intr_handle_t ih,
752 int (*func)(void *), void *arg, 752 int (*func)(void *), void *arg,
753 const char *name, int index) 753 const char *name, int index)
754{ 754{
755 kcpuset_t *affinity; 755 kcpuset_t *affinity;
756 int error; 756 int error;
757 char buf[PCI_INTRSTR_LEN]; 757 char buf[PCI_INTRSTR_LEN];
758 char intrxname[INTRDEVNAMEBUF]; 758 char intrxname[INTRDEVNAMEBUF];
759 const char *intrstr; 759 const char *intrstr;
760 void *cookie; 760 void *cookie;
761 761
762 snprintf(intrxname, sizeof(intrxname), "%s%s%d", 762 snprintf(intrxname, sizeof(intrxname), "%s%s%d",
763 device_xname(sc->sc_dev), name, index); 763 device_xname(sc->sc_dev), name, index);
764 764
765 intrstr = pci_intr_string(sc->sc_pc, ih, buf, sizeof(buf)); 765 intrstr = pci_intr_string(sc->sc_pc, ih, buf, sizeof(buf));
766 766
767 pci_intr_setattr(sc->sc_pc, &ih, PCI_INTR_MPSAFE, true); 767 pci_intr_setattr(sc->sc_pc, &ih, PCI_INTR_MPSAFE, true);
768 768
769 cookie = pci_intr_establish_xname(sc->sc_pc, ih, 769 cookie = pci_intr_establish_xname(sc->sc_pc, ih,
770 IPL_NET, func, arg, intrxname); 770 IPL_NET, func, arg, intrxname);
771 771
772 aprint_normal_dev(sc->sc_dev, "%s%d interrupting at %s\n", 772 aprint_normal_dev(sc->sc_dev, "%s%d interrupting at %s\n",
773 name, index, intrstr); 773 name, index, intrstr);
774 774
775 kcpuset_create(&affinity, true); 775 kcpuset_create(&affinity, true);
776 kcpuset_set(affinity, index % ncpu); 776 kcpuset_set(affinity, index % ncpu);
777 error = interrupt_distribute(cookie, affinity, NULL); 777 error = interrupt_distribute(cookie, affinity, NULL);
778 if (error) { 778 if (error) {
779 aprint_error_dev(sc->sc_dev, 779 aprint_error_dev(sc->sc_dev,
780 "couldn't distribute interrupt: %s%d\n", name, index); 780 "couldn't distribute interrupt: %s%d\n", name, index);
781 } 781 }
782 kcpuset_destroy(affinity); 782 kcpuset_destroy(affinity);
783 783
784 return cookie; 784 return cookie;
785} 785}
786 786
787int 787int
788qat_setup_msix_intr(struct qat_softc *sc) 788qat_setup_msix_intr(struct qat_softc *sc)
789{ 789{
790 int i; 790 int i;
791 pci_intr_handle_t ih; 791 pci_intr_handle_t ih;
792 792
793 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) { 793 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
794 struct qat_bank *qb = &sc->sc_etr_banks[i]; 794 struct qat_bank *qb = &sc->sc_etr_banks[i];
795 ih = sc->sc_ih[i]; 795 ih = sc->sc_ih[i];
796 796
797 qb->qb_ih_cookie = qat_establish_msix_intr(sc, ih, 797 qb->qb_ih_cookie = qat_establish_msix_intr(sc, ih,
798 qat_etr_bank_intr, qb, "bank", i); 798 qat_etr_bank_intr, qb, "bank", i);
799 if (qb->qb_ih_cookie == NULL) 799 if (qb->qb_ih_cookie == NULL)
800 return ENOMEM; 800 return ENOMEM;
801 } 801 }
802 802
803 sc->sc_ae_ih_cookie = qat_establish_msix_intr(sc, sc->sc_ih[i], 803 sc->sc_ae_ih_cookie = qat_establish_msix_intr(sc, sc->sc_ih[i],
804 qat_ae_cluster_intr, sc, "aeclust", 0); 804 qat_ae_cluster_intr, sc, "aeclust", 0);
805 if (sc->sc_ae_ih_cookie == NULL) 805 if (sc->sc_ae_ih_cookie == NULL)
806 return ENOMEM; 806 return ENOMEM;
807 807
808 return 0; 808 return 0;
809} 809}
810 810
811int 811int
812qat_etr_init(struct qat_softc *sc) 812qat_etr_init(struct qat_softc *sc)
813{ 813{
814 int i; 814 int i;
815 int error = 0; 815 int error = 0;
816 816
817 sc->sc_etr_banks = qat_alloc_mem( 817 sc->sc_etr_banks = qat_alloc_mem(
818 sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks); 818 sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);
819 819
820 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) { 820 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
821 error = qat_etr_bank_init(sc, i); 821 error = qat_etr_bank_init(sc, i);
822 if (error) { 822 if (error) {
823 goto fail; 823 goto fail;
824 } 824 }
825 } 825 }
826 826
827 if (sc->sc_hw.qhw_num_ap_banks) { 827 if (sc->sc_hw.qhw_num_ap_banks) {
828 sc->sc_etr_ap_banks = qat_alloc_mem( 828 sc->sc_etr_ap_banks = qat_alloc_mem(
829 sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks); 829 sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
830 error = qat_etr_ap_bank_init(sc); 830 error = qat_etr_ap_bank_init(sc);
831 if (error) { 831 if (error) {
832 goto fail; 832 goto fail;
833 } 833 }
834 } 834 }
835 835
836 return 0; 836 return 0;
837 837
838fail: 838fail:
839 if (sc->sc_etr_banks != NULL) { 839 if (sc->sc_etr_banks != NULL) {
840 qat_free_mem(sc->sc_etr_banks); 840 qat_free_mem(sc->sc_etr_banks);
841 sc->sc_etr_banks = NULL; 841 sc->sc_etr_banks = NULL;
842 } 842 }
843 if (sc->sc_etr_ap_banks != NULL) { 843 if (sc->sc_etr_ap_banks != NULL) {
844 qat_free_mem(sc->sc_etr_ap_banks); 844 qat_free_mem(sc->sc_etr_ap_banks);
845 sc->sc_etr_ap_banks = NULL; 845 sc->sc_etr_ap_banks = NULL;
846 } 846 }
847 return error; 847 return error;
848} 848}
849 849
850int 850int
851qat_etr_bank_init(struct qat_softc *sc, int bank) 851qat_etr_bank_init(struct qat_softc *sc, int bank)
852{ 852{
853 struct qat_bank *qb = &sc->sc_etr_banks[bank]; 853 struct qat_bank *qb = &sc->sc_etr_banks[bank];
854 int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap; 854 int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;
855 855
856 KASSERT(bank < sc->sc_hw.qhw_num_banks); 856 KASSERT(bank < sc->sc_hw.qhw_num_banks);
857 857
858 mutex_init(&qb->qb_bank_mtx, MUTEX_DEFAULT, IPL_NET); 858 mutex_init(&qb->qb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
859 859
860 qb->qb_sc = sc; 860 qb->qb_sc = sc;
861 qb->qb_bank = bank; 861 qb->qb_bank = bank;
862 qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT; 862 qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
863 QAT_EVCNT_ATTACH(sc, &qb->qb_ev_rxintr, EVCNT_TYPE_INTR, 863 QAT_EVCNT_ATTACH(sc, &qb->qb_ev_rxintr, EVCNT_TYPE_INTR,
864 qb->qb_ev_rxintr_name, "bank%d rxintr", bank); 864 qb->qb_ev_rxintr_name, "bank%d rxintr", bank);
865 865
866 /* Clean CSRs for all rings within the bank */ 866 /* Clean CSRs for all rings within the bank */
867 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) { 867 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
868 struct qat_ring *qr = &qb->qb_et_rings[i]; 868 struct qat_ring *qr = &qb->qb_et_rings[i];
869 869
870 qat_etr_bank_ring_write_4(sc, bank, i, 870 qat_etr_bank_ring_write_4(sc, bank, i,
871 ETR_RING_CONFIG, 0); 871 ETR_RING_CONFIG, 0);
872 qat_etr_bank_ring_base_write_8(sc, bank, i, 0); 872 qat_etr_bank_ring_base_write_8(sc, bank, i, 0);
873 873
874 if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) { 874 if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
875 qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t)); 875 qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
876 } else if (sc->sc_hw.qhw_tx_rings_mask & 876 } else if (sc->sc_hw.qhw_tx_rings_mask &
877 (1 << (i - tx_rx_gap))) { 877 (1 << (i - tx_rx_gap))) {
878 /* Share inflight counter with rx and tx */ 878 /* Share inflight counter with rx and tx */
879 qr->qr_inflight = 879 qr->qr_inflight =
880 qb->qb_et_rings[i - tx_rx_gap].qr_inflight; 880 qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
881 } 881 }
882 } 882 }
883 883
884 if (sc->sc_hw.qhw_init_etr_intr != NULL) { 884 if (sc->sc_hw.qhw_init_etr_intr != NULL) {
885 sc->sc_hw.qhw_init_etr_intr(sc, bank); 885 sc->sc_hw.qhw_init_etr_intr(sc, bank);
886 } else { 886 } else {
887 /* common code in qat 1.7 */ 887 /* common code in qat 1.7 */
888 qat_etr_bank_write_4(sc, bank, ETR_INT_REG, 888 qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
889 ETR_INT_REG_CLEAR_MASK); 889 ETR_INT_REG_CLEAR_MASK);
890 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank / 890 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
891 ETR_RINGS_PER_INT_SRCSEL; i++) { 891 ETR_RINGS_PER_INT_SRCSEL; i++) {
892 qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL + 892 qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
893 (i * ETR_INT_SRCSEL_NEXT_OFFSET), 893 (i * ETR_INT_SRCSEL_NEXT_OFFSET),
894 ETR_INT_SRCSEL_MASK); 894 ETR_INT_SRCSEL_MASK);
895 } 895 }
896 } 896 }
897 897
898 return 0; 898 return 0;
899} 899}
900 900
901int 901int
902qat_etr_ap_bank_init(struct qat_softc *sc) 902qat_etr_ap_bank_init(struct qat_softc *sc)
903{ 903{
904 int ap_bank; 904 int ap_bank;
905 905
906 for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) { 906 for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
907 struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank]; 907 struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
908 908
909 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK, 909 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
910 ETR_AP_NF_MASK_INIT); 910 ETR_AP_NF_MASK_INIT);
911 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0); 911 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
912 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK, 912 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
913 ETR_AP_NE_MASK_INIT); 913 ETR_AP_NE_MASK_INIT);
914 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0); 914 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
915 915
916 memset(qab, 0, sizeof(*qab)); 916 memset(qab, 0, sizeof(*qab));
917 } 917 }
918 918
919 return 0; 919 return 0;
920} 920}
921 921
922void 922void
923qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask) 923qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
924{ 924{
925 if (set_mask) 925 if (set_mask)
926 *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring)); 926 *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
927 else 927 else
928 *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring)); 928 *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
929} 929}
930 930
931void 931void
932qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest, 932qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
933 uint32_t ring, int set_dest) 933 uint32_t ring, int set_dest)
934{ 934{
935 uint32_t ae_mask; 935 uint32_t ae_mask;
936 uint8_t mailbox, ae, nae; 936 uint8_t mailbox, ae, nae;
937 uint8_t *dest = (uint8_t *)ap_dest; 937 uint8_t *dest = (uint8_t *)ap_dest;
938 938
939 mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring); 939 mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);
940 940
941 nae = 0; 941 nae = 0;
942 ae_mask = sc->sc_ae_mask; 942 ae_mask = sc->sc_ae_mask;
943 for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) { 943 for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
944 if ((ae_mask & (1 << ae)) == 0) 944 if ((ae_mask & (1 << ae)) == 0)
945 continue; 945 continue;
946 946
947 if (set_dest) { 947 if (set_dest) {
948 dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) | 948 dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
949 __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) | 949 __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
950 ETR_AP_DEST_ENABLE; 950 ETR_AP_DEST_ENABLE;
951 } else { 951 } else {
952 dest[nae] = 0; 952 dest[nae] = 0;
953 } 953 }
954 nae++; 954 nae++;
955 if (nae == ETR_MAX_AE_PER_MAILBOX) 955 if (nae == ETR_MAX_AE_PER_MAILBOX)
956 break; 956 break;
957 957
958 } 958 }
959} 959}
960 960
961void 961void
962qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr) 962qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
963{ 963{
964 struct qat_ap_bank *qab; 964 struct qat_ap_bank *qab;
965 int ap_bank; 965 int ap_bank;
966 966
967 if (sc->sc_hw.qhw_num_ap_banks == 0) 967 if (sc->sc_hw.qhw_num_ap_banks == 0)
968 return; 968 return;
969 969
970 ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring); 970 ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
971 KASSERT(ap_bank < sc->sc_hw.qhw_num_ap_banks); 971 KASSERT(ap_bank < sc->sc_hw.qhw_num_ap_banks);
972 qab = &sc->sc_etr_ap_banks[ap_bank]; 972 qab = &sc->sc_etr_ap_banks[ap_bank];
973 973
974 if (qr->qr_cb == NULL) { 974 if (qr->qr_cb == NULL) {
975 qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1); 975 qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
976 if (!qab->qab_ne_dest) { 976 if (!qab->qab_ne_dest) {
977 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest, 977 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
978 qr->qr_ring, 1); 978 qr->qr_ring, 1);
979 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 979 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
980 qab->qab_ne_dest); 980 qab->qab_ne_dest);
981 } 981 }
982 } else { 982 } else {
983 qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1); 983 qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
984 if (!qab->qab_nf_dest) { 984 if (!qab->qab_nf_dest) {
985 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest, 985 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
986 qr->qr_ring, 1); 986 qr->qr_ring, 1);
987 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 987 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
988 qab->qab_nf_dest); 988 qab->qab_nf_dest);
989 } 989 }
990 } 990 }
991} 991}
992 992
993int 993int
994qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs) 994qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
995{ 995{
996 int i = QAT_MIN_RING_SIZE; 996 int i = QAT_MIN_RING_SIZE;
997 997
998 for (; i <= QAT_MAX_RING_SIZE; i++) 998 for (; i <= QAT_MAX_RING_SIZE; i++)
999 if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i)) 999 if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
1000 return i; 1000 return i;
1001 1001
1002 return QAT_DEFAULT_RING_SIZE; 1002 return QAT_DEFAULT_RING_SIZE;
1003} 1003}
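
The loop above accepts a (msg_size, num_msgs) pair only when the product
lands exactly on one of the supported power-of-two ring sizes; anything
else silently falls back to the default encoding. A minimal userland
sketch of the same search, with hypothetical stand-ins for the QAT_*
macros (the real QAT_SIZE_TO_RING_SIZE_IN_BYTES() may encode sizes
differently):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's ring-size macros. */
#define MIN_RING_SIZE		6	/* 2^6  =  64 bytes */
#define MAX_RING_SIZE		16	/* 2^16 = 64 KiB */
#define DEFAULT_RING_SIZE	12
#define RING_SIZE_IN_BYTES(n)	(1UL << (n))

static int
verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
{
	int i;

	for (i = MIN_RING_SIZE; i <= MAX_RING_SIZE; i++)
		if ((unsigned long)msg_size * num_msgs ==
		    RING_SIZE_IN_BYTES(i))
			return i;
	return DEFAULT_RING_SIZE;	/* no exact match */
}

int
main(void)
{
	/* 64-byte messages, 64 slots -> 4096 bytes -> exponent 12 */
	printf("%d\n", verify_ring_size(64, 64));
	return 0;
}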
1004 1004
1005int 1005int
1006qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring, 1006qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
1007 uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg, 1007 uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
1008 const char *name, struct qat_ring **rqr) 1008 const char *name, struct qat_ring **rqr)
1009{ 1009{
1010 struct qat_bank *qb; 1010 struct qat_bank *qb;
1011 struct qat_ring *qr = NULL; 1011 struct qat_ring *qr = NULL;
1012 int error; 1012 int error;
1013 uint32_t ring_size_bytes, ring_config; 1013 uint32_t ring_size_bytes, ring_config;
1014 uint64_t ring_base; 1014 uint64_t ring_base;
1015 uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512; 1015 uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
1016 uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0; 1016 uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
1017 1017
1018 KASSERT(bank < sc->sc_hw.qhw_num_banks); 1018 KASSERT(bank < sc->sc_hw.qhw_num_banks);
1019 1019
1020 /* Allocate a ring from the specified bank */ 1020 /* Allocate a ring from the specified bank */
1021 qb = &sc->sc_etr_banks[bank]; 1021 qb = &sc->sc_etr_banks[bank];
1022 1022
1023 if (ring >= sc->sc_hw.qhw_num_rings_per_bank) 1023 if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
1024 return EINVAL; 1024 return EINVAL;
1025 if (qb->qb_allocated_rings & (1 << ring)) 1025 if (qb->qb_allocated_rings & (1 << ring))
1026 return ENOENT; 1026 return ENOENT;
1027 qr = &qb->qb_et_rings[ring]; 1027 qr = &qb->qb_et_rings[ring];
1028 qb->qb_allocated_rings |= 1 << ring; 1028 qb->qb_allocated_rings |= 1 << ring;
1029 1029
1030 /* Intialize allocated ring */ 1030 /* Initialize allocated ring */
1031 qr->qr_ring = ring; 1031 qr->qr_ring = ring;
1032 qr->qr_bank = bank; 1032 qr->qr_bank = bank;
1033 qr->qr_name = name; 1033 qr->qr_name = name;
1034 qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring; 1034 qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
1035 qr->qr_ring_mask = (1 << ring); 1035 qr->qr_ring_mask = (1 << ring);
1036 qr->qr_cb = cb; 1036 qr->qr_cb = cb;
1037 qr->qr_cb_arg = cb_arg; 1037 qr->qr_cb_arg = cb_arg;
1038 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR, 1038 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxintr, EVCNT_TYPE_INTR,
1039 qr->qr_ev_rxintr_name, "bank%d ring%d rxintr", bank, ring); 1039 qr->qr_ev_rxintr_name, "bank%d ring%d rxintr", bank, ring);
1040 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC, 1040 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_rxmsg, EVCNT_TYPE_MISC,
1041 qr->qr_ev_rxmsg_name, "bank%d ring%d rxmsg", bank, ring); 1041 qr->qr_ev_rxmsg_name, "bank%d ring%d rxmsg", bank, ring);
1042 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txmsg, EVCNT_TYPE_MISC, 1042 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txmsg, EVCNT_TYPE_MISC,
1043 qr->qr_ev_txmsg_name, "bank%d ring%d txmsg", bank, ring); 1043 qr->qr_ev_txmsg_name, "bank%d ring%d txmsg", bank, ring);
1044 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txfull, EVCNT_TYPE_MISC, 1044 QAT_EVCNT_ATTACH(sc, &qr->qr_ev_txfull, EVCNT_TYPE_MISC,
1045 qr->qr_ev_txfull_name, "bank%d ring%d txfull", bank, ring); 1045 qr->qr_ev_txfull_name, "bank%d ring%d txfull", bank, ring);
1046 1046
1047 /* Set up the shadow variables */ 1047 /* Set up the shadow variables */
1048 qr->qr_head = 0; 1048 qr->qr_head = 0;
1049 qr->qr_tail = 0; 1049 qr->qr_tail = 0;
1050 qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size); 1050 qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
1051 qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs); 1051 qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
1052 1052
1053 /* 1053 /*
1054 * To make sure that the ring is aligned to the ring size, allocate 1054 * To make sure that the ring is aligned to the ring size, allocate
1055 * at least 4k and then tell the user it is smaller. 1055 * at least 4k and then tell the user it is smaller.
1056 */ 1056 */
1057 ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size); 1057 ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
1058 ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes); 1058 ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
1059 error = qat_alloc_dmamem(sc, &qr->qr_dma, 1059 error = qat_alloc_dmamem(sc, &qr->qr_dma,
1060 ring_size_bytes, ring_size_bytes); 1060 ring_size_bytes, ring_size_bytes);
1061 if (error) 1061 if (error)
1062 return error; 1062 return error;
1063 1063
1064 KASSERT(qr->qr_dma.qdm_dma_map->dm_nsegs == 1); 1064 KASSERT(qr->qr_dma.qdm_dma_map->dm_nsegs == 1);
1065 1065
1066 qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr; 1066 qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
1067 qr->qr_ring_paddr = qr->qr_dma.qdm_dma_map->dm_segs[0].ds_addr; 1067 qr->qr_ring_paddr = qr->qr_dma.qdm_dma_map->dm_segs[0].ds_addr;
1068 1068
1069 aprint_verbose_dev(sc->sc_dev, 1069 aprint_verbose_dev(sc->sc_dev,
1070 "allocate ring %d of bank %d for %s " 1070 "allocate ring %d of bank %d for %s "
1071 "size %d %d at vaddr %p paddr 0x%llx\n", 1071 "size %d %d at vaddr %p paddr 0x%llx\n",
1072 ring, bank, name, ring_size_bytes, 1072 ring, bank, name, ring_size_bytes,
1073 (int)qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len, 1073 (int)qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len,
1074 qr->qr_ring_vaddr, 1074 qr->qr_ring_vaddr,
1075 (unsigned long long)qr->qr_ring_paddr); 1075 (unsigned long long)qr->qr_ring_paddr);
1076 1076
1077 memset(qr->qr_ring_vaddr, QAT_RING_PATTERN, 1077 memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
1078 qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len); 1078 qr->qr_dma.qdm_dma_map->dm_segs[0].ds_len);
1079 1079
1080 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, 0, 1080 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, 0,
1081 qr->qr_dma.qdm_dma_map->dm_mapsize, 1081 qr->qr_dma.qdm_dma_map->dm_mapsize,
1082 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1082 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1083 1083
1084 if (((uintptr_t)qr->qr_ring_paddr & (ring_size_bytes - 1)) != 0) { 1084 if (((uintptr_t)qr->qr_ring_paddr & (ring_size_bytes - 1)) != 0) {
1085 aprint_error_dev(sc->sc_dev, "ring address not aligned\n"); 1085 aprint_error_dev(sc->sc_dev, "ring address not aligned\n");
1086 return EFAULT; 1086 return EFAULT;
1087 } 1087 }
1088 1088
1089 if (cb == NULL) { 1089 if (cb == NULL) {
1090 ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size); 1090 ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
1091 } else { 1091 } else {
1092 ring_config = 1092 ring_config =
1093 ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne); 1093 ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
1094 } 1094 }
1095 qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config); 1095 qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);
1096 1096
1097 ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size); 1097 ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
1098 qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base); 1098 qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);
1099 1099
1100 if (sc->sc_hw.qhw_init_arb != NULL) 1100 if (sc->sc_hw.qhw_init_arb != NULL)
1101 qat_arb_update(sc, qb); 1101 qat_arb_update(sc, qb);
1102 1102
1103 mutex_init(&qr->qr_ring_mtx, MUTEX_DEFAULT, IPL_NET); 1103 mutex_init(&qr->qr_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1104 1104
1105 qat_etr_ap_bank_setup_ring(sc, qr); 1105 qat_etr_ap_bank_setup_ring(sc, qr);
1106 1106
1107 if (cb != NULL) { 1107 if (cb != NULL) {
1108 uint32_t intr_mask; 1108 uint32_t intr_mask;
1109 1109
1110 qb->qb_intr_mask |= qr->qr_ring_mask; 1110 qb->qb_intr_mask |= qr->qr_ring_mask;
1111 intr_mask = qb->qb_intr_mask; 1111 intr_mask = qb->qb_intr_mask;
1112 1112
1113 aprint_verbose_dev(sc->sc_dev, 1113 aprint_verbose_dev(sc->sc_dev,
1114 "update intr mask for bank %d " 1114 "update intr mask for bank %d "
1115 "(coalescing time %dns): 0x%08x\n", 1115 "(coalescing time %dns): 0x%08x\n",
1116 bank, qb->qb_coalescing_time, intr_mask); 1116 bank, qb->qb_coalescing_time, intr_mask);
1117 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, 1117 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN,
1118 intr_mask); 1118 intr_mask);
1119 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL, 1119 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
1120 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); 1120 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
1121 } 1121 }
1122 1122
1123 *rqr = qr; 1123 *rqr = qr;
1124 1124
1125 return 0; 1125 return 0;
1126} 1126}
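
Note the alignment handling in qat_etr_setup_ring(): the ring must be
naturally aligned to its own power-of-two size, so the DMA memory is
allocated with ring_size_bytes as both the size and the alignment, and
the resulting physical address is re-checked with a mask test before the
base register is programmed. A small sketch of that test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A size is a power of two iff it has exactly one bit set. */
static bool
is_pow2(uint64_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Natural alignment: the low log2(size) bits of the address are 0. */
static bool
is_naturally_aligned(uint64_t paddr, uint64_t size)
{
	return is_pow2(size) && (paddr & (size - 1)) == 0;
}

int
main(void)
{
	printf("%d\n", is_naturally_aligned(0x10000, 0x1000));	/* 1 */
	printf("%d\n", is_naturally_aligned(0x10400, 0x1000));	/* 0 */
	return 0;
}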
1127 1127
1128static inline u_int 1128static inline u_int
1129qat_modulo(u_int data, u_int shift) 1129qat_modulo(u_int data, u_int shift)
1130{ 1130{
1131 u_int div = data >> shift; 1131 u_int div = data >> shift;
1132 u_int mult = div << shift; 1132 u_int mult = div << shift;
1133 return data - mult; 1133 return data - mult;
1134} 1134}
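
qat_modulo(data, shift) is data % (1 << shift) computed without a
divide: shifting down and back up clears the low `shift` bits, and the
subtraction recovers them, equivalent to masking with (1 << shift) - 1.
A tiny self-check:

#include <assert.h>

static unsigned
modulo_pow2(unsigned data, unsigned shift)
{
	return data - ((data >> shift) << shift);
}

int
main(void)
{
	assert(modulo_pow2(0x1234, 8) == 0x34);
	assert(modulo_pow2(4096 + 128, 12) == 128);
	assert(modulo_pow2(100, 4) == (100 & 0xf));
	return 0;
}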
1135 1135
1136int 1136int
1137qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg) 1137qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
1138{ 1138{
1139 uint32_t inflight; 1139 uint32_t inflight;
1140 uint32_t *addr; 1140 uint32_t *addr;
1141 1141
1142 mutex_spin_enter(&qr->qr_ring_mtx); 1142 mutex_spin_enter(&qr->qr_ring_mtx);
1143 1143
1144 inflight = atomic_inc_32_nv(qr->qr_inflight); 1144 inflight = atomic_inc_32_nv(qr->qr_inflight);
1145 if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) { 1145 if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
1146 atomic_dec_32(qr->qr_inflight); 1146 atomic_dec_32(qr->qr_inflight);
1147 QAT_EVCNT_INCR(&qr->qr_ev_txfull); 1147 QAT_EVCNT_INCR(&qr->qr_ev_txfull);
1148 mutex_spin_exit(&qr->qr_ring_mtx); 1148 mutex_spin_exit(&qr->qr_ring_mtx);
1149 return EBUSY; 1149 return EBUSY;
1150 } 1150 }
1151 QAT_EVCNT_INCR(&qr->qr_ev_txmsg); 1151 QAT_EVCNT_INCR(&qr->qr_ev_txmsg);
1152 1152
1153 addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail); 1153 addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);
1154 1154
1155 memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size)); 1155 memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
1156#ifdef QAT_DUMP 1156#ifdef QAT_DUMP
1157 qat_dump_raw(QAT_DUMP_RING_MSG, "put_msg", addr, 1157 qat_dump_raw(QAT_DUMP_RING_MSG, "put_msg", addr,
1158 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size)); 1158 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
1159#endif 1159#endif
1160 1160
1161 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_tail, 1161 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_tail,
1162 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1162 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1163 BUS_DMASYNC_PREWRITE); 1163 BUS_DMASYNC_PREWRITE);
1164 1164
1165 qr->qr_tail = qat_modulo(qr->qr_tail + 1165 qr->qr_tail = qat_modulo(qr->qr_tail +
1166 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1166 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1167 QAT_RING_SIZE_MODULO(qr->qr_ring_size)); 1167 QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1168 1168
1169 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, 1169 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1170 ETR_RING_TAIL_OFFSET, qr->qr_tail); 1170 ETR_RING_TAIL_OFFSET, qr->qr_tail);
1171 1171
1172 mutex_spin_exit(&qr->qr_ring_mtx); 1172 mutex_spin_exit(&qr->qr_ring_mtx);
1173 1173
1174 return 0; 1174 return 0;
1175} 1175}
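
The transmit path above is a bounded producer: reserve a slot by bumping
the inflight counter (backing off with EBUSY when the ring would
overfill), copy the fixed-size message at the tail, advance the tail
modulo the ring size, and finally publish the new tail to the device's
tail register. A stripped-down, single-threaded sketch of the same
shape; locking, the DMA syncs and the doorbell write are omitted, the
wrap uses the mask form of the modulo trick shown earlier, and all names
are hypothetical:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct toy_ring {
	uint8_t  *base;		/* ring memory */
	uint32_t  tail;		/* byte offset of next free slot */
	uint32_t  ring_bytes;	/* power of two */
	uint32_t  msg_bytes;	/* fixed message size */
	uint32_t  inflight;	/* outstanding requests */
	uint32_t  max_inflight;
};

static int
toy_put_msg(struct toy_ring *r, const void *msg)
{
	if (r->inflight + 1 > r->max_inflight)
		return EBUSY;		/* ring full, caller retries */
	r->inflight++;

	memcpy(r->base + r->tail, msg, r->msg_bytes);
	r->tail = (r->tail + r->msg_bytes) & (r->ring_bytes - 1);
	/* real driver: write r->tail to the ETR tail register here */
	return 0;
}

int
main(void)
{
	uint8_t mem[4096];
	struct toy_ring r = { mem, 0, sizeof(mem), 64, 0, 32 };
	uint8_t msg[64] = { 0 };

	return toy_put_msg(&r, msg);
}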
1176 1176
1177int 1177int
1178qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb, 1178qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
1179 struct qat_ring *qr) 1179 struct qat_ring *qr)
1180{ 1180{
1181 int handled = 0; 1181 int handled = 0;
1182 uint32_t *msg; 1182 uint32_t *msg;
1183 uint32_t nmsg = 0; 1183 uint32_t nmsg = 0;
1184 1184
1185 mutex_spin_enter(&qr->qr_ring_mtx); 1185 mutex_spin_enter(&qr->qr_ring_mtx);
1186 1186
1187 QAT_EVCNT_INCR(&qr->qr_ev_rxintr); 1187 QAT_EVCNT_INCR(&qr->qr_ev_rxintr);
1188 1188
1189 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); 1189 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1190 1190
1191 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head, 1191 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
1192 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1192 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1193 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1193 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1194 1194
1195 while (*msg != ETR_RING_EMPTY_ENTRY_SIG) { 1195 while (*msg != ETR_RING_EMPTY_ENTRY_SIG) {
1196 atomic_dec_32(qr->qr_inflight); 1196 atomic_dec_32(qr->qr_inflight);
1197 QAT_EVCNT_INCR(&qr->qr_ev_rxmsg); 1197 QAT_EVCNT_INCR(&qr->qr_ev_rxmsg);
1198 1198
1199 if (qr->qr_cb != NULL) { 1199 if (qr->qr_cb != NULL) {
1200 mutex_spin_exit(&qr->qr_ring_mtx); 1200 mutex_spin_exit(&qr->qr_ring_mtx);
1201 handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg); 1201 handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
1202 mutex_spin_enter(&qr->qr_ring_mtx); 1202 mutex_spin_enter(&qr->qr_ring_mtx);
1203 } 1203 }
1204 1204
1205 *msg = ETR_RING_EMPTY_ENTRY_SIG; 1205 *msg = ETR_RING_EMPTY_ENTRY_SIG;
1206 1206
1207 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head, 1207 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
1208 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1208 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1209 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1209 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1210 1210
1211 qr->qr_head = qat_modulo(qr->qr_head + 1211 qr->qr_head = qat_modulo(qr->qr_head +
1212 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1212 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1213 QAT_RING_SIZE_MODULO(qr->qr_ring_size)); 1213 QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1214 nmsg++; 1214 nmsg++;
1215 1215
1216 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head); 1216 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1217 1217
1218 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head, 1218 bus_dmamap_sync(sc->sc_dmat, qr->qr_dma.qdm_dma_map, qr->qr_head,
1219 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size), 1219 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1220 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1220 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1221 } 1221 }
1222 1222
1223 if (nmsg > 0) { 1223 if (nmsg > 0) {
1224 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring, 1224 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1225 ETR_RING_HEAD_OFFSET, qr->qr_head); 1225 ETR_RING_HEAD_OFFSET, qr->qr_head);
1226 } 1226 }
1227 1227
1228 mutex_spin_exit(&qr->qr_ring_mtx); 1228 mutex_spin_exit(&qr->qr_ring_mtx);
1229 1229
1230 return handled; 1230 return handled;
1231} 1231}
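
Response rings carry no message count; instead every slot starts out
filled with a sentinel (the QAT_RING_PATTERN memset in
qat_etr_setup_ring() appears to establish the byte pattern behind
ETR_RING_EMPTY_ENTRY_SIG), and the consumer handles entries until it
reads the sentinel back, restoring it behind itself so the slot is
recognizably free on the next wrap. A sketch of that signature-driven
drain, simplified to one 32-bit word per slot:

#include <stdint.h>

#define EMPTY_SIG 0x7f7f7f7fUL	/* hypothetical sentinel value */

static unsigned
toy_drain(uint32_t *ring, uint32_t nslots, uint32_t *headp,
    void (*handle)(uint32_t *msg))
{
	unsigned n = 0;
	uint32_t head = *headp;

	while (ring[head] != EMPTY_SIG) {
		handle(&ring[head]);
		ring[head] = EMPTY_SIG;		/* mark slot free again */
		head = (head + 1) % nslots;	/* advance, wrapping */
		n++;
	}
	*headp = head;	/* real driver also writes the head register */
	return n;
}

static void
print_msg(uint32_t *msg)
{
	(void)msg;
}

int
main(void)
{
	uint32_t ring[4] = { 0xdead, 0xbeef, EMPTY_SIG, EMPTY_SIG };
	uint32_t head = 0;

	return toy_drain(ring, 4, &head, print_msg) == 2 ? 0 : 1;
}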
1232 1232
1233int 1233int
1234qat_etr_bank_intr(void *arg) 1234qat_etr_bank_intr(void *arg)
1235{ 1235{
1236 struct qat_bank *qb = arg; 1236 struct qat_bank *qb = arg;
1237 struct qat_softc *sc = qb->qb_sc; 1237 struct qat_softc *sc = qb->qb_sc;
1238 uint32_t estat; 1238 uint32_t estat;
1239 int i, handled = 0; 1239 int i, handled = 0;
1240 1240
1241 mutex_spin_enter(&qb->qb_bank_mtx); 1241 mutex_spin_enter(&qb->qb_bank_mtx);
1242 1242
1243 QAT_EVCNT_INCR(&qb->qb_ev_rxintr); 1243 QAT_EVCNT_INCR(&qb->qb_ev_rxintr);
1244 1244
1245 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0); 1245 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);
1246 1246
1247 /* Now handle all the responses */ 1247 /* Now handle all the responses */
1248 estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT); 1248 estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
1249 estat &= qb->qb_intr_mask; 1249 estat &= qb->qb_intr_mask;
1250 1250
1251 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 1251 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
1252 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time); 1252 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
1253 1253
1254 mutex_spin_exit(&qb->qb_bank_mtx); 1254 mutex_spin_exit(&qb->qb_bank_mtx);
1255 1255
1256 while ((i = ffs32(estat)) != 0) { 1256 while ((i = ffs32(estat)) != 0) {
1257 struct qat_ring *qr = &qb->qb_et_rings[--i]; 1257 struct qat_ring *qr = &qb->qb_et_rings[--i];
1258 estat &= ~(1 << i); 1258 estat &= ~(1 << i);
1259 handled |= qat_etr_ring_intr(sc, qb, qr); 1259 handled |= qat_etr_ring_intr(sc, qb, qr);
1260 } 1260 }
1261 1261
1262 return handled; 1262 return handled;
1263} 1263}
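
Note the complement when reading ETR_E_STAT: the register reports which
rings are empty, so the set bits of ~estat, masked down to the rings
this driver enabled interrupts for, are the rings with pending
responses. The dispatch loop then peels off one ring at a time with
ffs32(), which returns a 1-based index of the lowest set bit. The same
pattern in portable C, using userland ffs() from <strings.h> in place of
the kernel's ffs32():

#include <stdio.h>
#include <strings.h>	/* ffs() */

static void
dispatch_pending(unsigned mask)
{
	int i;

	while ((i = ffs((int)mask)) != 0) {
		i--;			/* 1-based -> 0-based ring number */
		mask &= ~(1u << i);
		printf("service ring %d\n", i);
	}
}

int
main(void)
{
	dispatch_pending(0x12);		/* services rings 1 and 4 */
	return 0;
}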
1264 1264
1265void 1265void
1266qat_arb_update(struct qat_softc *sc, struct qat_bank *qb) 1266qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
1267{ 1267{
1268 1268
1269 qat_arb_ringsrvarben_write_4(sc, qb->qb_bank, 1269 qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
1270 qb->qb_allocated_rings & 0xff); 1270 qb->qb_allocated_rings & 0xff);
1271} 1271}
1272 1272
1273struct qat_sym_cookie * 1273struct qat_sym_cookie *
1274qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb) 1274qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
1275{ 1275{
1276 struct qat_sym_cookie *qsc; 1276 struct qat_sym_cookie *qsc;
1277 1277
1278 mutex_spin_enter(&qcb->qcb_bank_mtx); 1278 mutex_spin_enter(&qcb->qcb_bank_mtx);
1279 1279
1280 if (qcb->qcb_symck_free_count == 0) { 1280 if (qcb->qcb_symck_free_count == 0) {
1281 QAT_EVCNT_INCR(&qcb->qcb_ev_no_symck); 1281 QAT_EVCNT_INCR(&qcb->qcb_ev_no_symck);
1282 mutex_spin_exit(&qcb->qcb_bank_mtx); 1282 mutex_spin_exit(&qcb->qcb_bank_mtx);
1283 return NULL; 1283 return NULL;
1284 } 1284 }
1285 1285
1286 qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count]; 1286 qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];
1287 1287
1288 mutex_spin_exit(&qcb->qcb_bank_mtx); 1288 mutex_spin_exit(&qcb->qcb_bank_mtx);
1289 1289
1290 return qsc; 1290 return qsc;
1291} 1291}
1292 1292
1293void 1293void
1294qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, struct qat_sym_cookie *qsc) 1294qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb, struct qat_sym_cookie *qsc)
1295{ 1295{
1296 1296
1297 mutex_spin_enter(&qcb->qcb_bank_mtx); 1297 mutex_spin_enter(&qcb->qcb_bank_mtx);
1298 qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc; 1298 qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
1299 mutex_spin_exit(&qcb->qcb_bank_mtx); 1299 mutex_spin_exit(&qcb->qcb_bank_mtx);
1300} 1300}
1301 1301
1302 1302
1303void 1303void
1304qat_memcpy_htobe64(void *dst, const void *src, size_t len) 1304qat_memcpy_htobe64(void *dst, const void *src, size_t len)
1305{ 1305{
1306 uint64_t *dst0 = dst; 1306 uint64_t *dst0 = dst;
1307 const uint64_t *src0 = src; 1307 const uint64_t *src0 = src;
1308 size_t i; 1308 size_t i;
1309 1309
1310 KASSERT(len % sizeof(*dst0) == 0); 1310 KASSERT(len % sizeof(*dst0) == 0);
1311 1311
1312 for (i = 0; i < len / sizeof(*dst0); i++) 1312 for (i = 0; i < len / sizeof(*dst0); i++)
1313 *(dst0 + i) = htobe64(*(src0 + i)); 1313 *(dst0 + i) = htobe64(*(src0 + i));
1314} 1314}
1315 1315
1316void 1316void
1317qat_memcpy_htobe32(void *dst, const void *src, size_t len) 1317qat_memcpy_htobe32(void *dst, const void *src, size_t len)
1318{ 1318{
1319 uint32_t *dst0 = dst; 1319 uint32_t *dst0 = dst;
1320 const uint32_t *src0 = src; 1320 const uint32_t *src0 = src;
1321 size_t i; 1321 size_t i;
1322 1322
1323 KASSERT(len % sizeof(*dst0) == 0); 1323 KASSERT(len % sizeof(*dst0) == 0);
1324 1324
1325 for (i = 0; i < len / sizeof(*dst0); i++) 1325 for (i = 0; i < len / sizeof(*dst0); i++)
1326 *(dst0 + i) = htobe32(*(src0 + i)); 1326 *(dst0 + i) = htobe32(*(src0 + i));
1327} 1327}
1328 1328
1329void 1329void
1330qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte) 1330qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
1331{ 1331{
1332 switch (wordbyte) { 1332 switch (wordbyte) {
1333 case 4: 1333 case 4:
1334 qat_memcpy_htobe32(dst, src, len); 1334 qat_memcpy_htobe32(dst, src, len);
1335 break; 1335 break;
1336 case 8: 1336 case 8:
1337 qat_memcpy_htobe64(dst, src, len); 1337 qat_memcpy_htobe64(dst, src, len);
1338 break; 1338 break;
1339 default: 1339 default:
1340 KASSERT(0); 1340 KASSERT(0);
1341 } 1341 }
1342} 1342}
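
These helpers exist because the kernel's software hash contexts keep
their state in host byte order while the firmware expects big-endian
words, and the word size (4 or 8 bytes) depends on the hash algorithm.
A worked userland example of the 32-bit case, using htobe32() from
NetBSD's <sys/endian.h>:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/endian.h>

int
main(void)
{
	uint32_t src[2] = { 0x01020304, 0xa0b0c0d0 };
	uint32_t dst[2];
	size_t i;

	for (i = 0; i < 2; i++)
		dst[i] = htobe32(src[i]);

	/* On a little-endian host the in-memory byte order of dst is
	 * now 01 02 03 04 a0 b0 c0 d0, which is what a big-endian
	 * consumer expects; on a big-endian host the copy is a no-op. */
	for (i = 0; i < sizeof(dst); i++)
		printf("%02x ", ((uint8_t *)dst)[i]);
	printf("\n");
	return 0;
}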
1343 1343
1344void 1344void
1345qat_crypto_hmac_precompute(struct qat_crypto_desc *desc, struct cryptoini *cria, 1345qat_crypto_hmac_precompute(struct qat_crypto_desc *desc, struct cryptoini *cria,
1346 struct qat_sym_hash_def const *hash_def, uint8_t *state1, uint8_t *state2) 1346 struct qat_sym_hash_def const *hash_def, uint8_t *state1, uint8_t *state2)
1347{ 1347{
1348 int i, state_swap; 1348 int i, state_swap;
1349 struct swcr_auth_hash const *sah = hash_def->qshd_alg->qshai_sah; 1349 struct swcr_auth_hash const *sah = hash_def->qshd_alg->qshai_sah;
1350 uint32_t blklen = hash_def->qshd_alg->qshai_block_len; 1350 uint32_t blklen = hash_def->qshd_alg->qshai_block_len;
1351 uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset; 1351 uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
1352 uint32_t state_size = hash_def->qshd_alg->qshai_state_size; 1352 uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
1353 uint32_t state_word = hash_def->qshd_alg->qshai_state_word; 1353 uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
1354 uint32_t keylen = cria->cri_klen / 8; 1354 uint32_t keylen = cria->cri_klen / 8;
1355 uint32_t padlen = blklen - keylen; 1355 uint32_t padlen = blklen - keylen;
1356 uint8_t *ipad = desc->qcd_hash_state_prefix_buf; 1356 uint8_t *ipad = desc->qcd_hash_state_prefix_buf;
1357 uint8_t *opad = desc->qcd_hash_state_prefix_buf + 1357 uint8_t *opad = desc->qcd_hash_state_prefix_buf +
1358 sizeof(desc->qcd_hash_state_prefix_buf) / 2; 1358 sizeof(desc->qcd_hash_state_prefix_buf) / 2;
1359 /* XXX 1359 /* XXX
1360 * To avoid the "stack protector not protecting local variables" 1360 * To avoid the "stack protector not protecting local variables"
1361 * warning, use a buffer of constant size. 1361 * warning, use a buffer of constant size.
1362 * Currently, the max state length is sizeof(aesxcbc_ctx), used by 1362 * Currently, the max state length is sizeof(aesxcbc_ctx), used by
1363 * swcr_auth_hash_aes_xcbc_mac. 1363 * swcr_auth_hash_aes_xcbc_mac.
1364 */ 1364 */
1365 uint8_t ctx[sizeof(aesxcbc_ctx)]; 1365 uint8_t ctx[sizeof(aesxcbc_ctx)];
1366 1366
1367 memcpy(ipad, cria->cri_key, keylen); 1367 memcpy(ipad, cria->cri_key, keylen);
1368 memcpy(opad, cria->cri_key, keylen); 1368 memcpy(opad, cria->cri_key, keylen);
1369 1369
1370 if (padlen > 0) { 1370 if (padlen > 0) {
1371 memset(ipad + keylen, 0, padlen); 1371 memset(ipad + keylen, 0, padlen);
1372 memset(opad + keylen, 0, padlen); 1372 memset(opad + keylen, 0, padlen);
1373 } 1373 }
1374 for (i = 0; i < blklen; i++) { 1374 for (i = 0; i < blklen; i++) {
1375 ipad[i] ^= 0x36; 1375 ipad[i] ^= 0x36;
1376 opad[i] ^= 0x5c; 1376 opad[i] ^= 0x5c;
1377 } 1377 }
1378 1378
1379 /* ipad */ 1379 /* ipad */
1380 sah->Init(ctx); 1380 sah->Init(ctx);
1381 /* Check the endianness of the kernel built-in hash state */ 1381 /* Check the endianness of the kernel built-in hash state */
1382 state_swap = memcmp(hash_def->qshd_alg->qshai_init_state, 1382 state_swap = memcmp(hash_def->qshd_alg->qshai_init_state,
1383 ((uint8_t *)ctx) + state_offset, state_word); 1383 ((uint8_t *)ctx) + state_offset, state_word);
1384 sah->Update(ctx, ipad, blklen); 1384 sah->Update(ctx, ipad, blklen);
1385 if (state_swap == 0) { 1385 if (state_swap == 0) {
1386 memcpy(state1, ((uint8_t *)ctx) + state_offset, state_size); 1386 memcpy(state1, ((uint8_t *)ctx) + state_offset, state_size);
1387 } else { 1387 } else {
1388 qat_memcpy_htobe(state1, ((uint8_t *)ctx) + state_offset, 1388 qat_memcpy_htobe(state1, ((uint8_t *)ctx) + state_offset,
1389 state_size, state_word); 1389 state_size, state_word);
1390 } 1390 }
1391 1391
1392 /* opad */ 1392 /* opad */
1393 sah->Init(ctx); 1393 sah->Init(ctx);
1394 sah->Update(ctx, opad, blklen); 1394 sah->Update(ctx, opad, blklen);
1395 if (state_swap == 0) { 1395 if (state_swap == 0) {
1396 memcpy(state2, ((uint8_t *)ctx) + state_offset, state_size); 1396 memcpy(state2, ((uint8_t *)ctx) + state_offset, state_size);
1397 } else { 1397 } else {
1398 qat_memcpy_htobe(state2, ((uint8_t *)ctx) + state_offset, 1398 qat_memcpy_htobe(state2, ((uint8_t *)ctx) + state_offset,
1399 state_size, state_word); 1399 state_size, state_word);
1400 } 1400 }
1401} 1401}
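
This is standard RFC 2104 HMAC precomputation: the key is padded to one
hash block, XORed with the 0x36/0x5c constants (the two halves of
qcd_hash_state_prefix_buf hold the resulting ipad and opad), and a
single compression is run over each padded block; state1 and state2
receive the inner and outer intermediate states, byte-swapped when the
kernel context keeps its state in host-little-endian order. The engine
can then resume from these states instead of redoing the key block per
request. A sketch with hypothetical hash_init/hash_update/hash_export
stand-ins for the swcr_auth_hash methods (declared but not implemented,
so this is a compile-only illustration):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLKLEN 64	/* block size of MD5/SHA-1/SHA-256 */

/* Hypothetical stand-ins for the transform's Init/Update methods and
 * a state exporter. */
struct hash_ctx { unsigned char opaque[512]; };
void hash_init(struct hash_ctx *);
void hash_update(struct hash_ctx *, const uint8_t *, size_t);
void hash_export(const struct hash_ctx *, uint8_t *state);

static void
hmac_precompute(const uint8_t *key, size_t keylen,
    uint8_t *state1, uint8_t *state2)
{
	uint8_t ipad[BLKLEN], opad[BLKLEN];
	struct hash_ctx ctx;
	size_t i;

	/* Pad the key to one block; keylen <= BLKLEN is assumed here,
	 * as it is in the driver code above. */
	memset(ipad, 0, sizeof(ipad));
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, sizeof(opad));
	for (i = 0; i < BLKLEN; i++) {
		ipad[i] ^= 0x36;	/* RFC 2104 inner pad */
		opad[i] ^= 0x5c;	/* RFC 2104 outer pad */
	}

	/* One compression over each padded block; the exported
	 * intermediate states are what the hardware resumes from. */
	hash_init(&ctx);
	hash_update(&ctx, ipad, BLKLEN);
	hash_export(&ctx, state1);	/* inner state */

	hash_init(&ctx);
	hash_update(&ctx, opad, BLKLEN);
	hash_export(&ctx, state2);	/* outer state */
}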
1402 1402
1403uint16_t 1403uint16_t
1404qat_crypto_load_cipher_cryptoini( 1404qat_crypto_load_cipher_cryptoini(
1405 struct qat_crypto_desc *desc, struct cryptoini *crie) 1405 struct qat_crypto_desc *desc, struct cryptoini *crie)
1406{ 1406{
1407 enum hw_cipher_algo algo = HW_CIPHER_ALGO_NULL; 1407 enum hw_cipher_algo algo = HW_CIPHER_ALGO_NULL;
1408 enum hw_cipher_mode mode = HW_CIPHER_CBC_MODE; 1408 enum hw_cipher_mode mode = HW_CIPHER_CBC_MODE;
1409 enum hw_cipher_convert key_convert = HW_CIPHER_NO_CONVERT; 1409 enum hw_cipher_convert key_convert = HW_CIPHER_NO_CONVERT;
1410 1410
1411 switch (crie->cri_alg) { 1411 switch (crie->cri_alg) {
1412 case CRYPTO_DES_CBC: 1412 case CRYPTO_DES_CBC:
1413 algo = HW_CIPHER_ALGO_DES; 1413 algo = HW_CIPHER_ALGO_DES;
1414 desc->qcd_cipher_blk_sz = HW_DES_BLK_SZ; 1414 desc->qcd_cipher_blk_sz = HW_DES_BLK_SZ;
1415 break; 1415 break;
1416 case CRYPTO_3DES_CBC: 1416 case CRYPTO_3DES_CBC:
1417 algo = HW_CIPHER_ALGO_3DES; 1417 algo = HW_CIPHER_ALGO_3DES;
1418 desc->qcd_cipher_blk_sz = HW_3DES_BLK_SZ; 1418 desc->qcd_cipher_blk_sz = HW_3DES_BLK_SZ;
1419 break; 1419 break;
1420 case CRYPTO_AES_CBC: 1420 case CRYPTO_AES_CBC:
1421 switch (crie->cri_klen / 8) { 1421 switch (crie->cri_klen / 8) {
1422 case HW_AES_128_KEY_SZ: 1422 case HW_AES_128_KEY_SZ:
1423 algo = HW_CIPHER_ALGO_AES128; 1423 algo = HW_CIPHER_ALGO_AES128;
1424 break; 1424 break;
1425 case HW_AES_192_KEY_SZ: 1425 case HW_AES_192_KEY_SZ:
1426 algo = HW_CIPHER_ALGO_AES192; 1426 algo = HW_CIPHER_ALGO_AES192;
1427 break; 1427 break;
1428 case HW_AES_256_KEY_SZ: 1428 case HW_AES_256_KEY_SZ:
1429 algo = HW_CIPHER_ALGO_AES256; 1429 algo = HW_CIPHER_ALGO_AES256;
1430 break; 1430 break;
1431 default: 1431 default:
1432 KASSERT(0); 1432 KASSERT(0);
1433 break; 1433 break;
1434 } 1434 }
1435 desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ; 1435 desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
1436 /* 1436 /*
1437 * AES decrypt key needs to be reversed. 1437 * AES decrypt key needs to be reversed.
1438 * Instead of reversing the key at session registration, 1438 * Instead of reversing the key at session registration,
1439 * it is reversed on the fly by setting the KEY_CONVERT 1439 * it is reversed on the fly by setting the KEY_CONVERT
1440 * bit here. 1440 * bit here.
1441 */ 1441 */
1442 if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT) 1442 if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
1443 key_convert = HW_CIPHER_KEY_CONVERT; 1443 key_convert = HW_CIPHER_KEY_CONVERT;
1444 1444
1445 break; 1445 break;
1446 default: 1446 default:
1447 KASSERT(0); 1447 KASSERT(0);
1448 break; 1448 break;
1449 } 1449 }
1450 1450
1451 return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, 1451 return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert,
1452 desc->qcd_cipher_dir); 1452 desc->qcd_cipher_dir);
1453} 1453}
1454 1454
1455uint16_t 1455uint16_t
1456qat_crypto_load_auth_cryptoini( 1456qat_crypto_load_auth_cryptoini(
1457 struct qat_crypto_desc *desc, struct cryptoini *cria, 1457 struct qat_crypto_desc *desc, struct cryptoini *cria,
1458 struct qat_sym_hash_def const **hash_def) 1458 struct qat_sym_hash_def const **hash_def)
1459{ 1459{
1460 const struct swcr_auth_hash *sah; 1460 const struct swcr_auth_hash *sah;
1461 enum qat_sym_hash_algorithm algo = 0; 1461 enum qat_sym_hash_algorithm algo = 0;
1462 1462
1463 switch (cria->cri_alg) { 1463 switch (cria->cri_alg) {
1464 case CRYPTO_MD5_HMAC_96: 1464 case CRYPTO_MD5_HMAC_96:
1465 algo = QAT_SYM_HASH_MD5; 1465 algo = QAT_SYM_HASH_MD5;
1466 break; 1466 break;
1467 case CRYPTO_SHA1_HMAC_96: 1467 case CRYPTO_SHA1_HMAC_96:
1468 algo = QAT_SYM_HASH_SHA1; 1468 algo = QAT_SYM_HASH_SHA1;
1469 break; 1469 break;
1470 case CRYPTO_SHA2_256_HMAC: 1470 case CRYPTO_SHA2_256_HMAC:
1471 algo = QAT_SYM_HASH_SHA256; 1471 algo = QAT_SYM_HASH_SHA256;
1472 break; 1472 break;
1473 case CRYPTO_SHA2_384_HMAC: 1473 case CRYPTO_SHA2_384_HMAC:
1474 algo = QAT_SYM_HASH_SHA384; 1474 algo = QAT_SYM_HASH_SHA384;
1475 break; 1475 break;
1476 case CRYPTO_SHA2_512_HMAC: 1476 case CRYPTO_SHA2_512_HMAC:
1477 algo = QAT_SYM_HASH_SHA512; 1477 algo = QAT_SYM_HASH_SHA512;
1478 break; 1478 break;
1479 default: 1479 default:
1480 KASSERT(0); 1480 KASSERT(0);
1481 break; 1481 break;
1482 } 1482 }
1483 *hash_def = &qat_sym_hash_defs[algo]; 1483 *hash_def = &qat_sym_hash_defs[algo];
1484 sah = (*hash_def)->qshd_alg->qshai_sah; 1484 sah = (*hash_def)->qshd_alg->qshai_sah;
1485 KASSERT(sah != NULL); 1485 KASSERT(sah != NULL);
1486 desc->qcd_auth_sz = sah->auth_hash->authsize; 1486 desc->qcd_auth_sz = sah->auth_hash->authsize;
1487 1487
1488 return HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1, 1488 return HW_AUTH_CONFIG_BUILD(HW_AUTH_MODE1,
1489 (*hash_def)->qshd_qat->qshqi_algo_enc, 1489 (*hash_def)->qshd_qat->qshqi_algo_enc,
1490 (*hash_def)->qshd_alg->qshai_digest_len); 1490 (*hash_def)->qshd_alg->qshai_digest_len);
1491} 1491}
1492 1492
1493int 1493int
1494qat_crypto_load_buf(struct qat_softc *sc, struct cryptop *crp, 1494qat_crypto_load_buf(struct qat_softc *sc, struct cryptop *crp,
1495 struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc, 1495 struct qat_sym_cookie *qsc, struct qat_crypto_desc const *desc,
1496 uint8_t *icv_buf, int icv_offset, bus_addr_t *icv_paddr) 1496 uint8_t *icv_buf, int icv_offset, bus_addr_t *icv_paddr)
1497{ 1497{
1498 int error, i, nsegs; 1498 int error, i, nsegs;
1499 1499
1500 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1500 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1501 struct mbuf *m = (struct mbuf *)crp->crp_buf; 1501 struct mbuf *m = (struct mbuf *)crp->crp_buf;
1502 1502
1503 if (icv_offset >= 0) { 1503 if (icv_offset >= 0) {
1504 if (m_length(m) == icv_offset) { 1504 if (m_length(m) == icv_offset) {
1505 m_copyback(m, icv_offset, desc->qcd_auth_sz, 1505 m_copyback(m, icv_offset, desc->qcd_auth_sz,
1506 icv_buf); 1506 icv_buf);
1507 if (m_length(m) == icv_offset) 1507 if (m_length(m) == icv_offset)
1508 return ENOBUFS; 1508 return ENOBUFS;
1509 } else { 1509 } else {
1510 struct mbuf *m0; 1510 struct mbuf *m0;
1511 m0 = m_pulldown(m, icv_offset, 1511 m0 = m_pulldown(m, icv_offset,
1512 desc->qcd_auth_sz, NULL); 1512 desc->qcd_auth_sz, NULL);
1513 if (m0 == NULL) 1513 if (m0 == NULL)
1514 return ENOBUFS; 1514 return ENOBUFS;
1515 } 1515 }
1516 } 1516 }
1517 1517
1518 error = bus_dmamap_load_mbuf(sc->sc_dmat, qsc->qsc_buf_dmamap, 1518 error = bus_dmamap_load_mbuf(sc->sc_dmat, qsc->qsc_buf_dmamap,
1519 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1519 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1520 if (error == EFBIG) { 1520 if (error == EFBIG) {
1521 struct mbuf *m_new; 1521 struct mbuf *m_new;
1522 m_new = m_defrag(m, M_DONTWAIT); 1522 m_new = m_defrag(m, M_DONTWAIT);
1523 if (m_new != NULL) { 1523 if (m_new != NULL) {
1524 crp->crp_buf = m_new; 1524 crp->crp_buf = m_new;
1525 qsc->qsc_buf = m_new; 1525 qsc->qsc_buf = m_new;
1526 error = bus_dmamap_load_mbuf(sc->sc_dmat, 1526 error = bus_dmamap_load_mbuf(sc->sc_dmat,
1527 qsc->qsc_buf_dmamap, m_new, 1527 qsc->qsc_buf_dmamap, m_new,
1528 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1528 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1529 if (error) { 1529 if (error) {
1530 m_freem(m_new); 1530 m_freem(m_new);
1531 crp->crp_buf = NULL; 1531 crp->crp_buf = NULL;
1532 } 1532 }
1533 } 1533 }
1534 } 1534 }
1535 1535
1536 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1536 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1537 error = bus_dmamap_load_uio(sc->sc_dmat, qsc->qsc_buf_dmamap, 1537 error = bus_dmamap_load_uio(sc->sc_dmat, qsc->qsc_buf_dmamap,
1538 (struct uio *)crp->crp_buf, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1538 (struct uio *)crp->crp_buf, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1539 } else { 1539 } else {
1540 error = bus_dmamap_load(sc->sc_dmat, qsc->qsc_buf_dmamap, 1540 error = bus_dmamap_load(sc->sc_dmat, qsc->qsc_buf_dmamap,
1541 crp->crp_buf, crp->crp_ilen, NULL, BUS_DMA_NOWAIT); 1541 crp->crp_buf, crp->crp_ilen, NULL, BUS_DMA_NOWAIT);
1542 } 1542 }
1543 if (error) { 1543 if (error) {
1544 aprint_debug_dev(sc->sc_dev, 1544 aprint_debug_dev(sc->sc_dev,
1545 "can't load crp_buf, error %d\n", error); 1545 "can't load crp_buf, error %d\n", error);
1546 crp->crp_etype = error; 1546 crp->crp_etype = error;
1547 return error; 1547 return error;
1548 } 1548 }
1549 1549
1550 nsegs = qsc->qsc_buf_dmamap->dm_nsegs; 1550 nsegs = qsc->qsc_buf_dmamap->dm_nsegs;
1551 qsc->qsc_buf_list.num_buffers = nsegs; 1551 qsc->qsc_buf_list.num_buffers = nsegs;
1552 for (i = 0; i < nsegs; i++) { 1552 for (i = 0; i < nsegs; i++) {
1553 struct flat_buffer_desc *flatbuf = 1553 struct flat_buffer_desc *flatbuf =
1554 &qsc->qsc_buf_list.phy_buffers[i]; 1554 &qsc->qsc_buf_list.phy_buffers[i];
1555 bus_addr_t paddr = qsc->qsc_buf_dmamap->dm_segs[i].ds_addr; 1555 bus_addr_t paddr = qsc->qsc_buf_dmamap->dm_segs[i].ds_addr;
1556 bus_size_t len = qsc->qsc_buf_dmamap->dm_segs[i].ds_len; 1556 bus_size_t len = qsc->qsc_buf_dmamap->dm_segs[i].ds_len;
1557 1557
1558 flatbuf->data_len_in_bytes = len; 1558 flatbuf->data_len_in_bytes = len;
1559 flatbuf->phy_buffer = (uint64_t)paddr; 1559 flatbuf->phy_buffer = (uint64_t)paddr;
1560 1560
1561 if (icv_offset >= 0) { 1561 if (icv_offset >= 0) {
1562 if (icv_offset < len) 1562 if (icv_offset < len)
1563 *icv_paddr = paddr + icv_offset; 1563 *icv_paddr = paddr + icv_offset;
1564 else 1564 else
1565 icv_offset -= len; 1565 icv_offset -= len;
1566 } 1566 }
1567 } 1567 }
1568 1568
1569 bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0, 1569 bus_dmamap_sync(sc->sc_dmat, qsc->qsc_buf_dmamap, 0,
1570 qsc->qsc_buf_dmamap->dm_mapsize, 1570 qsc->qsc_buf_dmamap->dm_mapsize,
1571 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1571 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1572 1572
1573 return 0; 1573 return 0;
1574} 1574}
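
The icv_offset bookkeeping in the segment loop above is an
offset-to-physical translation over a scatter/gather list: keep
subtracting segment lengths until the remaining offset falls inside a
segment, at which point the ICV's DMA address is that segment's base
plus the remainder. The same logic extracted into a self-contained
helper:

#include <stddef.h>
#include <stdint.h>

struct seg {
	uint64_t paddr;	/* segment physical address */
	uint64_t len;	/* segment length in bytes */
};

/* Returns 0 if the offset lies past the end of the list. */
static uint64_t
offset_to_paddr(const struct seg *segs, size_t nsegs, uint64_t off)
{
	size_t i;

	for (i = 0; i < nsegs; i++) {
		if (off < segs[i].len)
			return segs[i].paddr + off;
		off -= segs[i].len;
	}
	return 0;
}

int
main(void)
{
	struct seg s[2] = { { 0x1000, 256 }, { 0x8000, 256 } };

	/* offset 300 = 256 into seg 0, then 44 into seg 1 */
	return offset_to_paddr(s, 2, 300) == 0x8000 + 44 ? 0 : 1;
}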
1575 1575
1576int 1576int
1577qat_crypto_load_iv(struct qat_sym_cookie *qsc, struct cryptop *crp, 1577qat_crypto_load_iv(struct qat_sym_cookie *qsc, struct cryptop *crp,
1578 struct cryptodesc *crde, struct qat_crypto_desc const *desc) 1578 struct cryptodesc *crde, struct qat_crypto_desc const *desc)
1579{ 1579{
1580 uint32_t rand; 1580 uint32_t rand;
1581 uint32_t ivlen = desc->qcd_cipher_blk_sz; 1581 uint32_t ivlen = desc->qcd_cipher_blk_sz;
1582 int i; 1582 int i;
1583 1583
1584 if (crde->crd_flags & CRD_F_IV_EXPLICIT) { 1584 if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
1585 memcpy(qsc->qsc_iv_buf, crde->crd_iv, ivlen); 1585 memcpy(qsc->qsc_iv_buf, crde->crd_iv, ivlen);
1586 } else { 1586 } else {
1587 if (crde->crd_flags & CRD_F_ENCRYPT) { 1587 if (crde->crd_flags & CRD_F_ENCRYPT) {
1588 for (i = 0; i + sizeof(rand) <= ivlen; 1588 for (i = 0; i + sizeof(rand) <= ivlen;
1589 i += sizeof(rand)) { 1589 i += sizeof(rand)) {
1590 rand = cprng_fast32(); 1590 rand = cprng_fast32();
1591 memcpy(qsc->qsc_iv_buf + i, &rand, sizeof(rand)); 1591 memcpy(qsc->qsc_iv_buf + i, &rand, sizeof(rand));
1592 } 1592 }
1593 if (sizeof(qsc->qsc_iv_buf) % sizeof(rand) != 0) { 1593 if (sizeof(qsc->qsc_iv_buf) % sizeof(rand) != 0) {
1594 rand = cprng_fast32(); 1594 rand = cprng_fast32();
1595 memcpy(qsc->qsc_iv_buf + i, &rand, 1595 memcpy(qsc->qsc_iv_buf + i, &rand,
1596 sizeof(qsc->qsc_iv_buf) - i); 1596 sizeof(qsc->qsc_iv_buf) - i);
1597 } 1597 }
1598 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1598 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1599 /* get iv from buf */ 1599 /* get iv from buf */
1600 m_copydata(qsc->qsc_buf, crde->crd_inject, ivlen, 1600 m_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
1601 qsc->qsc_iv_buf); 1601 qsc->qsc_iv_buf);
1602 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1602 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1603 cuio_copydata(qsc->qsc_buf, crde->crd_inject, ivlen, 1603 cuio_copydata(qsc->qsc_buf, crde->crd_inject, ivlen,
1604 qsc->qsc_iv_buf); 1604 qsc->qsc_iv_buf);
1605 } 1605 }
1606 } 1606 }
1607 1607
1608 if ((crde->crd_flags & CRD_F_ENCRYPT) != 0 && 1608 if ((crde->crd_flags & CRD_F_ENCRYPT) != 0 &&
1609 (crde->crd_flags & CRD_F_IV_PRESENT) == 0) { 1609 (crde->crd_flags & CRD_F_IV_PRESENT) == 0) {
1610 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1610 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1611 m_copyback(qsc->qsc_buf, crde->crd_inject, ivlen, 1611 m_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
1612 qsc->qsc_iv_buf); 1612 qsc->qsc_iv_buf);
1613 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1613 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1614 cuio_copyback(qsc->qsc_buf, crde->crd_inject, ivlen, 1614 cuio_copyback(qsc->qsc_buf, crde->crd_inject, ivlen,
1615 qsc->qsc_iv_buf); 1615 qsc->qsc_iv_buf);
1616 } 1616 }
1617 } 1617 }
1618 1618
1619 return 0; 1619 return 0;
1620} 1620}
1621 1621
1622static inline struct qat_crypto_bank * 1622static inline struct qat_crypto_bank *
1623qat_crypto_select_bank(struct qat_crypto *qcy) 1623qat_crypto_select_bank(struct qat_crypto *qcy)
1624{ 1624{
1625 u_int cpuid = cpu_index(curcpu()); 1625 u_int cpuid = cpu_index(curcpu());
1626 1626
1627 return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks]; 1627 return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
1628} 1628}
1629 1629
1630int 1630int
1631qat_crypto_process(void *arg, struct cryptop *crp, int hint) 1631qat_crypto_process(void *arg, struct cryptop *crp, int hint)
1632{ 1632{
1633 struct qat_crypto *qcy = arg; 1633 struct qat_crypto *qcy = arg;
1634 struct qat_crypto_bank *qcb; 1634 struct qat_crypto_bank *qcb;
1635 struct qat_session *qs = NULL; 1635 struct qat_session *qs = NULL;
1636 struct qat_crypto_desc const *desc; 1636 struct qat_crypto_desc const *desc;
1637 struct qat_sym_cookie *qsc = NULL; 1637 struct qat_sym_cookie *qsc = NULL;
1638 struct qat_sym_bulk_cookie *qsbc; 1638 struct qat_sym_bulk_cookie *qsbc;
1639 struct cryptodesc *crd, *crda = NULL, *crde = NULL; 1639 struct cryptodesc *crd, *crda = NULL, *crde = NULL;
1640 bus_addr_t icv_paddr = 0; 1640 bus_addr_t icv_paddr = 0;
1641 int error, icv_offset = -1; 1641 int error, icv_offset = -1;
1642 uint8_t icv_buf[CRYPTO_MAX_MAC_LEN]; 1642 uint8_t icv_buf[CRYPTO_MAX_MAC_LEN];
1643 1643
1644 qs = qcy->qcy_sessions[CRYPTO_SESID2LID(crp->crp_sid)]; 1644 qs = qcy->qcy_sessions[CRYPTO_SESID2LID(crp->crp_sid)];
1645 mutex_spin_enter(&qs->qs_session_mtx); 1645 mutex_spin_enter(&qs->qs_session_mtx);
1646 KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE); 1646 KASSERT(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
1647 qs->qs_inflight++; 1647 qs->qs_inflight++;
1648 mutex_spin_exit(&qs->qs_session_mtx); 1648 mutex_spin_exit(&qs->qs_session_mtx);
1649 1649
1650 qcb = qat_crypto_select_bank(qcy); 1650 qcb = qat_crypto_select_bank(qcy);
1651 1651
1652 qsc = qat_crypto_alloc_sym_cookie(qcb); 1652 qsc = qat_crypto_alloc_sym_cookie(qcb);
1653 if (qsc == NULL) { 1653 if (qsc == NULL) {
1654 error = ENOBUFS; 1654 error = ENOBUFS;
1655 goto fail; 1655 goto fail;
1656 } 1656 }
1657 1657
1658 error = 0; 1658 error = 0;
1659 desc = &qs->qs_dec_desc; 1659 desc = &qs->qs_dec_desc;
1660 crd = crp->crp_desc; 1660 crd = crp->crp_desc;
1661 while (crd != NULL) { 1661 while (crd != NULL) {
1662 switch (crd->crd_alg) { 1662 switch (crd->crd_alg) {
1663 case CRYPTO_DES_CBC: 1663 case CRYPTO_DES_CBC:
1664 case CRYPTO_3DES_CBC: 1664 case CRYPTO_3DES_CBC:
1665 case CRYPTO_AES_CBC: 1665 case CRYPTO_AES_CBC:
1666 if (crde != NULL) 1666 if (crde != NULL)
1667 error = EINVAL; 1667 error = EINVAL;
1668 if (crd->crd_flags & CRD_F_ENCRYPT) { 1668 if (crd->crd_flags & CRD_F_ENCRYPT) {
1669 /* use encrypt desc */ 1669 /* use encrypt desc */
1670 desc = &qs->qs_enc_desc; 1670 desc = &qs->qs_enc_desc;
1671 if (crda != NULL) 1671 if (crda != NULL)
1672 error = ENOTSUP; 1672 error = ENOTSUP;
1673 } 1673 }
1674 crde = crd; 1674 crde = crd;
1675 break; 1675 break;
1676 case CRYPTO_MD5_HMAC_96: 1676 case CRYPTO_MD5_HMAC_96:
1677 case CRYPTO_SHA1_HMAC_96: 1677 case CRYPTO_SHA1_HMAC_96:
1678 case CRYPTO_SHA2_256_HMAC: 1678 case CRYPTO_SHA2_256_HMAC:
1679 case CRYPTO_SHA2_384_HMAC: 1679 case CRYPTO_SHA2_384_HMAC:
1680 case CRYPTO_SHA2_512_HMAC: 1680 case CRYPTO_SHA2_512_HMAC:
1681 if (crda != NULL) 1681 if (crda != NULL)
1682 error = EINVAL; 1682 error = EINVAL;
1683 if (crde != NULL && 1683 if (crde != NULL &&
1684 (crde->crd_flags & CRD_F_ENCRYPT) == 0) 1684 (crde->crd_flags & CRD_F_ENCRYPT) == 0)
1685 error = EINVAL; 1685 error = EINVAL;
1686 crda = crd; 1686 crda = crd;
1687 icv_offset = crd->crd_inject; 1687 icv_offset = crd->crd_inject;
1688 break; 1688 break;
1689 } 1689 }
1690 if (error) 1690 if (error)
1691 goto fail; 1691 goto fail;
1692 1692
1693 crd = crd->crd_next; 1693 crd = crd->crd_next;
1694 } 1694 }
1695 1695
1696 qsc->qsc_buf = crp->crp_buf; 1696 qsc->qsc_buf = crp->crp_buf;
1697 1697
1698 if (crde != NULL) { 1698 if (crde != NULL) {
1699 error = qat_crypto_load_iv(qsc, crp, crde, desc); 1699 error = qat_crypto_load_iv(qsc, crp, crde, desc);
1700 if (error) 1700 if (error)
1701 goto fail; 1701 goto fail;
1702 } 1702 }
1703 1703
1704 error = qat_crypto_load_buf(qcy->qcy_sc, crp, qsc, desc, icv_buf, 1704 error = qat_crypto_load_buf(qcy->qcy_sc, crp, qsc, desc, icv_buf,
1705 icv_offset, &icv_paddr); 1705 icv_offset, &icv_paddr);
1706 if (error) 1706 if (error)
1707 goto fail; 1707 goto fail;
1708 1708
1709 qsbc = &qsc->u.qsc_bulk_cookie; 1709 qsbc = &qsc->u.qsc_bulk_cookie;
1710 1710
1711 qsbc->qsbc_crypto = qcy; 1711 qsbc->qsbc_crypto = qcy;
1712 qsbc->qsbc_session = qs; 1712 qsbc->qsbc_session = qs;
1713 qsbc->qsbc_cb_tag = crp; 1713 qsbc->qsbc_cb_tag = crp;
1714 1714
1715 qcy->qcy_sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, 1715 qcy->qcy_sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc,
1716 crde, crda, icv_paddr); 1716 crde, crda, icv_paddr);
1717 1717
1718 bus_dmamap_sync(qcy->qcy_sc->sc_dmat, *qsc->qsc_self_dmamap, 0, 1718 bus_dmamap_sync(qcy->qcy_sc->sc_dmat, *qsc->qsc_self_dmamap, 0,
1719 offsetof(struct qat_sym_cookie, qsc_self_dmamap), 1719 offsetof(struct qat_sym_cookie, qsc_self_dmamap),
1720 BUS_DMASYNC_PREWRITE); 1720 BUS_DMASYNC_PREWRITE);
1721 1721
1722 error = qat_etr_put_msg(qcy->qcy_sc, qcb->qcb_sym_tx, 1722 error = qat_etr_put_msg(qcy->qcy_sc, qcb->qcb_sym_tx,
1723 (uint32_t *)qsbc->qsbc_msg); 1723 (uint32_t *)qsbc->qsbc_msg);
1724 if (error) 1724 if (error)
1725 goto fail; 1725 goto fail;
1726 1726
1727 return 0; 1727 return 0;
1728fail: 1728fail:
1729 if (qsc) 1729 if (qsc)
1730 qat_crypto_free_sym_cookie(qcb, qsc); 1730 qat_crypto_free_sym_cookie(qcb, qsc);
1731 mutex_spin_enter(&qs->qs_session_mtx); 1731 mutex_spin_enter(&qs->qs_session_mtx);
1732 qs->qs_inflight--; 1732 qs->qs_inflight--;
1733 qat_crypto_check_free_session(qcy, qs); 1733 qat_crypto_check_free_session(qcy, qs);
1734 crp->crp_etype = error; 1734 crp->crp_etype = error;
1735 crypto_done(crp); 1735 crypto_done(crp);
1736 return 0; 1736 return 0;
1737} 1737}
1738 1738
1739int 1739int
1740qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb) 1740qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1741{ 1741{
1742 int error, i, bank; 1742 int error, i, bank;
1743 int curname = 0; 1743 int curname = 0;
1744 char *name; 1744 char *name;
1745 1745
1746 bank = qcb->qcb_bank; 1746 bank = qcb->qcb_bank;
1747 1747
1748 name = qcb->qcb_ring_names[curname++]; 1748 name = qcb->qcb_ring_names[curname++];
1749 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank); 1749 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
1750 error = qat_etr_setup_ring(sc, qcb->qcb_bank, 1750 error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1751 sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size, 1751 sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
1752 NULL, NULL, name, &qcb->qcb_sym_tx); 1752 NULL, NULL, name, &qcb->qcb_sym_tx);
1753 if (error) 1753 if (error)
1754 return error; 1754 return error;
1755 1755
1756 name = qcb->qcb_ring_names[curname++]; 1756 name = qcb->qcb_ring_names[curname++];
1757 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank); 1757 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
1758 error = qat_etr_setup_ring(sc, qcb->qcb_bank, 1758 error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1759 sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size, 1759 sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
1760 qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx); 1760 qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
1761 if (error) 1761 if (error)
1762 return error; 1762 return error;
1763 1763
1764 for (i = 0; i < QAT_NSYMCOOKIE; i++) { 1764 for (i = 0; i < QAT_NSYMCOOKIE; i++) {
1765 struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i]; 1765 struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
1766 struct qat_sym_cookie *qsc; 1766 struct qat_sym_cookie *qsc;
1767 1767
1768 error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_sym_cookie), 1768 error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_sym_cookie),
1769 QAT_OPTIMAL_ALIGN); 1769 QAT_OPTIMAL_ALIGN);
1770 if (error) 1770 if (error)
1771 return error; 1771 return error;
1772 1772
1773 qsc = qdm->qdm_dma_vaddr; 1773 qsc = qdm->qdm_dma_vaddr;
1774 qsc->qsc_self_dmamap = &qdm->qdm_dma_map; 1774 qsc->qsc_self_dmamap = &qdm->qdm_dma_map;
1775 qsc->qsc_bulk_req_params_buf_paddr = 1775 qsc->qsc_bulk_req_params_buf_paddr =
1776 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1776 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1777 u.qsc_bulk_cookie.qsbc_req_params_buf); 1777 u.qsc_bulk_cookie.qsbc_req_params_buf);
1778 qsc->qsc_buffer_list_desc_paddr = 1778 qsc->qsc_buffer_list_desc_paddr =
1779 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1779 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1780 qsc_buf_list); 1780 qsc_buf_list);
1781 qsc->qsc_iv_buf_paddr = 1781 qsc->qsc_iv_buf_paddr =
1782 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie, 1782 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1783 qsc_iv_buf); 1783 qsc_iv_buf);
1784 qcb->qcb_symck_free[i] = qsc; 1784 qcb->qcb_symck_free[i] = qsc;
1785 qcb->qcb_symck_free_count++; 1785 qcb->qcb_symck_free_count++;
1786 1786
1787 error = bus_dmamap_create(sc->sc_dmat, QAT_MAXLEN, 1787 error = bus_dmamap_create(sc->sc_dmat, QAT_MAXLEN,
1788 QAT_MAXSEG, MCLBYTES, 0, 0, &qsc->qsc_buf_dmamap); 1788 QAT_MAXSEG, MCLBYTES, 0, 0, &qsc->qsc_buf_dmamap);
1789 if (error) 1789 if (error)
1790 return error; 1790 return error;
1791 } 1791 }
1792 1792
1793 return 0; 1793 return 0;
1794} 1794}
1795 1795
1796int 1796int
1797qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb) 1797qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1798{ 1798{
1799 int error; 1799 int error;
1800 1800
1801 mutex_init(&qcb->qcb_bank_mtx, MUTEX_DEFAULT, IPL_NET); 1801 mutex_init(&qcb->qcb_bank_mtx, MUTEX_DEFAULT, IPL_NET);
1802 1802
1803 QAT_EVCNT_ATTACH(sc, &qcb->qcb_ev_no_symck, EVCNT_TYPE_MISC, 1803 QAT_EVCNT_ATTACH(sc, &qcb->qcb_ev_no_symck, EVCNT_TYPE_MISC,
1804 qcb->qcb_ev_no_symck_name, "crypto no_symck"); 1804 qcb->qcb_ev_no_symck_name, "crypto no_symck");
1805 1805
1806 error = qat_crypto_setup_ring(sc, qcb); 1806 error = qat_crypto_setup_ring(sc, qcb);
1807 if (error) 1807 if (error)
1808 return error; 1808 return error;
1809 1809
1810 return 0; 1810 return 0;
1811} 1811}
1812 1812
1813int 1813int
1814qat_crypto_init(struct qat_softc *sc) 1814qat_crypto_init(struct qat_softc *sc)
1815{ 1815{
1816 struct qat_crypto *qcy = &sc->sc_crypto; 1816 struct qat_crypto *qcy = &sc->sc_crypto;
1817 int error, bank, i; 1817 int error, bank, i;
1818 int num_banks; 1818 int num_banks;
1819 1819
1820 qcy->qcy_sc = sc; 1820 qcy->qcy_sc = sc;
1821 1821
1822 if (sc->sc_hw.qhw_init_arb != NULL) 1822 if (sc->sc_hw.qhw_init_arb != NULL)
1823 num_banks = uimin(ncpu, sc->sc_hw.qhw_num_banks); 1823 num_banks = uimin(ncpu, sc->sc_hw.qhw_num_banks);
1824 else 1824 else
1825 num_banks = sc->sc_ae_num; 1825 num_banks = sc->sc_ae_num;
1826 1826
1827 qcy->qcy_num_banks = num_banks; 1827 qcy->qcy_num_banks = num_banks;
1828 1828
1829 qcy->qcy_banks = 1829 qcy->qcy_banks =
1830 qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks); 1830 qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
1831 1831
1832 for (bank = 0; bank < num_banks; bank++) { 1832 for (bank = 0; bank < num_banks; bank++) {
1833 struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank]; 1833 struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
1834 qcb->qcb_bank = bank; 1834 qcb->qcb_bank = bank;
1835 qcb->qcb_crypto = qcy; 1835 qcb->qcb_crypto = qcy;
1836 error = qat_crypto_bank_init(sc, qcb); 1836 error = qat_crypto_bank_init(sc, qcb);
1837 if (error) 1837 if (error)
1838 return error; 1838 return error;
1839 } 1839 }
1840 1840
1841 mutex_init(&qcy->qcy_crypto_mtx, MUTEX_DEFAULT, IPL_NET); 1841 mutex_init(&qcy->qcy_crypto_mtx, MUTEX_DEFAULT, IPL_NET);
1842 1842
1843 for (i = 0; i < QAT_NSESSION; i++) { 1843 for (i = 0; i < QAT_NSESSION; i++) {
1844 struct qat_dmamem *qdm = &qcy->qcy_session_dmamems[i]; 1844 struct qat_dmamem *qdm = &qcy->qcy_session_dmamems[i];
1845 struct qat_session *qs; 1845 struct qat_session *qs;
1846 1846
1847 error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_session), 1847 error = qat_alloc_dmamem(sc, qdm, sizeof(struct qat_session),
1848 QAT_OPTIMAL_ALIGN); 1848 QAT_OPTIMAL_ALIGN);
1849 if (error) 1849 if (error)
1850 return error; 1850 return error;
1851 1851
1852 qs = qdm->qdm_dma_vaddr; 1852 qs = qdm->qdm_dma_vaddr;
1853 qs->qs_lid = i; 1853 qs->qs_lid = i;
1854 qs->qs_dec_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr; 1854 qs->qs_dec_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
1855 qs->qs_dec_desc.qcd_hash_state_paddr = 1855 qs->qs_dec_desc.qcd_hash_state_paddr =
1856 qs->qs_dec_desc.qcd_desc_paddr + 1856 qs->qs_dec_desc.qcd_desc_paddr +
1857 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); 1857 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1858 qs->qs_enc_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr + 1858 qs->qs_enc_desc.qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
1859 offsetof(struct qat_session, qs_enc_desc); 1859 offsetof(struct qat_session, qs_enc_desc);
1860 qs->qs_enc_desc.qcd_hash_state_paddr = 1860 qs->qs_enc_desc.qcd_hash_state_paddr =
1861 qs->qs_enc_desc.qcd_desc_paddr + 1861 qs->qs_enc_desc.qcd_desc_paddr +
1862 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf); 1862 offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
1863 1863
1864 mutex_init(&qs->qs_session_mtx, MUTEX_DEFAULT, IPL_NET); 1864 mutex_init(&qs->qs_session_mtx, MUTEX_DEFAULT, IPL_NET);
1865 1865
1866 qcy->qcy_sessions[i] = qs; 1866 qcy->qcy_sessions[i] = qs;
1867 qcy->qcy_session_free[i] = qs; 1867 qcy->qcy_session_free[i] = qs;
1868 qcy->qcy_session_free_count++; 1868 qcy->qcy_session_free_count++;
1869 } 1869 }
1870 1870
1871 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_new_sess, EVCNT_TYPE_MISC, 1871 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_new_sess, EVCNT_TYPE_MISC,
1872 qcy->qcy_ev_new_sess_name, "crypto new_sess"); 1872 qcy->qcy_ev_new_sess_name, "crypto new_sess");
1873 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_free_sess, EVCNT_TYPE_MISC, 1873 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_free_sess, EVCNT_TYPE_MISC,
1874 qcy->qcy_ev_free_sess_name, "crypto free_sess"); 1874 qcy->qcy_ev_free_sess_name, "crypto free_sess");
1875 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_no_sess, EVCNT_TYPE_MISC, 1875 QAT_EVCNT_ATTACH(sc, &qcy->qcy_ev_no_sess, EVCNT_TYPE_MISC,
1876 qcy->qcy_ev_no_sess_name, "crypto no_sess"); 1876 qcy->qcy_ev_no_sess_name, "crypto no_sess");
1877 1877
1878 return 0; 1878 return 0;
1879} 1879}
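
The paddr arithmetic when seeding the session descriptors (and the
symmetric cookies earlier) relies on one property: qat_alloc_dmamem()
returns a single physically contiguous segment, so the device-visible
address of any struct member is simply the segment base plus
offsetof(). A minimal illustration with a hypothetical layout:

#include <stddef.h>
#include <stdint.h>

struct dma_obj {
	uint32_t hdr;
	uint8_t  hash_state[64];
	uint8_t  req_cache[128];
};

/* Only valid because the allocation is one contiguous segment; the
 * driver asserts dm_nsegs == 1 for the same reason. */
static uint64_t
member_paddr(uint64_t seg_base)
{
	return seg_base + offsetof(struct dma_obj, hash_state);
}

int
main(void)
{
	/* e.g. base 0x100000 -> hash_state at 0x100004 */
	return member_paddr(0x100000) == 0x100004 ? 0 : 1;
}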
1880 1880
1881int 1881int
1882qat_crypto_new_session(void *arg, uint32_t *lid, struct cryptoini *cri) 1882qat_crypto_new_session(void *arg, uint32_t *lid, struct cryptoini *cri)
1883{ 1883{
1884 struct qat_crypto *qcy = arg; 1884 struct qat_crypto *qcy = arg;
1885 struct qat_session *qs = NULL; 1885 struct qat_session *qs = NULL;
1886 struct cryptoini *crie = NULL; 1886 struct cryptoini *crie = NULL;
1887 struct cryptoini *cria = NULL; 1887 struct cryptoini *cria = NULL;
1888 int slice, error; 1888 int slice, error;
1889 1889
1890 mutex_spin_enter(&qcy->qcy_crypto_mtx); 1890 mutex_spin_enter(&qcy->qcy_crypto_mtx);
1891 1891
1892 if (qcy->qcy_session_free_count == 0) { 1892 if (qcy->qcy_session_free_count == 0) {
1893 QAT_EVCNT_INCR(&qcy->qcy_ev_no_sess); 1893 QAT_EVCNT_INCR(&qcy->qcy_ev_no_sess);
1894 mutex_spin_exit(&qcy->qcy_crypto_mtx); 1894 mutex_spin_exit(&qcy->qcy_crypto_mtx);
1895 return ENOBUFS; 1895 return ENOBUFS;
1896 } 1896 }
1897 qs = qcy->qcy_session_free[--qcy->qcy_session_free_count]; 1897 qs = qcy->qcy_session_free[--qcy->qcy_session_free_count];
1898 QAT_EVCNT_INCR(&qcy->qcy_ev_new_sess); 1898 QAT_EVCNT_INCR(&qcy->qcy_ev_new_sess);
1899 1899
1900 mutex_spin_exit(&qcy->qcy_crypto_mtx); 1900 mutex_spin_exit(&qcy->qcy_crypto_mtx);
1901 1901
1902 qs->qs_status = QAT_SESSION_STATUS_ACTIVE; 1902 qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
1903 qs->qs_inflight = 0; 1903 qs->qs_inflight = 0;
1904 *lid = qs->qs_lid; 1904 *lid = qs->qs_lid;
1905 1905
1906 error = 0; 1906 error = 0;
1907 while (cri) { 1907 while (cri) {
1908 switch (cri->cri_alg) { 1908 switch (cri->cri_alg) {
1909 case CRYPTO_DES_CBC: 1909 case CRYPTO_DES_CBC:
1910 case CRYPTO_3DES_CBC: 1910 case CRYPTO_3DES_CBC:
1911 case CRYPTO_AES_CBC: 1911 case CRYPTO_AES_CBC:
1912 if (crie != NULL) 1912 if (crie != NULL)
1913 error = EINVAL; 1913 error = EINVAL;
1914 crie = cri; 1914 crie = cri;
1915 break; 1915 break;
1916 case CRYPTO_MD5_HMAC_96: 1916 case CRYPTO_MD5_HMAC_96:
1917 case CRYPTO_SHA1_HMAC_96: 1917 case CRYPTO_SHA1_HMAC_96:
1918 case CRYPTO_SHA2_256_HMAC: 1918 case CRYPTO_SHA2_256_HMAC:
1919 case CRYPTO_SHA2_384_HMAC: 1919 case CRYPTO_SHA2_384_HMAC:
1920 case CRYPTO_SHA2_512_HMAC: 1920 case CRYPTO_SHA2_512_HMAC:
1921 if (cria != NULL) 1921 if (cria != NULL)
1922 error = EINVAL; 1922 error = EINVAL;
1923 cria = cri; 1923 cria = cri;
1924 break; 1924 break;
1925 default: 1925 default:
1926 error = EINVAL; 1926 error = EINVAL;
1927 } 1927 }
1928 if (error) 1928 if (error)
1929 goto fail; 1929 goto fail;
1930 cri = cri->cri_next; 1930 cri = cri->cri_next;
1931 } 1931 }
1932 1932
1933	slice = 1;
1934	if (crie != NULL && cria != NULL) {
1935		slice = 2;
1936		/* auth then decrypt */
1937		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
1938		qs->qs_dec_desc.qcd_slices[1] = FW_SLICE_CIPHER;
1939		qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
1940		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
1941		/* encrypt then auth */
1942		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1943		qs->qs_enc_desc.qcd_slices[1] = FW_SLICE_AUTH;
1944		qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
1945		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
1946	} else if (crie != NULL) {
1947		/* decrypt */
1948		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1949		qs->qs_dec_desc.qcd_cipher_dir = HW_CIPHER_DECRYPT;
1950		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
1951		/* encrypt */
1952		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_CIPHER;
1953		qs->qs_enc_desc.qcd_cipher_dir = HW_CIPHER_ENCRYPT;
1954		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_CIPHER;
1955	} else if (cria != NULL) {
1956		/* auth */
1957		qs->qs_dec_desc.qcd_slices[0] = FW_SLICE_AUTH;
1958		qs->qs_dec_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
1959		/* auth */
1960		qs->qs_enc_desc.qcd_slices[0] = FW_SLICE_AUTH;
1961		qs->qs_enc_desc.qcd_cmd_id = FW_LA_CMD_AUTH;
1962	} else {
1963		error = EINVAL;
1964		goto fail;
1965	}
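	/* Terminate each chain with a slice that writes the result to DRAM. */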
1966	qs->qs_dec_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
1967	qs->qs_enc_desc.qcd_slices[slice] = FW_SLICE_DRAM_WR;
1968
1969	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_dec_desc, crie, cria);
1970	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, &qs->qs_enc_desc, crie, cria);
1971
1972	return 0;
1973 fail:
1974	if (qs != NULL) {
1975		mutex_spin_enter(&qs->qs_session_mtx);
1976		qat_crypto_free_session0(qcy, qs);
1977	}
1978	return error;
1979 }
1980
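/*
 * Scrub a descriptor's key material and cached request state with
 * explicit_memset() so the compiler cannot optimize the stores away.
 */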
1981 static inline void
1982 qat_crypto_clean_desc(struct qat_crypto_desc *desc)
1983 {
1984	explicit_memset(desc->qcd_content_desc, 0,
1985	    sizeof(desc->qcd_content_desc));
1986	explicit_memset(desc->qcd_hash_state_prefix_buf, 0,
1987	    sizeof(desc->qcd_hash_state_prefix_buf));
1988	explicit_memset(desc->qcd_req_cache, 0,
1989	    sizeof(desc->qcd_req_cache));
1990 }
1991
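/*
 * Return a session to the free list.  Called with qs_session_mtx held;
 * releases it before taking the device crypto mutex to put the session
 * back on the list.
 */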
1992 int
1993 qat_crypto_free_session0(struct qat_crypto *qcy, struct qat_session *qs)
1994 {
1995
1996	qat_crypto_clean_desc(&qs->qs_dec_desc);
1997	qat_crypto_clean_desc(&qs->qs_enc_desc);
1998	qs->qs_status &= ~QAT_SESSION_STATUS_ACTIVE;
1999
2000	mutex_spin_exit(&qs->qs_session_mtx);
2001
2002	mutex_spin_enter(&qcy->qcy_crypto_mtx);
2003
2004	qcy->qcy_session_free[qcy->qcy_session_free_count++] = qs;
2005	QAT_EVCNT_INCR(&qcy->qcy_ev_free_sess);
2006
2007	mutex_spin_exit(&qcy->qcy_crypto_mtx);
2008
2009	return 0;
2010 }
2011
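/*
 * Called with qs_session_mtx held: if a free has been requested
 * (QAT_SESSION_STATUS_FREEING) and no requests remain in flight,
 * release the session now; either way the session mutex is dropped.
 */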
2012 void
2013 qat_crypto_check_free_session(struct qat_crypto *qcy, struct qat_session *qs)
2014 {
2015
2016	if ((qs->qs_status & QAT_SESSION_STATUS_FREEING) &&
2017	    qs->qs_inflight == 0) {
2018		qat_crypto_free_session0(qcy, qs);
2019	} else {
2020		mutex_spin_exit(&qs->qs_session_mtx);
2021	}
2022 }
2023
2024 int
2025 qat_crypto_free_session(void *arg, uint64_t sid)
2026 {
2027	struct qat_crypto *qcy = arg;
2028	struct qat_session *qs;
2029	int error;