Tue Oct 13 08:03:59 2015 UTC
refactor: separate busdma initialization processing into functions


(knakahara)
cvs diff -r1.354 -r1.355 src/sys/dev/pci/if_wm.c
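
In brief, revision 1.355 lifts the TX/RX descriptor and buffer initialization out of wm_init_locked() into dedicated wm_init_* helpers that mirror the existing wm_alloc_*/wm_free_* pairs, and moves the i82542-vs-i82543 register-offset selection into the new per-queue init paths. The stand-alone C sketch below shows only the resulting call shape; every identifier in it is a simplified stand-in, not the real wm(4) code (see the diff itself for that):

/*
 * Sketch of the structure this commit introduces: one combined entry
 * point delegating to per-direction queue-init helpers, where only the
 * RX side can fail (buffer allocation).  All names and fields here are
 * hypothetical stand-ins for the driver's wm_init_txrx_queues() & co.
 */
#include <stdio.h>

struct softc {
	int txfree;	/* free TX descriptors, cf. sc_txfree */
	int rxptr;	/* next RX descriptor to check, cf. sc_rxptr */
};

static void
init_tx_queue(struct softc *sc)
{
	sc->txfree = 256;	/* reset TX ring state; cannot fail */
}

static int
init_rx_queue(struct softc *sc)
{
	sc->rxptr = 0;		/* reset RX ring state */
	return 0;		/* the driver returns ENOMEM on mbuf shortage */
}

/* Combined entry point, as wm_init_txrx_queues() is for wm_init_locked(). */
static int
init_txrx_queues(struct softc *sc)
{
	init_tx_queue(sc);
	return init_rx_queue(sc);
}

int
main(void)
{
	struct softc sc;

	if (init_txrx_queues(&sc) != 0)
		return 1;
	printf("txfree=%d rxptr=%d\n", sc.txfree, sc.rxptr);
	return 0;
}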

--- src/sys/dev/pci/if_wm.c 2015/10/13 08:00:15 1.354
+++ src/sys/dev/pci/if_wm.c 2015/10/13 08:03:59 1.355
@@ -1,14 +1,14 @@
-/*	$NetBSD: if_wm.c,v 1.354 2015/10/13 08:00:15 knakahara Exp $	*/
+/*	$NetBSD: if_wm.c,v 1.355 2015/10/13 08:03:59 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -73,27 +73,27 @@
  * TODO (in order of importance):
  *
  *	- Check XXX'ed comments
  *	- EEE (Energy Efficiency Ethernet)
  *	- Multi queue
  *	- Image Unique ID
  *	- LPLU other than PCH*
  *	- Virtual Function
  *	- Set LED correctly (based on contents in EEPROM)
  *	- Rework how parameters are loaded from the EEPROM.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.354 2015/10/13 08:00:15 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.355 2015/10/13 08:03:59 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/callout.h>
 #include <sys/mbuf.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
@@ -543,34 +543,41 @@ static int wm_add_rxbuf(struct wm_softc
 static void wm_rxdrain(struct wm_softc *);
 static int wm_init(struct ifnet *);
 static int wm_init_locked(struct ifnet *);
 static void wm_stop(struct ifnet *, int);
 static void wm_stop_locked(struct ifnet *, int);
 static int wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
	    uint32_t *, uint8_t *);
 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
 static void wm_82547_txfifo_stall(void *);
 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
 /* DMA related */
 static int wm_alloc_tx_descs(struct wm_softc *);
 static void wm_free_tx_descs(struct wm_softc *);
+static void wm_init_tx_descs(struct wm_softc *);
 static int wm_alloc_rx_descs(struct wm_softc *);
 static void wm_free_rx_descs(struct wm_softc *);
+static void wm_init_rx_descs(struct wm_softc *);
 static int wm_alloc_tx_buffer(struct wm_softc *);
 static void wm_free_tx_buffer(struct wm_softc *);
+static void wm_init_tx_buffer(struct wm_softc *);
 static int wm_alloc_rx_buffer(struct wm_softc *);
 static void wm_free_rx_buffer(struct wm_softc *);
+static int wm_init_rx_buffer(struct wm_softc *);
+static void wm_init_tx_queue(struct wm_softc *);
+static int wm_init_rx_queue(struct wm_softc *);
 static int wm_alloc_txrx_queues(struct wm_softc *);
 static void wm_free_txrx_queues(struct wm_softc *);
+static int wm_init_txrx_queues(struct wm_softc *);
 /* Start */
 static void wm_start(struct ifnet *);
 static void wm_start_locked(struct ifnet *);
 static int wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
	    uint32_t *, uint32_t *, bool *);
 static void wm_nq_start(struct ifnet *);
 static void wm_nq_start_locked(struct ifnet *);
 /* Interrupt */
 static int wm_txeof(struct wm_softc *);
 static void wm_rxeof(struct wm_softc *);
 static void wm_linkintr_gmii(struct wm_softc *, uint32_t);
 static void wm_linkintr_tbi(struct wm_softc *, uint32_t);
 static void wm_linkintr_serdes(struct wm_softc *, uint32_t);
@@ -2275,38 +2282,26 @@ alloc_retry:
 		    CTRL_EXT_SWDPINS_SHIFT;
 	} else {
 		sc->sc_ctrl_ext |=
 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
 		    CTRL_EXT_SWDPIO_SHIFT;
 	}
 #endif
 
 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 #if 0
 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
 #endif
 
-	/*
-	 * Set up some register offsets that are different between
-	 * the i82542 and the i82543 and later chips.
-	 */
-	if (sc->sc_type < WM_T_82543) {
-		sc->sc_rdt_reg = WMREG_OLD_RDT0;
-		sc->sc_tdt_reg = WMREG_OLD_TDT;
-	} else {
-		sc->sc_rdt_reg = WMREG_RDT;
-		sc->sc_tdt_reg = WMREG_TDT;
-	}
-
 	if (sc->sc_type == WM_T_PCH) {
 		uint16_t val;
 
 		/* Save the NVM K1 bit setting */
 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
 
 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
 			sc->sc_nvm_k1_enabled = 1;
 		else
 			sc->sc_nvm_k1_enabled = 0;
 	}
 
 	/*
@@ -4085,27 +4080,26 @@ wm_init(struct ifnet *ifp)
 	int ret;
 
 	WM_BOTH_LOCK(sc);
 	ret = wm_init_locked(ifp);
 	WM_BOTH_UNLOCK(sc);
 
 	return ret;
 }
 
 static int
 wm_init_locked(struct ifnet *ifp)
 {
 	struct wm_softc *sc = ifp->if_softc;
-	struct wm_rxsoft *rxs;
 	int i, j, trynum, error = 0;
 	uint32_t reg;
 
 	KASSERT(WM_BOTH_LOCKED(sc));
 	/*
 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
 	 * There is a small but measurable benefit to avoiding the adjusment
 	 * of the descriptor so that the headers are aligned, for normal mtu,
 	 * on such platforms.  One possibility is that the DMA itself is
 	 * slightly more efficient if the front of the entire packet (instead
 	 * of the front of the headers) is aligned.
 	 *
 	 * Note we must always set align_tweak to 0 if we are using
@@ -4173,144 +4167,29 @@ wm_init_locked(struct ifnet *ifp)
 		 * XXX implement this division at link speed change!
 		 */
 		/*
 		 * For N interrupts/sec, set this value to:
 		 * 1000000000 / (N * 256).  Note that we set the
 		 * absolute and packet timer values to this value
 		 * divided by 4 to get "simple timer" behavior.
 		 */
 
 
 		sc->sc_itr = 1500;		/* 2604 ints/sec */
 	}
 
-	/* Initialize the transmit descriptor ring. */
-	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
-	wm_cdtxsync(sc, 0, WM_NTXDESC(sc),
-	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-	sc->sc_txfree = WM_NTXDESC(sc);
-	sc->sc_txnext = 0;
-
-	if (sc->sc_type < WM_T_82543) {
-		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
-		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
-		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
-		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
-		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
-		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
-	} else {
-		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
-		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
-		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
-		CSR_WRITE(sc, WMREG_TDH, 0);
-
-		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
-			/*
-			 * Don't write TDT before TCTL.EN is set.
-			 * See the document.
-			 */
-			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
-			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
-			    | TXDCTL_WTHRESH(0));
-		else {
-			/* ITR / 4 */
-			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
-			if (sc->sc_type >= WM_T_82540) {
-				/* should be same */
-				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
-			}
-
-			CSR_WRITE(sc, WMREG_TDT, 0);
-			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
-			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
-			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
-			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
-		}
-	}
-
-	/* Initialize the transmit job descriptors. */
-	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
-		sc->sc_txsoft[i].txs_mbuf = NULL;
-	sc->sc_txsfree = WM_TXQUEUELEN(sc);
-	sc->sc_txsnext = 0;
-	sc->sc_txsdirty = 0;
-
-	/*
-	 * Initialize the receive descriptor and receive job
-	 * descriptor rings.
-	 */
-	if (sc->sc_type < WM_T_82543) {
-		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
-		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
-		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
-		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
-		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
-		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
-
-		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
-		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
-		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
-		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
-		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
-		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
-	} else {
-		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
-		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
-		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
-
-		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
-			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
-				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
-			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
-			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
-			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
-			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
-			    | RXDCTL_WTHRESH(1));
-		} else {
-			CSR_WRITE(sc, WMREG_RDH, 0);
-			CSR_WRITE(sc, WMREG_RDT, 0);
-			/* ITR/4 */
-			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
-			/* MUST be same */
-			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr);
-		}
-	}
-	for (i = 0; i < WM_NRXDESC; i++) {
-		rxs = &sc->sc_rxsoft[i];
-		if (rxs->rxs_mbuf == NULL) {
-			if ((error = wm_add_rxbuf(sc, i)) != 0) {
-				log(LOG_ERR, "%s: unable to allocate or map "
-				    "rx buffer %d, error = %d\n",
-				    device_xname(sc->sc_dev), i, error);
-				/*
-				 * XXX Should attempt to run with fewer receive
-				 * XXX buffers instead of just failing.
-				 */
-				wm_rxdrain(sc);
-				goto out;
-			}
-		} else {
-			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
-				wm_init_rxdesc(sc, i);
-			/*
-			 * For 82575 and newer device, the RX descriptors
-			 * must be initialized after the setting of RCTL.EN in
-			 * wm_set_filter()
-			 */
-		}
-	}
-	sc->sc_rxptr = 0;
-	sc->sc_rxdiscard = 0;
-	WM_RXCHAIN_RESET(sc);
+	error = wm_init_txrx_queues(sc);
+	if (error)
+		goto out;
 
 	/*
 	 * Clear out the VLAN table -- we don't use it (yet).
 	 */
 	CSR_WRITE(sc, WMREG_VET, 0);
 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
 		trynum = 10; /* Due to hw errata */
 	else
 		trynum = 1;
 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
 		for (j = 0; j < trynum; j++)
 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
 
@@ -5363,26 +5242,236 @@ fail_0:
  * wm_free_quques:
  *	Free {tx,rx}descs and {tx,rx} buffers
  */
 static void
 wm_free_txrx_queues(struct wm_softc *sc)
 {
 
 	wm_free_rx_buffer(sc);
 	wm_free_rx_descs(sc);
 	wm_free_tx_buffer(sc);
 	wm_free_tx_descs(sc);
 }
 
+static void
+wm_init_tx_descs(struct wm_softc *sc)
+{
+
+	KASSERT(WM_TX_LOCKED(sc));
+
+	/* Initialize the transmit descriptor ring. */
+	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
+	wm_cdtxsync(sc, 0, WM_NTXDESC(sc),
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+	sc->sc_txfree = WM_NTXDESC(sc);
+	sc->sc_txnext = 0;
+
+	if (sc->sc_type < WM_T_82543) {
+		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
+		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
+		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
+		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
+		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
+		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
+	} else {
+		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
+		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
+		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
+		CSR_WRITE(sc, WMREG_TDH, 0);
+
+		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
+			/*
+			 * Don't write TDT before TCTL.EN is set.
+			 * See the document.
+			 */
+			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_QUEUE_ENABLE
+			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
+			    | TXDCTL_WTHRESH(0));
+		else {
+			/* ITR / 4 */
+			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
+			if (sc->sc_type >= WM_T_82540) {
+				/* should be same */
+				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
+			}
+
+			CSR_WRITE(sc, WMREG_TDT, 0);
+			CSR_WRITE(sc, WMREG_TXDCTL(0), TXDCTL_PTHRESH(0) |
+			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
+			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
+			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
+		}
+	}
+}
+
+static void
+wm_init_tx_buffer(struct wm_softc *sc)
+{
+	int i;
+
+	KASSERT(WM_TX_LOCKED(sc));
+
+	/* Initialize the transmit job descriptors. */
+	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
+		sc->sc_txsoft[i].txs_mbuf = NULL;
+	sc->sc_txsfree = WM_TXQUEUELEN(sc);
+	sc->sc_txsnext = 0;
+	sc->sc_txsdirty = 0;
+}
+
+static void
+wm_init_tx_queue(struct wm_softc *sc)
+{
+
+	KASSERT(WM_TX_LOCKED(sc));
+
+	/*
+	 * Set up some register offsets that are different between
+	 * the i82542 and the i82543 and later chips.
+	 */
+	if (sc->sc_type < WM_T_82543) {
+		sc->sc_tdt_reg = WMREG_OLD_TDT;
+	} else {
+		sc->sc_tdt_reg = WMREG_TDT;
+	}
+
+	wm_init_tx_descs(sc);
+	wm_init_tx_buffer(sc);
+}
+
+static void
+wm_init_rx_descs(struct wm_softc *sc)
+{
+
+	KASSERT(WM_RX_LOCKED(sc));
+
+	/*
+	 * Initialize the receive descriptor and receive job
+	 * descriptor rings.
+	 */
+	if (sc->sc_type < WM_T_82543) {
+		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
+		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
+		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
+		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
+		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
+		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
+		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
+
+		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
+		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
+		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
+		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
+		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
+		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
+	} else {
+		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
+		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
+		CSR_WRITE(sc, WMREG_RDLEN,
+		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
+
+		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
+			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
+				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
+			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
+			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
+			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
+			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
+			    | RXDCTL_WTHRESH(1));
+		} else {
+			CSR_WRITE(sc, WMREG_RDH, 0);
+			CSR_WRITE(sc, WMREG_RDT, 0);
+			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
+			CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
+		}
+	}
+}
+
+static int
+wm_init_rx_buffer(struct wm_softc *sc)
+{
+	struct wm_rxsoft *rxs;
+	int error, i;
+
+	KASSERT(WM_RX_LOCKED(sc));
+
+	for (i = 0; i < WM_NRXDESC; i++) {
+		rxs = &sc->sc_rxsoft[i];
+		if (rxs->rxs_mbuf == NULL) {
+			if ((error = wm_add_rxbuf(sc, i)) != 0) {
+				log(LOG_ERR, "%s: unable to allocate or map "
+				    "rx buffer %d, error = %d\n",
+				    device_xname(sc->sc_dev), i, error);
+				/*
+				 * XXX Should attempt to run with fewer receive
+				 * XXX buffers instead of just failing.
+				 */
+				wm_rxdrain(sc);
+				return ENOMEM;
+			}
+		} else {
+			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
+				wm_init_rxdesc(sc, i);
+			/*
+			 * For 82575 and newer device, the RX descriptors
+			 * must be initialized after the setting of RCTL.EN in
+			 * wm_set_filter()
+			 */
+		}
+	}
+	sc->sc_rxptr = 0;
+	sc->sc_rxdiscard = 0;
+	WM_RXCHAIN_RESET(sc);
+
+	return 0;
+}
+
+static int
+wm_init_rx_queue(struct wm_softc *sc)
+{
+
+	KASSERT(WM_RX_LOCKED(sc));
+
+	/*
+	 * Set up some register offsets that are different between
+	 * the i82542 and the i82543 and later chips.
+	 */
+	if (sc->sc_type < WM_T_82543) {
+		sc->sc_rdt_reg = WMREG_OLD_RDT0;
+	} else {
+		sc->sc_rdt_reg = WMREG_RDT;
+	}
+
+	wm_init_rx_descs(sc);
+	return wm_init_rx_buffer(sc);
+}
+
+/*
+ * wm_init_quques:
+ *	Initialize {tx,rx}descs and {tx,rx} buffers
+ */
+static int
+wm_init_txrx_queues(struct wm_softc *sc)
+{
+	int error;
+
+	KASSERT(WM_BOTH_LOCKED(sc));
+
+	wm_init_tx_queue(sc);
+	error = wm_init_rx_queue(sc);
+
+	return error;
+}
+
 /*
  * wm_start:		[ifnet interface function]
  *
  *	Start packet transmission on the interface.
  */
 static void
 wm_start(struct ifnet *ifp)
 {
 	struct wm_softc *sc = ifp->if_softc;
 
 	WM_TX_LOCK(sc);
 	if (!sc->sc_stopping)
 		wm_start_locked(ifp);