Thu Mar 11 16:00:25 2021 UTC ()
Pull up the following (all via patch) requested by msaitoh in ticket #1231:

	sys/dev/pci/ixgbe/ixgbe.c			1.259, 1.278-1.279
	sys/dev/pci/ixgbe/ixgbe.h			1.75
	sys/dev/pci/ixgbe/ixgbe_netbsd.h		1.12
	sys/dev/pci/ixgbe/ixgbe_vf.c			1.24-1.26
	sys/dev/pci/ixgbe/ixgbe_x550.c			1.17
	sys/dev/pci/ixgbe/ixv.c				1.155-1.156
	sys/dev/pci/ixgbe/ix_txrx.c			1.64-1.67
	sys/dev/pci/files.pci				1.436
	share/man/man4/ixg.4				1.13-1.14
	share/man/man4/ixv.4				1.6-1.7

- Fix a problem where the RX path stalled when mbuf clusters were
  exhausted.
- Modify some parameters to reduce packet drops. See the manual's
  OPTIONS section for details, and the usage sketch after this list.
- ixv(4): The maximum number of queue pairs is 8, not 7. Correctly
  reset the hardware.
- Add "TX " to the "Queue No Descriptor Available" evcnt(9) name to
  make it easier to understand.
- Fix a bug where some advertised speeds couldn't be set with
  hw.ixgN.advertise_speed if both 2.5G and 5G were set. Fix the error
  message, too.
- Fix typos in comments and debug messages.
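
As a usage sketch (the multiplier value below is illustrative, not a
recommendation): the per-queue jumbo cluster pool is sized as
IXGBE_JCLNUM_MULTI * (number of RX descriptors), so if the
"Rx no jumbo mbuf" evcnt(9) counter grows, the pool can be enlarged
from the kernel configuration and then verified at runtime:

	# kernel config fragment; 4 is an arbitrary example value
	options 	IXGBE_JCLNUM_MULTI=4

	# after boot: current pool size and the exhaustion counter
	sysctl hw.ixg0.num_jcl_per_queue
	vmstat -e | grep 'Rx no jumbo mbuf'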


(martin)
diff -r1.12 -r1.12.4.1 src/share/man/man4/ixg.4
diff -r1.4.2.1 -r1.4.2.2 src/share/man/man4/ixv.4
diff -r1.413.2.2 -r1.413.2.3 src/sys/dev/pci/files.pci
diff -r1.54.2.4 -r1.54.2.5 src/sys/dev/pci/ixgbe/ix_txrx.c
diff -r1.199.2.12 -r1.199.2.13 src/sys/dev/pci/ixgbe/ixgbe.c
diff -r1.56.2.3 -r1.56.2.4 src/sys/dev/pci/ixgbe/ixgbe.h
diff -r1.11 -r1.11.4.1 src/sys/dev/pci/ixgbe/ixgbe_netbsd.h
diff -r1.18.2.2 -r1.18.2.3 src/sys/dev/pci/ixgbe/ixgbe_vf.c
diff -r1.16 -r1.16.8.1 src/sys/dev/pci/ixgbe/ixgbe_x540.c
diff -r1.125.2.10 -r1.125.2.11 src/sys/dev/pci/ixgbe/ixv.c

cvs diff -r1.12 -r1.12.4.1 src/share/man/man4/ixg.4

--- src/share/man/man4/ixg.4 2018/05/09 08:01:16 1.12
+++ src/share/man/man4/ixg.4 2021/03/11 16:00:24 1.12.4.1
@@ -1,4 +1,4 @@
-.\" $NetBSD: ixg.4,v 1.12 2018/05/09 08:01:16 wiz Exp $
+.\" $NetBSD: ixg.4,v 1.12.4.1 2021/03/11 16:00:24 martin Exp $
 .\"
 .\" Copyright (c) 2001-2008, Intel Corporation
 .\" All rights reserved.
@@ -33,7 +33,7 @@
 .\"
 .\" $FreeBSD: src/share/man/man4/ixgbe.4,v 1.3 2010/12/19 23:54:31 yongari Exp $
 .\"
-.Dd May 9, 2018
+.Dd March 9, 2021
 .Dt IXG 4
 .Os
 .Sh NAME
@@ -83,6 +83,26 @@
 .\" with a supported adapter, email the specific information related to the
 .\" issue to
 .\" .Aq freebsdnic@mailbox.intel.com .
+.Sh OPTIONS
+The
+.Nm
+driver doesn't use the common
+.Xr MCLGET 9
+interface and use the driver specific cluster allocation mechanism.
+If it's exhausted, the
+.Xr evcnt 9
+counter "ixgX qY Rx no jumbo mbuf" is incremented.
+If this is observed,
+the number can be changed by the following config parameter:
+.Bl -tag -width IXGBE_JCLNUM_MULTI -offset 3n
+.It Dv IXGBE_JCLNUM_MULTI
+The number of RX jumbo buffers (clusters) per queue is calculated by
+.Dv IXGBE_JCLNUM_MULTI
+* (number of rx descriptors).
+The total number of clusters per queue is available via the
+.Li hw.ixgN.num_jcl_per_queue
+.Xr sysctl 7 .
+.El
 .Sh SEE ALSO
 .Xr arp 4 ,
 .Xr ixv 4 ,

cvs diff -r1.4.2.1 -r1.4.2.2 src/share/man/man4/ixv.4

--- src/share/man/man4/ixv.4 2019/09/26 19:07:23 1.4.2.1
+++ src/share/man/man4/ixv.4 2021/03/11 16:00:24 1.4.2.2
@@ -1,4 +1,4 @@
-.\" $NetBSD: ixv.4,v 1.4.2.1 2019/09/26 19:07:23 martin Exp $
+.\" $NetBSD: ixv.4,v 1.4.2.2 2021/03/11 16:00:24 martin Exp $
 .\"
 .\" Copyright (c) 2018 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -27,7 +27,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd September 5, 2019
+.Dd March 9, 2021
 .Dt IXV 4
 .Os
 .Sh NAME
@@ -43,6 +43,26 @@
 It can be used on a
 .Nx
 guest that the host supports SR-IOV.
+.Sh OPTIONS
+The
+.Nm
+driver doesn't use the common
+.Xr MCLGET 9
+interface and use the driver specific cluster allocation mechanism.
+If it's exhausted, the
+.Xr evcnt 9
+counter "ixgX qY Rx no jumbo mbuf" is incremented.
+If this is observed,
+the number can be changed by the following config parameter:
+.Bl -tag -width IXGBE_JCLNUM_MULTI -offset 3n
+.It Dv IXGBE_JCLNUM_MULTI
+The number of RX jumbo buffers (clusters) per queue is calculated by
+.Dv IXGBE_JCLNUM_MULTI
+* (number of rx descriptors).
+The total number of clusters per queue is available with the
+.Li hw.ixgN.num_jcl_per_queue
+.Xr sysctl 7 .
+.El
 .Sh SEE ALSO
 .Xr arp 4 ,
 .Xr ixg 4 ,

cvs diff -r1.413.2.2 -r1.413.2.3 src/sys/dev/pci/files.pci

--- src/sys/dev/pci/files.pci 2020/07/07 10:29:05 1.413.2.2
+++ src/sys/dev/pci/files.pci 2021/03/11 16:00:24 1.413.2.3
@@ -1,4 +1,4 @@
-# $NetBSD: files.pci,v 1.413.2.2 2020/07/07 10:29:05 martin Exp $
+# $NetBSD: files.pci,v 1.413.2.3 2021/03/11 16:00:24 martin Exp $
 #
 # Config file and device description for machine-independent PCI code.
 # Included by ports that need it.  Requires that the SCSI files be
@@ -691,6 +691,7 @@
 file	dev/pci/ixgbe/ixgbe_vf.c	ixg | ixv
 file	dev/pci/ixgbe/if_bypass.c	ixg | ixv
 file	dev/pci/ixgbe/if_fdir.c		ixg | ixv
+defparam opt_ixgbe.h	IXGBE_JCLNUM_MULTI
 
 # This appears to be the driver for virtual instances of i82599.
 device	ixv: ether, ifnet, arp, mii, mii_phy
731file dev/pci/if_tlp_pci.c tlp_pci 732file dev/pci/if_tlp_pci.c tlp_pci
732 733
733# Bit3 PCI-VME mod. 617 734# Bit3 PCI-VME mod. 617
734device btvmei: vmebus 735device btvmei: vmebus
735attach btvmei at pci 736attach btvmei at pci
736file dev/pci/btvmei.c btvmei 737file dev/pci/btvmei.c btvmei
737#file dev/pci/btvmei_dma.c btvmei 738#file dev/pci/btvmei_dma.c btvmei
738#file dev/pci/btvmei_cntlrdma.c btvmei 739#file dev/pci/btvmei_cntlrdma.c btvmei
739 740
740# Alteon ACEnic Gigabit Ethernet controller 741# Alteon ACEnic Gigabit Ethernet controller
741device ti: ether, ifnet, arp 742device ti: ether, ifnet, arp
742attach ti at pci 743attach ti at pci
743file dev/pci/if_ti.c ti 744file dev/pci/if_ti.c ti
744 745
745# Adaptec AIC-6915 Ethernet interface 746# Adaptec AIC-6915 Ethernet interface
746attach sf at pci with sf_pci 747attach sf at pci with sf_pci
747file dev/pci/if_sf_pci.c sf_pci 748file dev/pci/if_sf_pci.c sf_pci
748 749
749# Sundance Tech. ST-201 10/100 Ethernet 750# Sundance Tech. ST-201 10/100 Ethernet
750device ste: ether, ifnet, arp, mii, mii_bitbang 751device ste: ether, ifnet, arp, mii, mii_bitbang
751attach ste at pci 752attach ste at pci
752file dev/pci/if_ste.c ste 753file dev/pci/if_ste.c ste
753 754
754# YENTA PCI-Cardbus bridge 755# YENTA PCI-Cardbus bridge
755#device cbb: cbbus, pcmciabus 756#device cbb: cbbus, pcmciabus
756device cbb: pcmciaslot 757device cbb: pcmciaslot
757attach cbb at pci with cbb_pci 758attach cbb at pci with cbb_pci
758file dev/pci/pccbb.c cbb 759file dev/pci/pccbb.c cbb
759 760
760# Tundra Universe PCI-VME adapter 761# Tundra Universe PCI-VME adapter
761define univ_pci 762define univ_pci
762file dev/pci/universe_pci.c univ_pci 763file dev/pci/universe_pci.c univ_pci
763 764
764# Bit3 PCI-VME mod. 2706 765# Bit3 PCI-VME mod. 2706
765device btvmeii: vmebus, univ_pci 766device btvmeii: vmebus, univ_pci
766attach btvmeii at pci 767attach btvmeii at pci
767file dev/pci/btvmeii.c btvmeii 768file dev/pci/btvmeii.c btvmeii
768 769
769# VIA VT82C686A/VT8231 PM Timer and Hardware Monitor 770# VIA VT82C686A/VT8231 PM Timer and Hardware Monitor
770device viaenv: acpipmtimer, sysmon_envsys 771device viaenv: acpipmtimer, sysmon_envsys
771attach viaenv at pci 772attach viaenv at pci
772file dev/pci/viaenv.c viaenv 773file dev/pci/viaenv.c viaenv
773 774
774# Intel PIIX4 power management controller 775# Intel PIIX4 power management controller
775device piixpm: i2cbus, acpipmtimer 776device piixpm: i2cbus, acpipmtimer
776attach piixpm at pci 777attach piixpm at pci
777file dev/pci/piixpm.c piixpm 778file dev/pci/piixpm.c piixpm
778 779
779# AMD 768MPX power management controller 780# AMD 768MPX power management controller
780defflag opt_amdpm.h AMDPM_RND_COUNTERS 781defflag opt_amdpm.h AMDPM_RND_COUNTERS
781device amdpm: i2cbus, acpipmtimer 782device amdpm: i2cbus, acpipmtimer
782attach amdpm at pci 783attach amdpm at pci
783file dev/pci/amdpm.c amdpm 784file dev/pci/amdpm.c amdpm
784file dev/pci/amdpm_smbus.c amdpm 785file dev/pci/amdpm_smbus.c amdpm
785 786
786# Hi/fn 7751 787# Hi/fn 7751
787device hifn: opencrypto 788device hifn: opencrypto
788attach hifn at pci 789attach hifn at pci
789file dev/pci/hifn7751.c hifn 790file dev/pci/hifn7751.c hifn
790 791
791# Bluesteelnet 5501/5601, Broadcom 580x/582x security processor 792# Bluesteelnet 5501/5601, Broadcom 580x/582x security processor
792device ubsec: opencrypto 793device ubsec: opencrypto
793attach ubsec at pci 794attach ubsec at pci
794file dev/pci/ubsec.c ubsec 795file dev/pci/ubsec.c ubsec
795 796
796# Aironet PC4500/PC4800 797# Aironet PC4500/PC4800
797attach an at pci with an_pci 798attach an at pci with an_pci
798file dev/pci/if_an_pci.c an_pci 799file dev/pci/if_an_pci.c an_pci
799 800
800# ADMtek ADM8211 PCI/Mini-PCI 801# ADMtek ADM8211 PCI/Mini-PCI
801attach atw at pci with atw_pci 802attach atw at pci with atw_pci
802file dev/pci/if_atw_pci.c atw_pci 803file dev/pci/if_atw_pci.c atw_pci
803 804
804# Realtek RTL8180 PCI/Mini-PCI 805# Realtek RTL8180 PCI/Mini-PCI
805attach rtw at pci with rtw_pci 806attach rtw at pci with rtw_pci
806file dev/pci/if_rtw_pci.c rtw_pci 807file dev/pci/if_rtw_pci.c rtw_pci
807 808
808# Realtek RTL8188CE Mini-PCIe 809# Realtek RTL8188CE Mini-PCIe
809device rtwn: ifnet, arp, wlan, firmload 810device rtwn: ifnet, arp, wlan, firmload
810attach rtwn at pci 811attach rtwn at pci
811file dev/pci/if_rtwn.c rtwn 812file dev/pci/if_rtwn.c rtwn
812 813
813# Ralink RT2500/RT2600 PCI/Mini-PCI 814# Ralink RT2500/RT2600 PCI/Mini-PCI
814attach ral at pci with ral_pci 815attach ral at pci with ral_pci
815file dev/pci/if_ral_pci.c ral_pci 816file dev/pci/if_ral_pci.c ral_pci
816 817
817# Marvell Libertas Open 818# Marvell Libertas Open
818attach malo at pci with malo_pci 819attach malo at pci with malo_pci
819file dev/pci/if_malo_pci.c malo_pci 820file dev/pci/if_malo_pci.c malo_pci
820 821
821# Intersil Prism2.5 Mini-PCI 822# Intersil Prism2.5 Mini-PCI
822attach wi at pci with wi_pci 823attach wi at pci with wi_pci
823file dev/pci/if_wi_pci.c wi_pci 824file dev/pci/if_wi_pci.c wi_pci
824 825
825# IrDA devices 826# IrDA devices
826# Toshiba Fast Infrared Type O IrDA driver 827# Toshiba Fast Infrared Type O IrDA driver
827device oboe: irbus, irdasir 828device oboe: irbus, irdasir
828attach oboe at pci 829attach oboe at pci
829file dev/pci/oboe.c oboe 830file dev/pci/oboe.c oboe
830 831
831# Middle Digital, Inc. PCI-Weasel serial console board control 832# Middle Digital, Inc. PCI-Weasel serial console board control
832# devices (watchdog timer, etc.) 833# devices (watchdog timer, etc.)
833device weasel: sysmon_wdog 834device weasel: sysmon_wdog
834attach weasel at pci with weasel_pci 835attach weasel at pci with weasel_pci
835file dev/pci/weasel_pci.c weasel 836file dev/pci/weasel_pci.c weasel
836 837
837# Game adapter (joystick) 838# Game adapter (joystick)
838attach joy at pci with joy_pci 839attach joy at pci with joy_pci
839file dev/pci/joy_pci.c joy_pci 840file dev/pci/joy_pci.c joy_pci
840 841
841# ATI Mach64 framebuffer console driver 842# ATI Mach64 framebuffer console driver
842defflag opt_machfb.h MACHFB_DEBUG 843defflag opt_machfb.h MACHFB_DEBUG
843device machfb: wsemuldisplaydev, rasops8, fb, vcons, videomode, edid, drm, glyphcache 844device machfb: wsemuldisplaydev, rasops8, fb, vcons, videomode, edid, drm, glyphcache
844attach machfb at pci 845attach machfb at pci
845file dev/pci/machfb.c machfb 846file dev/pci/machfb.c machfb
846 847
847# 3Dfx Voodoo3 framebuffer console driver 848# 3Dfx Voodoo3 framebuffer console driver
848device voodoofb: wsemuldisplaydev, rasops8, vcons, videomode, drm, i2cbus, i2c_bitbang, ddc_read_edid, edid 849device voodoofb: wsemuldisplaydev, rasops8, vcons, videomode, drm, i2cbus, i2c_bitbang, ddc_read_edid, edid
849attach voodoofb at pci 850attach voodoofb at pci
850file dev/pci/voodoofb.c voodoofb 851file dev/pci/voodoofb.c voodoofb
851 852
852# VIA UniChrome framebuffer console driver 853# VIA UniChrome framebuffer console driver
853device unichromefb: wsemuldisplaydev, rasops16, rasops32, vcons, drm 854device unichromefb: wsemuldisplaydev, rasops16, rasops32, vcons, drm
854attach unichromefb at pci 855attach unichromefb at pci
855file dev/pci/unichromefb.c unichromefb needs-flag 856file dev/pci/unichromefb.c unichromefb needs-flag
856 857
857# ATI Radeon framebuffer console driver 858# ATI Radeon framebuffer console driver
858# (Note: to enable the BIOS parser, add options RADEON_BIOS_INIT to the config) 859# (Note: to enable the BIOS parser, add options RADEON_BIOS_INIT to the config)
859device radeonfb: wsemuldisplaydev, videomode, rasops8, rasops32, vcons, splash, i2cbus, i2c_bitbang, ddc_read_edid, edid, drm, glyphcache 860device radeonfb: wsemuldisplaydev, videomode, rasops8, rasops32, vcons, splash, i2cbus, i2c_bitbang, ddc_read_edid, edid, drm, glyphcache
860attach radeonfb at pci 861attach radeonfb at pci
861file dev/pci/radeonfb.c radeonfb 862file dev/pci/radeonfb.c radeonfb
862file dev/pci/radeonfb_i2c.c radeonfb 863file dev/pci/radeonfb_i2c.c radeonfb
863file dev/pci/radeonfb_bios.c radeonfb 864file dev/pci/radeonfb_bios.c radeonfb
864defflag opt_radeonfb.h RADEONFB_DEBUG 865defflag opt_radeonfb.h RADEONFB_DEBUG
865defflag opt_radeonfb.h RADEONFB_BIOS_INIT 866defflag opt_radeonfb.h RADEONFB_BIOS_INIT
866defflag opt_radeonfb.h RADEONFB_BIOS_DEBUG 867defflag opt_radeonfb.h RADEONFB_BIOS_DEBUG
867defflag opt_radeonfb.h RADEONFB_MMAP_BARS 868defflag opt_radeonfb.h RADEONFB_MMAP_BARS
868defflag opt_radeonfb.h RADEONFB_DEPTH_32 869defflag opt_radeonfb.h RADEONFB_DEPTH_32
869defflag opt_radeonfb.h RADEONFB_ALWAYS_ACCEL_PUTCHAR 870defflag opt_radeonfb.h RADEONFB_ALWAYS_ACCEL_PUTCHAR
870 871
871# Chelsio Terminator 3 (T3) 10 gigabit ethernet 872# Chelsio Terminator 3 (T3) 10 gigabit ethernet
872device cxgbc { } 873device cxgbc { }
873attach cxgbc at pci 874attach cxgbc at pci
874device cxgb: ether, ifnet, arp 875device cxgb: ether, ifnet, arp
875attach cxgb at cxgbc 876attach cxgb at cxgbc
876file dev/pci/cxgb/cxgb_main.c cxgbc | cxgb 877file dev/pci/cxgb/cxgb_main.c cxgbc | cxgb
877file dev/pci/cxgb/cxgb_mc5.c cxgbc | cxgb 878file dev/pci/cxgb/cxgb_mc5.c cxgbc | cxgb
878file dev/pci/cxgb/cxgb_vsc8211.c cxgbc | cxgb 879file dev/pci/cxgb/cxgb_vsc8211.c cxgbc | cxgb
879file dev/pci/cxgb/cxgb_ael1002.c cxgbc | cxgb 880file dev/pci/cxgb/cxgb_ael1002.c cxgbc | cxgb
880file dev/pci/cxgb/cxgb_mv88e1xxx.c cxgbc | cxgb 881file dev/pci/cxgb/cxgb_mv88e1xxx.c cxgbc | cxgb
881file dev/pci/cxgb/cxgb_vsc7323.c cxgbc | cxgb 882file dev/pci/cxgb/cxgb_vsc7323.c cxgbc | cxgb
882file dev/pci/cxgb/cxgb_xgmac.c cxgbc | cxgb 883file dev/pci/cxgb/cxgb_xgmac.c cxgbc | cxgb
883file dev/pci/cxgb/cxgb_t3_hw.c cxgbc | cxgb 884file dev/pci/cxgb/cxgb_t3_hw.c cxgbc | cxgb
884file dev/pci/cxgb/cxgb_sge.c cxgbc | cxgb 885file dev/pci/cxgb/cxgb_sge.c cxgbc | cxgb
885file dev/pci/cxgb/cxgb_lro.c cxgbc | cxgb 886file dev/pci/cxgb/cxgb_lro.c cxgbc | cxgb
886# file dev/pci/cxgb/cxgb_offload.c cxgbc | cxgb 887# file dev/pci/cxgb/cxgb_offload.c cxgbc | cxgb
887file dev/pci/cxgb/cxgb_l2t.c cxgbc | cxgb 888file dev/pci/cxgb/cxgb_l2t.c cxgbc | cxgb
888file dev/pci/cxgb/cxgb_osdep.c cxgbc | cxgb 889file dev/pci/cxgb/cxgb_osdep.c cxgbc | cxgb
889 890
890# Chips & Technologies 65550 framebuffer console driver 891# Chips & Technologies 65550 framebuffer console driver
891attach chipsfb at pci with chipsfb_pci 892attach chipsfb at pci with chipsfb_pci
892file dev/pci/chipsfb.c chipsfb_pci 893file dev/pci/chipsfb.c chipsfb_pci
893 894
894# 3Com 3c990 895# 3Com 3c990
895device txp: ether, ifnet, arp 896device txp: ether, ifnet, arp
896attach txp at pci 897attach txp at pci
897file dev/pci/if_txp.c txp 898file dev/pci/if_txp.c txp
898 899
899# SysKonnect 900# SysKonnect
900device skc { } 901device skc { }
901attach skc at pci 902attach skc at pci
902device sk: ether, ifnet, arp, mii 903device sk: ether, ifnet, arp, mii
903attach sk at skc 904attach sk at skc
904file dev/pci/if_sk.c skc | sk 905file dev/pci/if_sk.c skc | sk
905 906
906# Broadcom 4401 10/100 Ethernet 907# Broadcom 4401 10/100 Ethernet
907device bce: ether, ifnet, arp, mii 908device bce: ether, ifnet, arp, mii
908attach bce at pci 909attach bce at pci
909file dev/pci/if_bce.c bce 910file dev/pci/if_bce.c bce
910 911
911# Intel PRO/10GbE 912# Intel PRO/10GbE
912device dge: ether, ifnet, arp 913device dge: ether, ifnet, arp
913attach dge at pci 914attach dge at pci
914file dev/pci/if_dge.c dge 915file dev/pci/if_dge.c dge
915 916
916# Realtek 8169 Ethernet controllers 917# Realtek 8169 Ethernet controllers
917attach re at pci with re_pci 918attach re at pci with re_pci
918file dev/pci/if_re_pci.c re_pci 919file dev/pci/if_re_pci.c re_pci
919 920
920# Intel PRO/Wireless 2100 921# Intel PRO/Wireless 2100
921device ipw: ifnet, arp, wlan, firmload 922device ipw: ifnet, arp, wlan, firmload
922attach ipw at pci 923attach ipw at pci
923file dev/pci/if_ipw.c ipw 924file dev/pci/if_ipw.c ipw
924 925
925# Intel PRO/Wireless 2200BG/2915ABG 926# Intel PRO/Wireless 2200BG/2915ABG
926device iwi: ifnet, arp, wlan, firmload 927device iwi: ifnet, arp, wlan, firmload
927attach iwi at pci 928attach iwi at pci
928file dev/pci/if_iwi.c iwi 929file dev/pci/if_iwi.c iwi
929 930
930# Intel PRO/Wireless 3945ABG 931# Intel PRO/Wireless 3945ABG
931device wpi: ifnet, arp, wlan, firmload 932device wpi: ifnet, arp, wlan, firmload
932attach wpi at pci 933attach wpi at pci
933file dev/pci/if_wpi.c wpi 934file dev/pci/if_wpi.c wpi
934 935
935# Intel PRO/Wireless 4965AGN Mini-PCI Adapter 936# Intel PRO/Wireless 4965AGN Mini-PCI Adapter
936device iwn: ifnet, arp, wlan, firmload 937device iwn: ifnet, arp, wlan, firmload
937attach iwn at pci 938attach iwn at pci
938file dev/pci/if_iwn.c iwn 939file dev/pci/if_iwn.c iwn
939 940
940# Intel Centrino 7260 941# Intel Centrino 7260
941device iwm: ifnet, arp, wlan, firmload 942device iwm: ifnet, arp, wlan, firmload
942attach iwm at pci 943attach iwm at pci
943file dev/pci/if_iwm.c iwm 944file dev/pci/if_iwm.c iwm
944 945
945# Workbit NinjaSCSI-32 controllers 946# Workbit NinjaSCSI-32 controllers
946# device declaration in sys/conf/files 947# device declaration in sys/conf/files
947attach njs at pci with njs_pci 948attach njs at pci with njs_pci
948file dev/pci/njs_pci.c njs_pci 949file dev/pci/njs_pci.c njs_pci
949 950
950# S2io Xframe 10 Gigabit ethernet (Xframe driver) 951# S2io Xframe 10 Gigabit ethernet (Xframe driver)
951device xge: ether, ifnet, arp 952device xge: ether, ifnet, arp
952attach xge at pci  953attach xge at pci
953file dev/pci/if_xge.c xge 954file dev/pci/if_xge.c xge
954 955
955# Via Velocity 612x 10/100/1000 Ethernet 956# Via Velocity 612x 10/100/1000 Ethernet
956device vge: ether, ifnet, arp, mii 957device vge: ether, ifnet, arp, mii
957attach vge at pci 958attach vge at pci
958file dev/pci/if_vge.c vge 959file dev/pci/if_vge.c vge
959 960
960# Atheros 5210/5211/5212 PCI/Mini-PCI 961# Atheros 5210/5211/5212 PCI/Mini-PCI
961attach ath at pci with ath_pci 962attach ath at pci with ath_pci
962file dev/pci/if_ath_pci.c ath_pci 963file dev/pci/if_ath_pci.c ath_pci
963 964
964# Atheros AR9k (802.11 a/g/n) 965# Atheros AR9k (802.11 a/g/n)
965attach athn at pci with athn_pci 966attach athn at pci with athn_pci
966file dev/pci/if_athn_pci.c athn_pci 967file dev/pci/if_athn_pci.c athn_pci
967 968
968# NVIDIA nForce Ethernet 969# NVIDIA nForce Ethernet
969device nfe: ether, ifnet, arp, mii, mii_phy 970device nfe: ether, ifnet, arp, mii, mii_phy
970attach nfe at pci 971attach nfe at pci
971file dev/pci/if_nfe.c nfe 972file dev/pci/if_nfe.c nfe
972 973
973# MICREL Ethernet 974# MICREL Ethernet
974device kse: ether, ifnet, arp 975device kse: ether, ifnet, arp
975attach kse at pci 976attach kse at pci
976file dev/pci/if_kse.c kse 977file dev/pci/if_kse.c kse
977 978
978# Yukon 2 979# Yukon 2
979device mskc { } 980device mskc { }
980attach mskc at pci 981attach mskc at pci
981device msk: ether, ifnet, arp, mii 982device msk: ether, ifnet, arp, mii
982attach msk at mskc 983attach msk at mskc
983file dev/pci/if_msk.c mskc | msk 984file dev/pci/if_msk.c mskc | msk
984 985
985# SD Host Controller 986# SD Host Controller
986attach sdhc at pci with sdhc_pci 987attach sdhc at pci with sdhc_pci
987file dev/pci/sdhc_pci.c sdhc_pci 988file dev/pci/sdhc_pci.c sdhc_pci
988 989
989# generic framebuffer console driver, PCI frontend 990# generic framebuffer console driver, PCI frontend
990attach genfb at pci with genfb_pci : splash 991attach genfb at pci with genfb_pci : splash
991file dev/pci/genfb_pci.c genfb_pci 992file dev/pci/genfb_pci.c genfb_pci
992 993
993# NVIDIA nForce2/3/4 SMBus controller 994# NVIDIA nForce2/3/4 SMBus controller
994device nfsmbc { } 995device nfsmbc { }
995attach nfsmbc at pci 996attach nfsmbc at pci
996device nfsmb: i2cbus 997device nfsmb: i2cbus
997attach nfsmb at nfsmbc 998attach nfsmb at nfsmbc
998file dev/pci/nfsmb.c nfsmbc | nfsmb 999file dev/pci/nfsmb.c nfsmbc | nfsmb
999 1000
1000# Intel ICH SMBus controller 1001# Intel ICH SMBus controller
1001device ichsmb: i2cbus 1002device ichsmb: i2cbus
1002attach ichsmb at pci 1003attach ichsmb at pci
1003file dev/pci/ichsmb.c ichsmb 1004file dev/pci/ichsmb.c ichsmb
1004 1005
1005# ATI Rage 128 framebuffer console driver 1006# ATI Rage 128 framebuffer console driver
1006device r128fb: wsemuldisplaydev, rasops8, rasops32, vcons, videomode, i2cbus, i2c_bitbang, glyphcache 1007device r128fb: wsemuldisplaydev, rasops8, rasops32, vcons, videomode, i2cbus, i2c_bitbang, glyphcache
1007attach r128fb at pci 1008attach r128fb at pci
1008file dev/pci/r128fb.c r128fb 1009file dev/pci/r128fb.c r128fb
1009defflag opt_r128fb.h R128FB_DEBUG 1010defflag opt_r128fb.h R128FB_DEBUG
1010defflag opt_r128fb.h R128FB_WAIT 1011defflag opt_r128fb.h R128FB_WAIT
1011 1012
1012# Attansic/Atheros L1 Gigabit-Ethernet 1013# Attansic/Atheros L1 Gigabit-Ethernet
1013device age: ether, ifnet, arp, mii, mii_phy 1014device age: ether, ifnet, arp, mii, mii_phy
1014attach age at pci 1015attach age at pci
1015file dev/pci/if_age.c age 1016file dev/pci/if_age.c age
1016 1017
1017# Attansic/Atheros L1C/L2C Gigabit Ethernet 1018# Attansic/Atheros L1C/L2C Gigabit Ethernet
1018device alc: ether, ifnet, arp, mii, mii_phy 1019device alc: ether, ifnet, arp, mii, mii_phy
1019attach alc at pci 1020attach alc at pci
1020file dev/pci/if_alc.c alc 1021file dev/pci/if_alc.c alc
1021 1022
1022# Attansic/Atheros L1E Gigabit Ethernet 1023# Attansic/Atheros L1E Gigabit Ethernet
1023device ale: ether, ifnet, arp, mii, mii_phy 1024device ale: ether, ifnet, arp, mii, mii_phy
1024attach ale at pci 1025attach ale at pci
1025file dev/pci/if_ale.c ale 1026file dev/pci/if_ale.c ale
1026 1027
1027# Atheros/Attansic L2 Fast-Ethernet 1028# Atheros/Attansic L2 Fast-Ethernet
1028device lii: ether, ifnet, arp, mii 1029device lii: ether, ifnet, arp, mii
1029attach lii at pci 1030attach lii at pci
1030file dev/pci/if_lii.c lii 1031file dev/pci/if_lii.c lii
1031 1032
1032# Silicon Image SteelVine SATA-II controllers 1033# Silicon Image SteelVine SATA-II controllers
1033attach siisata at pci with siisata_pci 1034attach siisata at pci with siisata_pci
1034file dev/pci/siisata_pci.c siisata_pci 1035file dev/pci/siisata_pci.c siisata_pci
1035 1036
1036# Acer Labs M7101 power management controller 1037# Acer Labs M7101 power management controller
1037device alipm: i2cbus 1038device alipm: i2cbus
1038attach alipm at pci 1039attach alipm at pci
1039file dev/pci/alipm.c alipm 1040file dev/pci/alipm.c alipm
1040 1041
1041# 1042#
1042# Broadcom AirForce / Apple Airport Extreme 1043# Broadcom AirForce / Apple Airport Extreme
1043# 1044#
1044attach bwi at pci with bwi_pci 1045attach bwi at pci with bwi_pci
1045file dev/pci/if_bwi_pci.c bwi_pci 1046file dev/pci/if_bwi_pci.c bwi_pci
1046 1047
1047# Broadcom FullMAC USB wireless adapter 1048# Broadcom FullMAC USB wireless adapter
1048attach bwfm at pci with bwfm_pci: firmload 1049attach bwfm at pci with bwfm_pci: firmload
1049file dev/pci/if_bwfm_pci.c bwfm_pci 1050file dev/pci/if_bwfm_pci.c bwfm_pci
1050 1051
1051# Marvell Serial-ATA Host Controller 1052# Marvell Serial-ATA Host Controller
1052attach mvsata at pci with mvsata_pci 1053attach mvsata at pci with mvsata_pci
1053file dev/pci/mvsata_pci.c mvsata_pci 1054file dev/pci/mvsata_pci.c mvsata_pci
1054 1055
1055include "dev/pci/voyager/files.voyager" 1056include "dev/pci/voyager/files.voyager"
1056# Silicon Motion SM502 / Voyager GX 1057# Silicon Motion SM502 / Voyager GX
1057device voyager: i2c_bitbang, voyagerbus, i2cbus 1058device voyager: i2c_bitbang, voyagerbus, i2cbus
1058attach voyager at pci 1059attach voyager at pci
1059file dev/pci/voyager.c voyager 1060file dev/pci/voyager.c voyager
1060defflag opt_voyager.h VOYAGER_DEBUG 1061defflag opt_voyager.h VOYAGER_DEBUG
1061 1062
1062# High Definition Audio 1063# High Definition Audio
1063attach hdaudio at pci with hdaudio_pci 1064attach hdaudio at pci with hdaudio_pci
1064file dev/pci/hdaudio_pci.c hdaudio_pci 1065file dev/pci/hdaudio_pci.c hdaudio_pci
1065 1066
1066# Permedia 2 / Sun PGX32 / Raptor 1067# Permedia 2 / Sun PGX32 / Raptor
1067device pm2fb: wsemuldisplaydev, rasops8, rasops32, vcons, videomode, i2cbus, i2c_bitbang, ddc_read_edid, edid, glyphcache 1068device pm2fb: wsemuldisplaydev, rasops8, rasops32, vcons, videomode, i2cbus, i2c_bitbang, ddc_read_edid, edid, glyphcache
1068attach pm2fb at pci 1069attach pm2fb at pci
1069file dev/pci/pm2fb.c pm2fb 1070file dev/pci/pm2fb.c pm2fb
1070defflag opt_pm2fb.h PM2FB_DEBUG 1071defflag opt_pm2fb.h PM2FB_DEBUG
1071 1072
1072# Permedia 3 / Oxygen VX1 / Proformance 3  1073# Permedia 3 / Oxygen VX1 / Proformance 3
1073device pm3fb: wsemuldisplaydev, rasops8, vcons, videomode, i2cbus, i2c_bitbang, ddc_read_edid, edid 1074device pm3fb: wsemuldisplaydev, rasops8, vcons, videomode, i2cbus, i2c_bitbang, ddc_read_edid, edid
1074attach pm3fb at pci 1075attach pm3fb at pci
1075file dev/pci/pm3fb.c pm3fb 1076file dev/pci/pm3fb.c pm3fb
1076 1077
1077# 3Dlabs Wildcat / Sun XVR-500, 1200, Expert3D etc. 1078# 3Dlabs Wildcat / Sun XVR-500, 1200, Expert3D etc.
1078device wcfb: wsemuldisplaydev, rasops8, vcons 1079device wcfb: wsemuldisplaydev, rasops8, vcons
1079attach wcfb at pci 1080attach wcfb at pci
1080file dev/pci/wcfb.c wcfb 1081file dev/pci/wcfb.c wcfb
1081defflag opt_wcfb.h WCFB_DEBUG 1082defflag opt_wcfb.h WCFB_DEBUG
1082 1083
1083# Domex 536, 5380-compatible SCSI HBA 1084# Domex 536, 5380-compatible SCSI HBA
1084attach nca at pci with nca_pci 1085attach nca at pci with nca_pci
1085file dev/pci/nca_pci.c nca_pci 1086file dev/pci/nca_pci.c nca_pci
1086 1087
1087# Agere ET1310/1301 Ethernet 1088# Agere ET1310/1301 Ethernet
1088device et: ether, ifnet, arp, mii, mii_phy 1089device et: ether, ifnet, arp, mii, mii_phy
1089attach et at pci 1090attach et at pci
1090file dev/pci/if_et.c et 1091file dev/pci/if_et.c et
1091 1092
1092# RDC Semiconductor R6040 10/100 Ethernet 1093# RDC Semiconductor R6040 10/100 Ethernet
1093device vte: ether, ifnet, arp, mii, mii_phy 1094device vte: ether, ifnet, arp, mii, mii_phy
1094attach vte at pci 1095attach vte at pci
1095file dev/pci/if_vte.c vte 1096file dev/pci/if_vte.c vte
1096 1097
1097# Conexant CX23880-series DTV interface 1098# Conexant CX23880-series DTV interface
1098device cxdtv: dtvbus, i2c_bitbang, i2cbus, i2cexec, tvpll, nxt2k, lg3303 1099device cxdtv: dtvbus, i2c_bitbang, i2cbus, i2cexec, tvpll, nxt2k, lg3303
1099attach cxdtv at pci 1100attach cxdtv at pci
1100file dev/pci/cxdtv.c cxdtv 1101file dev/pci/cxdtv.c cxdtv
1101file dev/pci/cxdtv_boards.c cxdtv 1102file dev/pci/cxdtv_boards.c cxdtv
1102 1103
1103# Conexant CX23885-series DTV interface 1104# Conexant CX23885-series DTV interface
1104device coram: dtvbus, i2cbus, i2cexec, mt2131, cx24227 1105device coram: dtvbus, i2cbus, i2cexec, mt2131, cx24227
1105attach coram at pci 1106attach coram at pci
1106file dev/pci/coram.c coram 1107file dev/pci/coram.c coram
1107 1108
1108# QUANCOM Electronic GmbH PWDOG1 1109# QUANCOM Electronic GmbH PWDOG1
1109device pwdog: sysmon_envsys 1110device pwdog: sysmon_envsys
1110attach pwdog at pci 1111attach pwdog at pci
1111file dev/pci/pwdog.c pwdog 1112file dev/pci/pwdog.c pwdog
1112 1113
1113# IBM 4810 BSP cash drawer port 1114# IBM 4810 BSP cash drawer port
1114device ibmcd: gpiobus 1115device ibmcd: gpiobus
1115attach ibmcd at pci 1116attach ibmcd at pci
1116file dev/pci/ibmcd.c ibmcd 1117file dev/pci/ibmcd.c ibmcd
1117 1118
1118# SIS 315 Pro frame buffer 1119# SIS 315 Pro frame buffer
1119device sisfb: wsemuldisplaydev, rasops8, rasops15, rasops16, rasops32, vcons 1120device sisfb: wsemuldisplaydev, rasops8, rasops15, rasops16, rasops32, vcons
1120attach sisfb at pci 1121attach sisfb at pci
1121file dev/pci/sisfb.c sisfb needs-flag 1122file dev/pci/sisfb.c sisfb needs-flag
1122 1123
1123attach virtio at pci with virtio_pci 1124attach virtio at pci with virtio_pci
1124file dev/pci/virtio_pci.c virtio_pci 1125file dev/pci/virtio_pci.c virtio_pci
1125 1126
1126# Silicon Motion SM712(LynxEM+) frame buffer 1127# Silicon Motion SM712(LynxEM+) frame buffer
1127device lynxfb: wsemuldisplaydev, rasops16 1128device lynxfb: wsemuldisplaydev, rasops16
1128attach lynxfb at pci 1129attach lynxfb at pci
1129file dev/pci/lynxfb.c lynxfb needs-flag 1130file dev/pci/lynxfb.c lynxfb needs-flag
1130 1131
1131include "dev/pci/igma/files.igma" 1132include "dev/pci/igma/files.igma"
1132# Intel GMA 1133# Intel GMA
1133device igma: igmabus, i2cbus, i2c_bitbang, ddc_read_edid, edid 1134device igma: igmabus, i2cbus, i2c_bitbang, ddc_read_edid, edid
1134attach igma at pci 1135attach igma at pci
1135file dev/pci/igma.c igma 1136file dev/pci/igma.c igma
1136 1137
1137# 3Dfx Voodoo Graphics 1138# 3Dfx Voodoo Graphics
1138defflag opt_tdvfb.h TDVFB_CONSOLE 1139defflag opt_tdvfb.h TDVFB_CONSOLE
1139device tdvfb: wsemuldisplaydev, rasops16, rasops32, vcons, videomode 1140device tdvfb: wsemuldisplaydev, rasops16, rasops32, vcons, videomode
1140attach tdvfb at pci 1141attach tdvfb at pci
1141file dev/pci/tdvfb.c tdvfb  1142file dev/pci/tdvfb.c tdvfb
1142 1143
1143# nvidia geforce framebuffer console driver 1144# nvidia geforce framebuffer console driver
1144device gffb: wsemuldisplaydev, rasops8, vcons, videomode, i2cbus, i2c_bitbang, glyphcache 1145device gffb: wsemuldisplaydev, rasops8, vcons, videomode, i2cbus, i2c_bitbang, glyphcache
1145attach gffb at pci 1146attach gffb at pci
1146file dev/pci/gffb.c gffb 1147file dev/pci/gffb.c gffb
1147defflag opt_gffb.h GFFB_DEBUG 1148defflag opt_gffb.h GFFB_DEBUG
1148 1149
1149# Realtek RTS5209/RTS5229 Card Reader driver 1150# Realtek RTS5209/RTS5229 Card Reader driver
1150attach rtsx at pci with rtsx_pci 1151attach rtsx at pci with rtsx_pci
1151file dev/pci/rtsx_pci.c rtsx_pci 1152file dev/pci/rtsx_pci.c rtsx_pci
1152 1153
1153# NVM Express Controller 1154# NVM Express Controller
1154attach nvme at pci with nvme_pci 1155attach nvme at pci with nvme_pci
1155file dev/pci/nvme_pci.c nvme_pci 1156file dev/pci/nvme_pci.c nvme_pci
1156 1157
1157# PCI graphics devices with DRM/KMS 1158# PCI graphics devices with DRM/KMS
1158include "external/bsd/drm2/pci/files.drmkms_pci" 1159include "external/bsd/drm2/pci/files.drmkms_pci"
1159 1160
1160# Intel S1200,C2000 (non-pch) SMBus controller 1161# Intel S1200,C2000 (non-pch) SMBus controller
1161device ismt: i2cbus 1162device ismt: i2cbus
1162attach ismt at pci 1163attach ismt at pci
1163file dev/pci/ismt.c ismt 1164file dev/pci/ismt.c ismt
1164 1165
1165# Amazon Elastic Network Adapter (ENA) family 1166# Amazon Elastic Network Adapter (ENA) family
1166device ena: ether, ifnet, arp 1167device ena: ether, ifnet, arp
1167attach ena at pci 1168attach ena at pci
1168file dev/pci/if_ena.c ena 1169file dev/pci/if_ena.c ena
1169file external/bsd/ena-com/ena_com.c ena 1170file external/bsd/ena-com/ena_com.c ena
1170file external/bsd/ena-com/ena_eth_com.c ena 1171file external/bsd/ena-com/ena_eth_com.c ena
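
The defparam of IXGBE_JCLNUM_MULTI added to files.pci above is what makes
the new option settable from a kernel configuration file; per the ticket
notes it is one of the parameters changed to reduce packet dropping, and
its meaning and default are documented in the updated OPTIONS sections of
ixg(4) and ixv(4).  A minimal sketch of such a setting follows; the value 3
is only an illustrative assumption, not a recommended or default value:

	# kernel configuration fragment (illustrative)
	options 	IXGBE_JCLNUM_MULTI=3
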

cvs diff -r1.54.2.4 -r1.54.2.5 src/sys/dev/pci/ixgbe/ix_txrx.c

--- src/sys/dev/pci/ixgbe/ix_txrx.c 2020/07/10 11:35:51 1.54.2.4
+++ src/sys/dev/pci/ixgbe/ix_txrx.c 2021/03/11 16:00:24 1.54.2.5
@@ -1,2406 +1,2454 @@ @@ -1,2406 +1,2454 @@
1/* $NetBSD: ix_txrx.c,v 1.54.2.4 2020/07/10 11:35:51 martin Exp $ */ 1/* $NetBSD: ix_txrx.c,v 1.54.2.5 2021/03/11 16:00:24 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 327031 2017-12-20 18:15:06Z erj $*/
36 36
37/* 37/*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc. 38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved. 39 * All rights reserved.
40 * 40 *
41 * This code is derived from software contributed to The NetBSD Foundation 41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc. 42 * by Coyote Point Systems, Inc.
43 * 43 *
44 * Redistribution and use in source and binary forms, with or without 44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions 45 * modification, are permitted provided that the following conditions
46 * are met: 46 * are met:
47 * 1. Redistributions of source code must retain the above copyright 47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer. 48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright 49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the 50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution. 51 * documentation and/or other materials provided with the distribution.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#include "opt_inet.h" 66#include "opt_inet.h"
67#include "opt_inet6.h" 67#include "opt_inet6.h"
68 68
69#include "ixgbe.h" 69#include "ixgbe.h"
70 70
71/* 71/*
72 * HW RSC control: 72 * HW RSC control:
73 * this feature only works with 73 * this feature only works with
74 * IPv4, and only on 82599 and later. 74 * IPv4, and only on 82599 and later.
75 * Also, this will cause IP forwarding to 75 * Also, this will cause IP forwarding to
76 * fail, and that can't be controlled by 76 * fail, and that can't be controlled by
77 * the stack as LRO can be. For all these 77 * the stack as LRO can be. For all these
78 * reasons I've deemed it best to leave 78 * reasons I've deemed it best to leave
79 * this off and not bother with a tunable 79 * this off and not bother with a tunable
80 * interface; enabling it would require 80 * interface; enabling it would require
81 * a recompile. 81 * a recompile.
82 */ 82 */
83static bool ixgbe_rsc_enable = FALSE; 83static bool ixgbe_rsc_enable = FALSE;
84 84
85/* 85/*
86 * For Flow Director: this is the 86 * For Flow Director: this is the
87 * number of TX packets we sample 87 * number of TX packets we sample
88 * for the filter pool; this means 88 * for the filter pool; this means
89 * every 20th packet will be probed. 89 * every 20th packet will be probed.
90 * 90 *
91 * This feature can be disabled by 91 * This feature can be disabled by
92 * setting this to 0. 92 * setting this to 0.
93 */ 93 */
94static int atr_sample_rate = 20; 94static int atr_sample_rate = 20;
95 95
96/************************************************************************ 96/************************************************************************
97 * Local Function prototypes 97 * Local Function prototypes
98 ************************************************************************/ 98 ************************************************************************/
99static void ixgbe_setup_transmit_ring(struct tx_ring *); 99static void ixgbe_setup_transmit_ring(struct tx_ring *);
100static void ixgbe_free_transmit_buffers(struct tx_ring *); 100static void ixgbe_free_transmit_buffers(struct tx_ring *);
101static int ixgbe_setup_receive_ring(struct rx_ring *); 101static int ixgbe_setup_receive_ring(struct rx_ring *);
102static void ixgbe_free_receive_buffers(struct rx_ring *); 102static void ixgbe_free_receive_buffers(struct rx_ring *);
103static void ixgbe_rx_checksum(u32, struct mbuf *, u32, 103static void ixgbe_rx_checksum(u32, struct mbuf *, u32,
104 struct ixgbe_hw_stats *); 104 struct ixgbe_hw_stats *);
105static void ixgbe_refresh_mbufs(struct rx_ring *, int); 105static void ixgbe_refresh_mbufs(struct rx_ring *, int);
106static void ixgbe_drain(struct ifnet *, struct tx_ring *); 106static void ixgbe_drain(struct ifnet *, struct tx_ring *);
107static int ixgbe_xmit(struct tx_ring *, struct mbuf *); 107static int ixgbe_xmit(struct tx_ring *, struct mbuf *);
108static int ixgbe_tx_ctx_setup(struct tx_ring *, 108static int ixgbe_tx_ctx_setup(struct tx_ring *,
109 struct mbuf *, u32 *, u32 *); 109 struct mbuf *, u32 *, u32 *);
110static int ixgbe_tso_setup(struct tx_ring *, 110static int ixgbe_tso_setup(struct tx_ring *,
111 struct mbuf *, u32 *, u32 *); 111 struct mbuf *, u32 *, u32 *);
112static __inline void ixgbe_rx_discard(struct rx_ring *, int); 112static __inline void ixgbe_rx_discard(struct rx_ring *, int);
113static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, 113static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
114 struct mbuf *, u32); 114 struct mbuf *, u32);
115static int ixgbe_dma_malloc(struct adapter *, bus_size_t, 115static int ixgbe_dma_malloc(struct adapter *, bus_size_t,
116 struct ixgbe_dma_alloc *, int); 116 struct ixgbe_dma_alloc *, int);
117static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); 117static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
118 118
119static void ixgbe_setup_hw_rsc(struct rx_ring *); 119static void ixgbe_setup_hw_rsc(struct rx_ring *);
120 120
121/************************************************************************ 121/************************************************************************
122 * ixgbe_legacy_start_locked - Transmit entry point 122 * ixgbe_legacy_start_locked - Transmit entry point
123 * 123 *
124 * Called by the stack to initiate a transmit. 124 * Called by the stack to initiate a transmit.
125 * The driver will remain in this routine as long as there are 125 * The driver will remain in this routine as long as there are
126 * packets to transmit and transmit resources are available. 126 * packets to transmit and transmit resources are available.
127 * In case resources are not available, the stack is notified 127 * In case resources are not available, the stack is notified
128 * and the packet is requeued. 128 * and the packet is requeued.
129 ************************************************************************/ 129 ************************************************************************/
130int 130int
131ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr) 131ixgbe_legacy_start_locked(struct ifnet *ifp, struct tx_ring *txr)
132{ 132{
133 int rc; 133 int rc;
134 struct mbuf *m_head; 134 struct mbuf *m_head;
135 struct adapter *adapter = txr->adapter; 135 struct adapter *adapter = txr->adapter;
136 136
137 IXGBE_TX_LOCK_ASSERT(txr); 137 IXGBE_TX_LOCK_ASSERT(txr);
138 138
139 if (adapter->link_active != LINK_STATE_UP) { 139 if (adapter->link_active != LINK_STATE_UP) {
140 /* 140 /*
141 * discard all packets buffered in IFQ to avoid 141 * discard all packets buffered in IFQ to avoid
142 * sending old packets at next link up timing. 142 * sending old packets at next link up timing.
143 */ 143 */
144 ixgbe_drain(ifp, txr); 144 ixgbe_drain(ifp, txr);
145 return (ENETDOWN); 145 return (ENETDOWN);
146 } 146 }
147 if ((ifp->if_flags & IFF_RUNNING) == 0) 147 if ((ifp->if_flags & IFF_RUNNING) == 0)
148 return (ENETDOWN); 148 return (ENETDOWN);
149 if (txr->txr_no_space) 149 if (txr->txr_no_space)
150 return (ENETDOWN); 150 return (ENETDOWN);
151 151
152 while (!IFQ_IS_EMPTY(&ifp->if_snd)) { 152 while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
153 if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) 153 if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
154 break; 154 break;
155 155
156 IFQ_POLL(&ifp->if_snd, m_head); 156 IFQ_POLL(&ifp->if_snd, m_head);
157 if (m_head == NULL) 157 if (m_head == NULL)
158 break; 158 break;
159 159
160 if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) { 160 if ((rc = ixgbe_xmit(txr, m_head)) == EAGAIN) {
161 break; 161 break;
162 } 162 }
163 IFQ_DEQUEUE(&ifp->if_snd, m_head); 163 IFQ_DEQUEUE(&ifp->if_snd, m_head);
164 if (rc != 0) { 164 if (rc != 0) {
165 m_freem(m_head); 165 m_freem(m_head);
166 continue; 166 continue;
167 } 167 }
168 168
169 /* Send a copy of the frame to the BPF listener */ 169 /* Send a copy of the frame to the BPF listener */
170 bpf_mtap(ifp, m_head, BPF_D_OUT); 170 bpf_mtap(ifp, m_head, BPF_D_OUT);
171 } 171 }
172 172
173 return IXGBE_SUCCESS; 173 return IXGBE_SUCCESS;
174} /* ixgbe_legacy_start_locked */ 174} /* ixgbe_legacy_start_locked */
175 175
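
One point worth noting in ixgbe_legacy_start_locked() above: the loop uses
IFQ_POLL to look at the head packet and only issues IFQ_DEQUEUE once
ixgbe_xmit() has not returned EAGAIN, so a packet that cannot get transmit
resources stays on the interface queue and is retried later rather than
dropped.  Below is a hedged, self-contained sketch of that poll-then-dequeue
pattern, simplified to the EAGAIN case; every name in it (queue_peek,
try_send, and so on) is an illustrative placeholder, not the driver's or
the kernel's API:

	#include <errno.h>
	#include <stdio.h>

	struct pkt { struct pkt *next; int id; };
	struct queue { struct pkt *head; };

	static struct pkt *queue_peek(struct queue *q) { return q->head; }
	static void queue_remove(struct queue *q) { q->head = q->head->next; }

	/* Pretend the hardware runs out of resources at packet 3. */
	static int try_send(const struct pkt *p) { return p->id == 3 ? EAGAIN : 0; }

	int
	main(void)
	{
		struct pkt pkts[4] = {
			{ &pkts[1], 1 }, { &pkts[2], 2 }, { &pkts[3], 3 }, { NULL, 4 }
		};
		struct queue q = { &pkts[0] };
		struct pkt *p;

		while ((p = queue_peek(&q)) != NULL) {	/* like IFQ_POLL */
			if (try_send(p) == EAGAIN)
				break;			/* leave it queued; retry later */
			queue_remove(&q);		/* like IFQ_DEQUEUE */
			printf("sent packet %d\n", p->id);
		}
		if ((p = queue_peek(&q)) != NULL)
			printf("packet %d stays queued for retry\n", p->id);
		return 0;
	}
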
176/************************************************************************ 176/************************************************************************
177 * ixgbe_legacy_start 177 * ixgbe_legacy_start
178 * 178 *
179 * Called by the stack, this always uses the first tx ring, 179 * Called by the stack, this always uses the first tx ring,
180 * and should not be used with multiqueue tx enabled. 180 * and should not be used with multiqueue tx enabled.
181 ************************************************************************/ 181 ************************************************************************/
182void 182void
183ixgbe_legacy_start(struct ifnet *ifp) 183ixgbe_legacy_start(struct ifnet *ifp)
184{ 184{
185 struct adapter *adapter = ifp->if_softc; 185 struct adapter *adapter = ifp->if_softc;
186 struct tx_ring *txr = adapter->tx_rings; 186 struct tx_ring *txr = adapter->tx_rings;
187 187
188 if (ifp->if_flags & IFF_RUNNING) { 188 if (ifp->if_flags & IFF_RUNNING) {
189 IXGBE_TX_LOCK(txr); 189 IXGBE_TX_LOCK(txr);
190 ixgbe_legacy_start_locked(ifp, txr); 190 ixgbe_legacy_start_locked(ifp, txr);
191 IXGBE_TX_UNLOCK(txr); 191 IXGBE_TX_UNLOCK(txr);
192 } 192 }
193} /* ixgbe_legacy_start */ 193} /* ixgbe_legacy_start */
194 194
195/************************************************************************ 195/************************************************************************
196 * ixgbe_mq_start - Multiqueue Transmit Entry Point 196 * ixgbe_mq_start - Multiqueue Transmit Entry Point
197 * 197 *
198 * (if_transmit function) 198 * (if_transmit function)
199 ************************************************************************/ 199 ************************************************************************/
200int 200int
201ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m) 201ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
202{ 202{
203 struct adapter *adapter = ifp->if_softc; 203 struct adapter *adapter = ifp->if_softc;
204 struct tx_ring *txr; 204 struct tx_ring *txr;
205 int i; 205 int i;
206#ifdef RSS 206#ifdef RSS
207 uint32_t bucket_id; 207 uint32_t bucket_id;
208#endif 208#endif
209 209
210 /* 210 /*
211 * When doing RSS, map it to the same outbound queue 211 * When doing RSS, map it to the same outbound queue
212 * as the incoming flow would be mapped to. 212 * as the incoming flow would be mapped to.
213 * 213 *
214 * If everything is set up correctly, it should be the 214 * If everything is set up correctly, it should be the
215 * same bucket as the one the current CPU maps to. 215 * same bucket as the one the current CPU maps to.
216 */ 216 */
217#ifdef RSS 217#ifdef RSS
218 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { 218 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
219 if ((adapter->feat_en & IXGBE_FEATURE_RSS) && 219 if ((adapter->feat_en & IXGBE_FEATURE_RSS) &&
220 (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m), 220 (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
221 &bucket_id) == 0)) { 221 &bucket_id) == 0)) {
222 i = bucket_id % adapter->num_queues; 222 i = bucket_id % adapter->num_queues;
223#ifdef IXGBE_DEBUG 223#ifdef IXGBE_DEBUG
224 if (bucket_id > adapter->num_queues) 224 if (bucket_id > adapter->num_queues)
225 if_printf(ifp, 225 if_printf(ifp,
226 "bucket_id (%d) > num_queues (%d)\n", 226 "bucket_id (%d) > num_queues (%d)\n",
227 bucket_id, adapter->num_queues); 227 bucket_id, adapter->num_queues);
228#endif 228#endif
229 } else 229 } else
230 i = m->m_pkthdr.flowid % adapter->num_queues; 230 i = m->m_pkthdr.flowid % adapter->num_queues;
231 } else 231 } else
232#endif /* RSS */ 232#endif /* RSS */
233 i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues; 233 i = (cpu_index(curcpu()) % ncpu) % adapter->num_queues;
234 234
235 /* Check for a hung queue and pick alternative */ 235 /* Check for a hung queue and pick alternative */
236 if (((1ULL << i) & adapter->active_queues) == 0) 236 if (((1ULL << i) & adapter->active_queues) == 0)
237 i = ffs64(adapter->active_queues); 237 i = ffs64(adapter->active_queues);
238 238
239 txr = &adapter->tx_rings[i]; 239 txr = &adapter->tx_rings[i];
240 240
241 if (__predict_false(!pcq_put(txr->txr_interq, m))) { 241 if (__predict_false(!pcq_put(txr->txr_interq, m))) {
242 m_freem(m); 242 m_freem(m);
243 txr->pcq_drops.ev_count++; 243 txr->pcq_drops.ev_count++;
244 return ENOBUFS; 244 return ENOBUFS;
245 } 245 }
246 if (IXGBE_TX_TRYLOCK(txr)) { 246 if (IXGBE_TX_TRYLOCK(txr)) {
247 ixgbe_mq_start_locked(ifp, txr); 247 ixgbe_mq_start_locked(ifp, txr);
248 IXGBE_TX_UNLOCK(txr); 248 IXGBE_TX_UNLOCK(txr);
249 } else { 249 } else {
250 if (adapter->txrx_use_workqueue) { 250 if (adapter->txrx_use_workqueue) {
251 u_int *enqueued; 251 u_int *enqueued;
252 252
253 /* 253 /*
254 * This function itself is not called in interrupt 254 * This function itself is not called in interrupt
255 * context, however it can be called in fast softint 255 * context, however it can be called in fast softint
256 * context right after receiving forwarding packets. 256 * context right after receiving forwarding packets.
257 * So, it is required to protect workqueue from twice 257 * So, it is required to protect workqueue from twice
258 * enqueuing when the machine uses both spontaneous 258 * enqueuing when the machine uses both spontaneous
259 * packets and forwarding packets. 259 * packets and forwarding packets.
260 */ 260 */
261 enqueued = percpu_getref(adapter->txr_wq_enqueued); 261 enqueued = percpu_getref(adapter->txr_wq_enqueued);
262 if (*enqueued == 0) { 262 if (*enqueued == 0) {
263 *enqueued = 1; 263 *enqueued = 1;
264 percpu_putref(adapter->txr_wq_enqueued); 264 percpu_putref(adapter->txr_wq_enqueued);
265 workqueue_enqueue(adapter->txr_wq, 265 workqueue_enqueue(adapter->txr_wq,
266 &txr->wq_cookie, curcpu()); 266 &txr->wq_cookie, curcpu());
267 } else 267 } else
268 percpu_putref(adapter->txr_wq_enqueued); 268 percpu_putref(adapter->txr_wq_enqueued);
269 } else { 269 } else {
270 kpreempt_disable(); 270 kpreempt_disable();
271 softint_schedule(txr->txr_si); 271 softint_schedule(txr->txr_si);
272 kpreempt_enable(); 272 kpreempt_enable();
273 } 273 }
274 } 274 }
275 275
276 return (0); 276 return (0);
277} /* ixgbe_mq_start */ 277} /* ixgbe_mq_start */
278 278
279/************************************************************************ 279/************************************************************************
280 * ixgbe_mq_start_locked 280 * ixgbe_mq_start_locked
281 ************************************************************************/ 281 ************************************************************************/
282int 282int
283ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) 283ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
284{ 284{
285 struct mbuf *next; 285 struct mbuf *next;
286 int enqueued = 0, err = 0; 286 int enqueued = 0, err = 0;
287 287
288 if (txr->adapter->link_active != LINK_STATE_UP) { 288 if (txr->adapter->link_active != LINK_STATE_UP) {
289 /* 289 /*
290 * Discard all packets buffered in txr_interq to avoid 290 * Discard all packets buffered in txr_interq to avoid
291 * sending stale packets when the link next comes up. 291 * sending stale packets when the link next comes up.
292 */ 292 */
293 ixgbe_drain(ifp, txr); 293 ixgbe_drain(ifp, txr);
294 return (ENETDOWN); 294 return (ENETDOWN);
295 } 295 }
296 if ((ifp->if_flags & IFF_RUNNING) == 0) 296 if ((ifp->if_flags & IFF_RUNNING) == 0)
297 return (ENETDOWN); 297 return (ENETDOWN);
298 if (txr->txr_no_space) 298 if (txr->txr_no_space)
299 return (ENETDOWN); 299 return (ENETDOWN);
300 300
301 /* Process the queue */ 301 /* Process the queue */
302 while ((next = pcq_get(txr->txr_interq)) != NULL) { 302 while ((next = pcq_get(txr->txr_interq)) != NULL) {
303 if ((err = ixgbe_xmit(txr, next)) != 0) { 303 if ((err = ixgbe_xmit(txr, next)) != 0) {
304 m_freem(next); 304 m_freem(next);
305 /* All errors are counted in ixgbe_xmit() */ 305 /* All errors are counted in ixgbe_xmit() */
306 break; 306 break;
307 } 307 }
308 enqueued++; 308 enqueued++;
309#if __FreeBSD_version >= 1100036 309#if __FreeBSD_version >= 1100036
310 /* 310 /*
311 * Since we're looking at the tx ring, we can check 311 * Since we're looking at the tx ring, we can check
312 * to see if we're a VF by examining our tail register 312 * to see if we're a VF by examining our tail register
313 * address. 313 * address.
314 */ 314 */
315 if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) && 315 if ((txr->adapter->feat_en & IXGBE_FEATURE_VF) &&
316 (next->m_flags & M_MCAST)) 316 (next->m_flags & M_MCAST))
317 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); 317 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
318#endif 318#endif
319 /* Send a copy of the frame to the BPF listener */ 319 /* Send a copy of the frame to the BPF listener */
320 bpf_mtap(ifp, next, BPF_D_OUT); 320 bpf_mtap(ifp, next, BPF_D_OUT);
321 if ((ifp->if_flags & IFF_RUNNING) == 0) 321 if ((ifp->if_flags & IFF_RUNNING) == 0)
322 break; 322 break;
323 } 323 }
324 324
325 if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter)) 325 if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD(txr->adapter))
326 ixgbe_txeof(txr); 326 ixgbe_txeof(txr);
327 327
328 return (err); 328 return (err);
329} /* ixgbe_mq_start_locked */ 329} /* ixgbe_mq_start_locked */
330 330
331/************************************************************************ 331/************************************************************************
332 * ixgbe_deferred_mq_start 332 * ixgbe_deferred_mq_start
333 * 333 *
334 * Called from a softint and workqueue (indirectly) to drain queued 334 * Called from a softint and workqueue (indirectly) to drain queued
335 * transmit packets. 335 * transmit packets.
336 ************************************************************************/ 336 ************************************************************************/
337void 337void
338ixgbe_deferred_mq_start(void *arg) 338ixgbe_deferred_mq_start(void *arg)
339{ 339{
340 struct tx_ring *txr = arg; 340 struct tx_ring *txr = arg;
341 struct adapter *adapter = txr->adapter; 341 struct adapter *adapter = txr->adapter;
342 struct ifnet *ifp = adapter->ifp; 342 struct ifnet *ifp = adapter->ifp;
343 343
344 IXGBE_TX_LOCK(txr); 344 IXGBE_TX_LOCK(txr);
345 if (pcq_peek(txr->txr_interq) != NULL) 345 if (pcq_peek(txr->txr_interq) != NULL)
346 ixgbe_mq_start_locked(ifp, txr); 346 ixgbe_mq_start_locked(ifp, txr);
347 IXGBE_TX_UNLOCK(txr); 347 IXGBE_TX_UNLOCK(txr);
348} /* ixgbe_deferred_mq_start */ 348} /* ixgbe_deferred_mq_start */
349 349
350/************************************************************************ 350/************************************************************************
351 * ixgbe_deferred_mq_start_work 351 * ixgbe_deferred_mq_start_work
352 * 352 *
353 * Called from a workqueue to drain queued transmit packets. 353 * Called from a workqueue to drain queued transmit packets.
354 ************************************************************************/ 354 ************************************************************************/
355void 355void
356ixgbe_deferred_mq_start_work(struct work *wk, void *arg) 356ixgbe_deferred_mq_start_work(struct work *wk, void *arg)
357{ 357{
358 struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie); 358 struct tx_ring *txr = container_of(wk, struct tx_ring, wq_cookie);
359 struct adapter *adapter = txr->adapter; 359 struct adapter *adapter = txr->adapter;
360 u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued); 360 u_int *enqueued = percpu_getref(adapter->txr_wq_enqueued);
361 *enqueued = 0; 361 *enqueued = 0;
362 percpu_putref(adapter->txr_wq_enqueued); 362 percpu_putref(adapter->txr_wq_enqueued);
363 363
364 ixgbe_deferred_mq_start(txr); 364 ixgbe_deferred_mq_start(txr);
365} /* ixgbe_deferred_mq_start_work */ 365} /* ixgbe_deferred_mq_start_work */
366 366
367/************************************************************************ 367/************************************************************************
368 * ixgbe_drain_all 368 * ixgbe_drain_all
369 ************************************************************************/ 369 ************************************************************************/
370void 370void
371ixgbe_drain_all(struct adapter *adapter) 371ixgbe_drain_all(struct adapter *adapter)
372{ 372{
373 struct ifnet *ifp = adapter->ifp; 373 struct ifnet *ifp = adapter->ifp;
374 struct ix_queue *que = adapter->queues; 374 struct ix_queue *que = adapter->queues;
375 375
376 for (int i = 0; i < adapter->num_queues; i++, que++) { 376 for (int i = 0; i < adapter->num_queues; i++, que++) {
377 struct tx_ring *txr = que->txr; 377 struct tx_ring *txr = que->txr;
378 378
379 IXGBE_TX_LOCK(txr); 379 IXGBE_TX_LOCK(txr);
380 ixgbe_drain(ifp, txr); 380 ixgbe_drain(ifp, txr);
381 IXGBE_TX_UNLOCK(txr); 381 IXGBE_TX_UNLOCK(txr);
382 } 382 }
383} 383}
384 384
385/************************************************************************ 385/************************************************************************
386 * ixgbe_xmit 386 * ixgbe_xmit
387 * 387 *
388 * Maps the mbufs to tx descriptors, allowing the 388 * Maps the mbufs to tx descriptors, allowing the
389 * TX engine to transmit the packets. 389 * TX engine to transmit the packets.
390 * 390 *
391 * Return 0 on success, positive on failure 391 * Return 0 on success, positive on failure
392 ************************************************************************/ 392 ************************************************************************/
393static int 393static int
394ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head) 394ixgbe_xmit(struct tx_ring *txr, struct mbuf *m_head)
395{ 395{
396 struct adapter *adapter = txr->adapter; 396 struct adapter *adapter = txr->adapter;
397 struct ixgbe_tx_buf *txbuf; 397 struct ixgbe_tx_buf *txbuf;
398 union ixgbe_adv_tx_desc *txd = NULL; 398 union ixgbe_adv_tx_desc *txd = NULL;
399 struct ifnet *ifp = adapter->ifp; 399 struct ifnet *ifp = adapter->ifp;
400 int i, j, error; 400 int i, j, error;
401 int first; 401 int first;
402 u32 olinfo_status = 0, cmd_type_len; 402 u32 olinfo_status = 0, cmd_type_len;
403 bool remap = TRUE; 403 bool remap = TRUE;
404 bus_dmamap_t map; 404 bus_dmamap_t map;
405 405
406 /* Basic descriptor defines */ 406 /* Basic descriptor defines */
407 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | 407 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
408 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); 408 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
409 409
410 if (vlan_has_tag(m_head)) 410 if (vlan_has_tag(m_head))
411 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 411 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
412 412
413 /* 413 /*
414 * Important to capture the first descriptor 414 * Important to capture the first descriptor
415 * used because it will contain the index of 415 * used because it will contain the index of
416 * the one we tell the hardware to report back 416 * the one we tell the hardware to report back
417 */ 417 */
418 first = txr->next_avail_desc; 418 first = txr->next_avail_desc;
419 txbuf = &txr->tx_buffers[first]; 419 txbuf = &txr->tx_buffers[first];
420 map = txbuf->map; 420 map = txbuf->map;
421 421
422 /* 422 /*
423 * Map the packet for DMA. 423 * Map the packet for DMA.
424 */ 424 */
425retry: 425retry:
426 error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head, 426 error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, m_head,
427 BUS_DMA_NOWAIT); 427 BUS_DMA_NOWAIT);
428 428
429 if (__predict_false(error)) { 429 if (__predict_false(error)) {
430 struct mbuf *m; 430 struct mbuf *m;
431 431
432 switch (error) { 432 switch (error) {
433 case EAGAIN: 433 case EAGAIN:
434 txr->q_eagain_tx_dma_setup++; 434 txr->q_eagain_tx_dma_setup++;
435 return EAGAIN; 435 return EAGAIN;
436 case ENOMEM: 436 case ENOMEM:
437 txr->q_enomem_tx_dma_setup++; 437 txr->q_enomem_tx_dma_setup++;
438 return EAGAIN; 438 return EAGAIN;
439 case EFBIG: 439 case EFBIG:
 440 /* Try again, but only once */ 440 /* Try again, but only once */
441 if (remap == TRUE) { 441 if (remap == TRUE) {
442 remap = FALSE; 442 remap = FALSE;
443 /* 443 /*
444 * XXX: m_defrag will choke on 444 * XXX: m_defrag will choke on
445 * non-MCLBYTES-sized clusters 445 * non-MCLBYTES-sized clusters
446 */ 446 */
447 txr->q_efbig_tx_dma_setup++; 447 txr->q_efbig_tx_dma_setup++;
448 m = m_defrag(m_head, M_NOWAIT); 448 m = m_defrag(m_head, M_NOWAIT);
449 if (m == NULL) { 449 if (m == NULL) {
450 txr->q_mbuf_defrag_failed++; 450 txr->q_mbuf_defrag_failed++;
451 return ENOBUFS; 451 return ENOBUFS;
452 } 452 }
453 m_head = m; 453 m_head = m;
454 goto retry; 454 goto retry;
455 } else { 455 } else {
456 txr->q_efbig2_tx_dma_setup++; 456 txr->q_efbig2_tx_dma_setup++;
457 return error; 457 return error;
458 } 458 }
459 case EINVAL: 459 case EINVAL:
460 txr->q_einval_tx_dma_setup++; 460 txr->q_einval_tx_dma_setup++;
461 return error; 461 return error;
462 default: 462 default:
463 txr->q_other_tx_dma_setup++; 463 txr->q_other_tx_dma_setup++;
464 return error; 464 return error;
465 } 465 }
466 } 466 }
467 467
468 /* Make certain there are enough descriptors */ 468 /* Make certain there are enough descriptors */
469 if (txr->tx_avail < (map->dm_nsegs + 2)) { 469 if (txr->tx_avail < (map->dm_nsegs + 2)) {
470 txr->txr_no_space = true; 470 txr->txr_no_space = true;
471 txr->no_desc_avail.ev_count++; 471 txr->no_desc_avail.ev_count++;
472 ixgbe_dmamap_unload(txr->txtag, txbuf->map); 472 ixgbe_dmamap_unload(txr->txtag, txbuf->map);
473 return EAGAIN; 473 return EAGAIN;
474 } 474 }
475 475
476 /* 476 /*
477 * Set up the appropriate offload context 477 * Set up the appropriate offload context
478 * this will consume the first descriptor 478 * this will consume the first descriptor
479 */ 479 */
480 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); 480 error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
481 if (__predict_false(error)) { 481 if (__predict_false(error)) {
482 return (error); 482 return (error);
483 } 483 }
484 484
485 /* Do the flow director magic */ 485 /* Do the flow director magic */
486 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && 486 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
487 (txr->atr_sample) && (!adapter->fdir_reinit)) { 487 (txr->atr_sample) && (!adapter->fdir_reinit)) {
488 ++txr->atr_count; 488 ++txr->atr_count;
489 if (txr->atr_count >= atr_sample_rate) { 489 if (txr->atr_count >= atr_sample_rate) {
490 ixgbe_atr(txr, m_head); 490 ixgbe_atr(txr, m_head);
491 txr->atr_count = 0; 491 txr->atr_count = 0;
492 } 492 }
493 } 493 }
494 494
495 olinfo_status |= IXGBE_ADVTXD_CC; 495 olinfo_status |= IXGBE_ADVTXD_CC;
496 i = txr->next_avail_desc; 496 i = txr->next_avail_desc;
497 for (j = 0; j < map->dm_nsegs; j++) { 497 for (j = 0; j < map->dm_nsegs; j++) {
498 bus_size_t seglen; 498 bus_size_t seglen;
499 bus_addr_t segaddr; 499 bus_addr_t segaddr;
500 500
501 txbuf = &txr->tx_buffers[i]; 501 txbuf = &txr->tx_buffers[i];
502 txd = &txr->tx_base[i]; 502 txd = &txr->tx_base[i];
503 seglen = map->dm_segs[j].ds_len; 503 seglen = map->dm_segs[j].ds_len;
504 segaddr = htole64(map->dm_segs[j].ds_addr); 504 segaddr = htole64(map->dm_segs[j].ds_addr);
505 505
506 txd->read.buffer_addr = segaddr; 506 txd->read.buffer_addr = segaddr;
507 txd->read.cmd_type_len = htole32(cmd_type_len | seglen); 507 txd->read.cmd_type_len = htole32(cmd_type_len | seglen);
508 txd->read.olinfo_status = htole32(olinfo_status); 508 txd->read.olinfo_status = htole32(olinfo_status);
509 509
510 if (++i == txr->num_desc) 510 if (++i == txr->num_desc)
511 i = 0; 511 i = 0;
512 } 512 }
513 513
514 txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); 514 txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
515 txr->tx_avail -= map->dm_nsegs; 515 txr->tx_avail -= map->dm_nsegs;
516 txr->next_avail_desc = i; 516 txr->next_avail_desc = i;
517 517
518 txbuf->m_head = m_head; 518 txbuf->m_head = m_head;
519 /* 519 /*
520 * Here we swap the map so the last descriptor, 520 * Here we swap the map so the last descriptor,
 521 * which gets the completion interrupt, has the 521 * which gets the completion interrupt, has the
522 * real map, and the first descriptor gets the 522 * real map, and the first descriptor gets the
523 * unused map from this descriptor. 523 * unused map from this descriptor.
524 */ 524 */
525 txr->tx_buffers[first].map = txbuf->map; 525 txr->tx_buffers[first].map = txbuf->map;
526 txbuf->map = map; 526 txbuf->map = map;
527 bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len, 527 bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len,
528 BUS_DMASYNC_PREWRITE); 528 BUS_DMASYNC_PREWRITE);
529 529
530 /* Set the EOP descriptor that will be marked done */ 530 /* Set the EOP descriptor that will be marked done */
531 txbuf = &txr->tx_buffers[first]; 531 txbuf = &txr->tx_buffers[first];
532 txbuf->eop = txd; 532 txbuf->eop = txd;
533 533
534 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 534 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
535 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 535 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
536 /* 536 /*
 537 * Advance the Transmit Descriptor Tail (TDT); this tells the 537 * Advance the Transmit Descriptor Tail (TDT); this tells the
538 * hardware that this frame is available to transmit. 538 * hardware that this frame is available to transmit.
539 */ 539 */
540 ++txr->total_packets.ev_count; 540 ++txr->total_packets.ev_count;
541 IXGBE_WRITE_REG(&adapter->hw, txr->tail, i); 541 IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
542 542
543 /* 543 /*
544 * XXXX NOMPSAFE: ifp->if_data should be percpu. 544 * XXXX NOMPSAFE: ifp->if_data should be percpu.
545 */ 545 */
546 ifp->if_obytes += m_head->m_pkthdr.len; 546 ifp->if_obytes += m_head->m_pkthdr.len;
547 if (m_head->m_flags & M_MCAST) 547 if (m_head->m_flags & M_MCAST)
548 ifp->if_omcasts++; 548 ifp->if_omcasts++;
549 549
550 /* Mark queue as having work */ 550 /* Mark queue as having work */
551 if (txr->busy == 0) 551 if (txr->busy == 0)
552 txr->busy = 1; 552 txr->busy = 1;
553 553
554 return (0); 554 return (0);
555} /* ixgbe_xmit */ 555} /* ixgbe_xmit */
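
The segment loop in ixgbe_xmit() is the classic circular-ring producer: write each DMA segment into the next slot and wrap the index when it reaches the ring size. A standalone sketch of just that arithmetic (NUM_DESC, post_segments() and the descriptor layout are illustrative, not the hardware's):

#include <stdint.h>
#include <stdio.h>

#define NUM_DESC 8	/* tiny ring for illustration */

struct desc {
	uint64_t addr;
	uint32_t len;
};

static struct desc ring[NUM_DESC];
static int next_avail;		/* mirrors txr->next_avail_desc */
static int tx_avail = NUM_DESC;	/* mirrors txr->tx_avail */

/*
 * Write nsegs descriptors into the circular ring, wrapping the
 * index exactly as ixgbe_xmit() does with "if (++i == num_desc)".
 * Returns the new tail index, the value the driver writes to TDT.
 */
static int
post_segments(const uint64_t *addrs, const uint32_t *lens, int nsegs)
{
	int i = next_avail;

	for (int j = 0; j < nsegs; j++) {
		ring[i].addr = addrs[j];
		ring[i].len = lens[j];
		if (++i == NUM_DESC)	/* wrap to the ring start */
			i = 0;
	}
	tx_avail -= nsegs;
	next_avail = i;
	return i;
}

int
main(void)
{
	uint64_t addrs[3] = { 0x1000, 0x2000, 0x3000 };
	uint32_t lens[3] = { 1500, 1500, 60 };

	next_avail = 6;	/* force a wrap across the ring end */
	printf("new tail: %d, avail: %d\n",
	    post_segments(addrs, lens, 3), tx_avail);	/* tail 1, avail 5 */
	return 0;
}
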
556 556
557/************************************************************************ 557/************************************************************************
558 * ixgbe_drain 558 * ixgbe_drain
559 ************************************************************************/ 559 ************************************************************************/
560static void 560static void
561ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr) 561ixgbe_drain(struct ifnet *ifp, struct tx_ring *txr)
562{ 562{
563 struct mbuf *m; 563 struct mbuf *m;
564 564
565 IXGBE_TX_LOCK_ASSERT(txr); 565 IXGBE_TX_LOCK_ASSERT(txr);
566 566
567 if (txr->me == 0) { 567 if (txr->me == 0) {
568 while (!IFQ_IS_EMPTY(&ifp->if_snd)) { 568 while (!IFQ_IS_EMPTY(&ifp->if_snd)) {
569 IFQ_DEQUEUE(&ifp->if_snd, m); 569 IFQ_DEQUEUE(&ifp->if_snd, m);
570 m_freem(m); 570 m_freem(m);
571 IF_DROP(&ifp->if_snd); 571 IF_DROP(&ifp->if_snd);
572 } 572 }
573 } 573 }
574 574
575 while ((m = pcq_get(txr->txr_interq)) != NULL) { 575 while ((m = pcq_get(txr->txr_interq)) != NULL) {
576 m_freem(m); 576 m_freem(m);
577 txr->pcq_drops.ev_count++; 577 txr->pcq_drops.ev_count++;
578 } 578 }
579} 579}
580 580
581/************************************************************************ 581/************************************************************************
582 * ixgbe_allocate_transmit_buffers 582 * ixgbe_allocate_transmit_buffers
583 * 583 *
584 * Allocate memory for tx_buffer structures. The tx_buffer stores all 584 * Allocate memory for tx_buffer structures. The tx_buffer stores all
585 * the information needed to transmit a packet on the wire. This is 585 * the information needed to transmit a packet on the wire. This is
586 * called only once at attach, setup is done every reset. 586 * called only once at attach, setup is done every reset.
587 ************************************************************************/ 587 ************************************************************************/
588static int 588static int
589ixgbe_allocate_transmit_buffers(struct tx_ring *txr) 589ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
590{ 590{
591 struct adapter *adapter = txr->adapter; 591 struct adapter *adapter = txr->adapter;
592 device_t dev = adapter->dev; 592 device_t dev = adapter->dev;
593 struct ixgbe_tx_buf *txbuf; 593 struct ixgbe_tx_buf *txbuf;
594 int error, i; 594 int error, i;
595 595
596 /* 596 /*
597 * Setup DMA descriptor areas. 597 * Setup DMA descriptor areas.
598 */ 598 */
599 error = ixgbe_dma_tag_create( 599 error = ixgbe_dma_tag_create(
600 /* parent */ adapter->osdep.dmat, 600 /* parent */ adapter->osdep.dmat,
601 /* alignment */ 1, 601 /* alignment */ 1,
602 /* bounds */ 0, 602 /* bounds */ 0,
603 /* maxsize */ IXGBE_TSO_SIZE, 603 /* maxsize */ IXGBE_TSO_SIZE,
604 /* nsegments */ adapter->num_segs, 604 /* nsegments */ adapter->num_segs,
605 /* maxsegsize */ PAGE_SIZE, 605 /* maxsegsize */ PAGE_SIZE,
606 /* flags */ 0, 606 /* flags */ 0,
607 &txr->txtag); 607 &txr->txtag);
608 if (error != 0) { 608 if (error != 0) {
 609 aprint_error_dev(dev, "Unable to allocate TX DMA tag\n"); 609 aprint_error_dev(dev, "Unable to allocate TX DMA tag\n");
610 goto fail; 610 goto fail;
611 } 611 }
612 612
613 txr->tx_buffers = 613 txr->tx_buffers =
614 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) * 614 (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
615 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); 615 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
616 if (txr->tx_buffers == NULL) { 616 if (txr->tx_buffers == NULL) {
617 aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n"); 617 aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n");
618 error = ENOMEM; 618 error = ENOMEM;
619 goto fail; 619 goto fail;
620 } 620 }
621 621
622 /* Create the descriptor buffer dma maps */ 622 /* Create the descriptor buffer dma maps */
623 txbuf = txr->tx_buffers; 623 txbuf = txr->tx_buffers;
624 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { 624 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
625 error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map); 625 error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map);
626 if (error != 0) { 626 if (error != 0) {
627 aprint_error_dev(dev, 627 aprint_error_dev(dev,
628 "Unable to create TX DMA map (%d)\n", error); 628 "Unable to create TX DMA map (%d)\n", error);
629 goto fail; 629 goto fail;
630 } 630 }
631 } 631 }
632 632
633 return 0; 633 return 0;
634fail: 634fail:
 635 /* Free everything; this handles the case where we fail part-way */ 635 /* Free everything; this handles the case where we fail part-way */
636#if 0 /* XXX was FreeBSD */ 636#if 0 /* XXX was FreeBSD */
637 ixgbe_free_transmit_structures(adapter); 637 ixgbe_free_transmit_structures(adapter);
638#else 638#else
639 ixgbe_free_transmit_buffers(txr); 639 ixgbe_free_transmit_buffers(txr);
640#endif 640#endif
641 return (error); 641 return (error);
642} /* ixgbe_allocate_transmit_buffers */ 642} /* ixgbe_allocate_transmit_buffers */
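
The allocation above follows the usual attach-time pattern: get the bookkeeping array first, then one resource per slot, unwinding everything already created if any step fails. A minimal userland sketch of that unwinding, with make_map()/destroy_map() as stand-ins for the bus_dma map calls:

#include <stdio.h>
#include <stdlib.h>

struct buf_sketch {
	void *map;
};

/* Stand-ins for ixgbe_dmamap_create()/ixgbe_dmamap_destroy(). */
static void *make_map(void) { return malloc(16); }
static void destroy_map(void *m) { free(m); }

static struct buf_sketch *
alloc_buffers(int ndesc)
{
	struct buf_sketch *bufs = calloc(ndesc, sizeof(*bufs));

	if (bufs == NULL)
		return NULL;
	for (int i = 0; i < ndesc; i++) {
		bufs[i].map = make_map();
		if (bufs[i].map == NULL) {
			/* Unwind only the slots created so far. */
			while (i-- > 0)
				destroy_map(bufs[i].map);
			free(bufs);
			return NULL;
		}
	}
	return bufs;
}

int
main(void)
{
	int ndesc = 256;
	struct buf_sketch *b = alloc_buffers(ndesc);

	printf("allocation %s\n", b != NULL ? "succeeded" : "failed");
	if (b != NULL) {
		for (int i = 0; i < ndesc; i++)
			destroy_map(b[i].map);
		free(b);
	}
	return 0;
}
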
643 643
644/************************************************************************ 644/************************************************************************
645 * ixgbe_setup_transmit_ring - Initialize a transmit ring. 645 * ixgbe_setup_transmit_ring - Initialize a transmit ring.
646 ************************************************************************/ 646 ************************************************************************/
647static void 647static void
648ixgbe_setup_transmit_ring(struct tx_ring *txr) 648ixgbe_setup_transmit_ring(struct tx_ring *txr)
649{ 649{
650 struct adapter *adapter = txr->adapter; 650 struct adapter *adapter = txr->adapter;
651 struct ixgbe_tx_buf *txbuf; 651 struct ixgbe_tx_buf *txbuf;
652#ifdef DEV_NETMAP 652#ifdef DEV_NETMAP
653 struct netmap_adapter *na = NA(adapter->ifp); 653 struct netmap_adapter *na = NA(adapter->ifp);
654 struct netmap_slot *slot; 654 struct netmap_slot *slot;
655#endif /* DEV_NETMAP */ 655#endif /* DEV_NETMAP */
656 656
657 /* Clear the old ring contents */ 657 /* Clear the old ring contents */
658 IXGBE_TX_LOCK(txr); 658 IXGBE_TX_LOCK(txr);
659 659
660#ifdef DEV_NETMAP 660#ifdef DEV_NETMAP
661 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) { 661 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
662 /* 662 /*
663 * (under lock): if in netmap mode, do some consistency 663 * (under lock): if in netmap mode, do some consistency
664 * checks and set slot to entry 0 of the netmap ring. 664 * checks and set slot to entry 0 of the netmap ring.
665 */ 665 */
666 slot = netmap_reset(na, NR_TX, txr->me, 0); 666 slot = netmap_reset(na, NR_TX, txr->me, 0);
667 } 667 }
668#endif /* DEV_NETMAP */ 668#endif /* DEV_NETMAP */
669 669
670 bzero((void *)txr->tx_base, 670 bzero((void *)txr->tx_base,
671 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); 671 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
672 /* Reset indices */ 672 /* Reset indices */
673 txr->next_avail_desc = 0; 673 txr->next_avail_desc = 0;
674 txr->next_to_clean = 0; 674 txr->next_to_clean = 0;
675 675
676 /* Free any existing tx buffers. */ 676 /* Free any existing tx buffers. */
677 txbuf = txr->tx_buffers; 677 txbuf = txr->tx_buffers;
678 for (int i = 0; i < txr->num_desc; i++, txbuf++) { 678 for (int i = 0; i < txr->num_desc; i++, txbuf++) {
679 if (txbuf->m_head != NULL) { 679 if (txbuf->m_head != NULL) {
680 bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map, 680 bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
681 0, txbuf->m_head->m_pkthdr.len, 681 0, txbuf->m_head->m_pkthdr.len,
682 BUS_DMASYNC_POSTWRITE); 682 BUS_DMASYNC_POSTWRITE);
683 ixgbe_dmamap_unload(txr->txtag, txbuf->map); 683 ixgbe_dmamap_unload(txr->txtag, txbuf->map);
684 m_freem(txbuf->m_head); 684 m_freem(txbuf->m_head);
685 txbuf->m_head = NULL; 685 txbuf->m_head = NULL;
686 } 686 }
687 687
688#ifdef DEV_NETMAP 688#ifdef DEV_NETMAP
689 /* 689 /*
690 * In netmap mode, set the map for the packet buffer. 690 * In netmap mode, set the map for the packet buffer.
691 * NOTE: Some drivers (not this one) also need to set 691 * NOTE: Some drivers (not this one) also need to set
692 * the physical buffer address in the NIC ring. 692 * the physical buffer address in the NIC ring.
693 * Slots in the netmap ring (indexed by "si") are 693 * Slots in the netmap ring (indexed by "si") are
694 * kring->nkr_hwofs positions "ahead" wrt the 694 * kring->nkr_hwofs positions "ahead" wrt the
695 * corresponding slot in the NIC ring. In some drivers 695 * corresponding slot in the NIC ring. In some drivers
696 * (not here) nkr_hwofs can be negative. Function 696 * (not here) nkr_hwofs can be negative. Function
697 * netmap_idx_n2k() handles wraparounds properly. 697 * netmap_idx_n2k() handles wraparounds properly.
698 */ 698 */
699 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) { 699 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
700 int si = netmap_idx_n2k(na->tx_rings[txr->me], i); 700 int si = netmap_idx_n2k(na->tx_rings[txr->me], i);
701 netmap_load_map(na, txr->txtag, 701 netmap_load_map(na, txr->txtag,
702 txbuf->map, NMB(na, slot + si)); 702 txbuf->map, NMB(na, slot + si));
703 } 703 }
704#endif /* DEV_NETMAP */ 704#endif /* DEV_NETMAP */
705 705
706 /* Clear the EOP descriptor pointer */ 706 /* Clear the EOP descriptor pointer */
707 txbuf->eop = NULL; 707 txbuf->eop = NULL;
708 } 708 }
709 709
710 /* Set the rate at which we sample packets */ 710 /* Set the rate at which we sample packets */
711 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 711 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
712 txr->atr_sample = atr_sample_rate; 712 txr->atr_sample = atr_sample_rate;
713 713
714 /* Set number of descriptors available */ 714 /* Set number of descriptors available */
715 txr->tx_avail = adapter->num_tx_desc; 715 txr->tx_avail = adapter->num_tx_desc;
716 716
717 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 717 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
718 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 718 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
719 IXGBE_TX_UNLOCK(txr); 719 IXGBE_TX_UNLOCK(txr);
720} /* ixgbe_setup_transmit_ring */ 720} /* ixgbe_setup_transmit_ring */
721 721
722/************************************************************************ 722/************************************************************************
723 * ixgbe_setup_transmit_structures - Initialize all transmit rings. 723 * ixgbe_setup_transmit_structures - Initialize all transmit rings.
724 ************************************************************************/ 724 ************************************************************************/
725int 725int
726ixgbe_setup_transmit_structures(struct adapter *adapter) 726ixgbe_setup_transmit_structures(struct adapter *adapter)
727{ 727{
728 struct tx_ring *txr = adapter->tx_rings; 728 struct tx_ring *txr = adapter->tx_rings;
729 729
730 for (int i = 0; i < adapter->num_queues; i++, txr++) 730 for (int i = 0; i < adapter->num_queues; i++, txr++)
731 ixgbe_setup_transmit_ring(txr); 731 ixgbe_setup_transmit_ring(txr);
732 732
733 return (0); 733 return (0);
734} /* ixgbe_setup_transmit_structures */ 734} /* ixgbe_setup_transmit_structures */
735 735
736/************************************************************************ 736/************************************************************************
737 * ixgbe_free_transmit_structures - Free all transmit rings. 737 * ixgbe_free_transmit_structures - Free all transmit rings.
738 ************************************************************************/ 738 ************************************************************************/
739void 739void
740ixgbe_free_transmit_structures(struct adapter *adapter) 740ixgbe_free_transmit_structures(struct adapter *adapter)
741{ 741{
742 struct tx_ring *txr = adapter->tx_rings; 742 struct tx_ring *txr = adapter->tx_rings;
743 743
744 for (int i = 0; i < adapter->num_queues; i++, txr++) { 744 for (int i = 0; i < adapter->num_queues; i++, txr++) {
745 ixgbe_free_transmit_buffers(txr); 745 ixgbe_free_transmit_buffers(txr);
746 ixgbe_dma_free(adapter, &txr->txdma); 746 ixgbe_dma_free(adapter, &txr->txdma);
747 IXGBE_TX_LOCK_DESTROY(txr); 747 IXGBE_TX_LOCK_DESTROY(txr);
748 } 748 }
749 free(adapter->tx_rings, M_DEVBUF); 749 free(adapter->tx_rings, M_DEVBUF);
750} /* ixgbe_free_transmit_structures */ 750} /* ixgbe_free_transmit_structures */
751 751
752/************************************************************************ 752/************************************************************************
753 * ixgbe_free_transmit_buffers 753 * ixgbe_free_transmit_buffers
754 * 754 *
755 * Free transmit ring related data structures. 755 * Free transmit ring related data structures.
756 ************************************************************************/ 756 ************************************************************************/
757static void 757static void
758ixgbe_free_transmit_buffers(struct tx_ring *txr) 758ixgbe_free_transmit_buffers(struct tx_ring *txr)
759{ 759{
760 struct adapter *adapter = txr->adapter; 760 struct adapter *adapter = txr->adapter;
761 struct ixgbe_tx_buf *tx_buffer; 761 struct ixgbe_tx_buf *tx_buffer;
762 int i; 762 int i;
763 763
764 INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin"); 764 INIT_DEBUGOUT("ixgbe_free_transmit_buffers: begin");
765 765
766 if (txr->tx_buffers == NULL) 766 if (txr->tx_buffers == NULL)
767 return; 767 return;
768 768
769 tx_buffer = txr->tx_buffers; 769 tx_buffer = txr->tx_buffers;
770 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { 770 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
771 if (tx_buffer->m_head != NULL) { 771 if (tx_buffer->m_head != NULL) {
772 bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, 772 bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map,
773 0, tx_buffer->m_head->m_pkthdr.len, 773 0, tx_buffer->m_head->m_pkthdr.len,
774 BUS_DMASYNC_POSTWRITE); 774 BUS_DMASYNC_POSTWRITE);
775 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); 775 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
776 m_freem(tx_buffer->m_head); 776 m_freem(tx_buffer->m_head);
777 tx_buffer->m_head = NULL; 777 tx_buffer->m_head = NULL;
778 if (tx_buffer->map != NULL) { 778 if (tx_buffer->map != NULL) {
779 ixgbe_dmamap_destroy(txr->txtag, 779 ixgbe_dmamap_destroy(txr->txtag,
780 tx_buffer->map); 780 tx_buffer->map);
781 tx_buffer->map = NULL; 781 tx_buffer->map = NULL;
782 } 782 }
783 } else if (tx_buffer->map != NULL) { 783 } else if (tx_buffer->map != NULL) {
784 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); 784 ixgbe_dmamap_unload(txr->txtag, tx_buffer->map);
785 ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); 785 ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map);
786 tx_buffer->map = NULL; 786 tx_buffer->map = NULL;
787 } 787 }
788 } 788 }
789 if (txr->txr_interq != NULL) { 789 if (txr->txr_interq != NULL) {
790 struct mbuf *m; 790 struct mbuf *m;
791 791
792 while ((m = pcq_get(txr->txr_interq)) != NULL) 792 while ((m = pcq_get(txr->txr_interq)) != NULL)
793 m_freem(m); 793 m_freem(m);
794 pcq_destroy(txr->txr_interq); 794 pcq_destroy(txr->txr_interq);
795 } 795 }
796 if (txr->tx_buffers != NULL) { 796 if (txr->tx_buffers != NULL) {
797 free(txr->tx_buffers, M_DEVBUF); 797 free(txr->tx_buffers, M_DEVBUF);
798 txr->tx_buffers = NULL; 798 txr->tx_buffers = NULL;
799 } 799 }
800 if (txr->txtag != NULL) { 800 if (txr->txtag != NULL) {
801 ixgbe_dma_tag_destroy(txr->txtag); 801 ixgbe_dma_tag_destroy(txr->txtag);
802 txr->txtag = NULL; 802 txr->txtag = NULL;
803 } 803 }
804} /* ixgbe_free_transmit_buffers */ 804} /* ixgbe_free_transmit_buffers */
805 805
806/************************************************************************ 806/************************************************************************
807 * ixgbe_tx_ctx_setup 807 * ixgbe_tx_ctx_setup
808 * 808 *
809 * Advanced Context Descriptor setup for VLAN, CSUM or TSO 809 * Advanced Context Descriptor setup for VLAN, CSUM or TSO
810 ************************************************************************/ 810 ************************************************************************/
811static int 811static int
812ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, 812ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
813 u32 *cmd_type_len, u32 *olinfo_status) 813 u32 *cmd_type_len, u32 *olinfo_status)
814{ 814{
815 struct adapter *adapter = txr->adapter; 815 struct adapter *adapter = txr->adapter;
816 struct ixgbe_adv_tx_context_desc *TXD; 816 struct ixgbe_adv_tx_context_desc *TXD;
817 struct ether_vlan_header *eh; 817 struct ether_vlan_header *eh;
818#ifdef INET 818#ifdef INET
819 struct ip *ip; 819 struct ip *ip;
820#endif 820#endif
821#ifdef INET6 821#ifdef INET6
822 struct ip6_hdr *ip6; 822 struct ip6_hdr *ip6;
823#endif 823#endif
824 int ehdrlen, ip_hlen = 0; 824 int ehdrlen, ip_hlen = 0;
825 int offload = TRUE; 825 int offload = TRUE;
826 int ctxd = txr->next_avail_desc; 826 int ctxd = txr->next_avail_desc;
827 u32 vlan_macip_lens = 0; 827 u32 vlan_macip_lens = 0;
828 u32 type_tucmd_mlhl = 0; 828 u32 type_tucmd_mlhl = 0;
829 u16 vtag = 0; 829 u16 vtag = 0;
830 u16 etype; 830 u16 etype;
831 u8 ipproto = 0; 831 u8 ipproto = 0;
832 char *l3d; 832 char *l3d;
833 833
834 834
835 /* First check if TSO is to be used */ 835 /* First check if TSO is to be used */
836 if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) { 836 if (mp->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) {
837 int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status); 837 int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
838 838
839 if (rv != 0) 839 if (rv != 0)
840 ++adapter->tso_err.ev_count; 840 ++adapter->tso_err.ev_count;
841 return rv; 841 return rv;
842 } 842 }
843 843
844 if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0) 844 if ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) == 0)
845 offload = FALSE; 845 offload = FALSE;
846 846
847 /* Indicate the whole packet as payload when not doing TSO */ 847 /* Indicate the whole packet as payload when not doing TSO */
848 *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT; 848 *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
849 849
850 /* Now ready a context descriptor */ 850 /* Now ready a context descriptor */
851 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd]; 851 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
852 852
853 /* 853 /*
854 * In advanced descriptors the vlan tag must 854 * In advanced descriptors the vlan tag must
855 * be placed into the context descriptor. Hence 855 * be placed into the context descriptor. Hence
856 * we need to make one even if not doing offloads. 856 * we need to make one even if not doing offloads.
857 */ 857 */
858 if (vlan_has_tag(mp)) { 858 if (vlan_has_tag(mp)) {
859 vtag = htole16(vlan_get_tag(mp)); 859 vtag = htole16(vlan_get_tag(mp));
860 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); 860 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
861 } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) && 861 } else if (!(txr->adapter->feat_en & IXGBE_FEATURE_NEEDS_CTXD) &&
862 (offload == FALSE)) 862 (offload == FALSE))
863 return (0); 863 return (0);
864 864
865 /* 865 /*
866 * Determine where frame payload starts. 866 * Determine where frame payload starts.
867 * Jump over vlan headers if already present, 867 * Jump over vlan headers if already present,
868 * helpful for QinQ too. 868 * helpful for QinQ too.
869 */ 869 */
870 KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag)); 870 KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag));
871 eh = mtod(mp, struct ether_vlan_header *); 871 eh = mtod(mp, struct ether_vlan_header *);
872 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 872 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
873 KASSERT(mp->m_len >= sizeof(struct ether_vlan_header)); 873 KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
874 etype = ntohs(eh->evl_proto); 874 etype = ntohs(eh->evl_proto);
875 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 875 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
876 } else { 876 } else {
877 etype = ntohs(eh->evl_encap_proto); 877 etype = ntohs(eh->evl_encap_proto);
878 ehdrlen = ETHER_HDR_LEN; 878 ehdrlen = ETHER_HDR_LEN;
879 } 879 }
880 880
881 /* Set the ether header length */ 881 /* Set the ether header length */
882 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; 882 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
883 883
884 if (offload == FALSE) 884 if (offload == FALSE)
885 goto no_offloads; 885 goto no_offloads;
886 886
887 /* 887 /*
888 * If the first mbuf only includes the ethernet header, 888 * If the first mbuf only includes the ethernet header,
889 * jump to the next one 889 * jump to the next one
890 * XXX: This assumes the stack splits mbufs containing headers 890 * XXX: This assumes the stack splits mbufs containing headers
891 * on header boundaries 891 * on header boundaries
892 * XXX: And assumes the entire IP header is contained in one mbuf 892 * XXX: And assumes the entire IP header is contained in one mbuf
893 */ 893 */
894 if (mp->m_len == ehdrlen && mp->m_next) 894 if (mp->m_len == ehdrlen && mp->m_next)
895 l3d = mtod(mp->m_next, char *); 895 l3d = mtod(mp->m_next, char *);
896 else 896 else
897 l3d = mtod(mp, char *) + ehdrlen; 897 l3d = mtod(mp, char *) + ehdrlen;
898 898
899 switch (etype) { 899 switch (etype) {
900#ifdef INET 900#ifdef INET
901 case ETHERTYPE_IP: 901 case ETHERTYPE_IP:
902 ip = (struct ip *)(l3d); 902 ip = (struct ip *)(l3d);
903 ip_hlen = ip->ip_hl << 2; 903 ip_hlen = ip->ip_hl << 2;
904 ipproto = ip->ip_p; 904 ipproto = ip->ip_p;
905 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 905 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
906 KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 || 906 KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
907 ip->ip_sum == 0); 907 ip->ip_sum == 0);
908 break; 908 break;
909#endif 909#endif
910#ifdef INET6 910#ifdef INET6
911 case ETHERTYPE_IPV6: 911 case ETHERTYPE_IPV6:
912 ip6 = (struct ip6_hdr *)(l3d); 912 ip6 = (struct ip6_hdr *)(l3d);
913 ip_hlen = sizeof(struct ip6_hdr); 913 ip_hlen = sizeof(struct ip6_hdr);
914 ipproto = ip6->ip6_nxt; 914 ipproto = ip6->ip6_nxt;
915 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; 915 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
916 break; 916 break;
917#endif 917#endif
918 default: 918 default:
919 offload = false; 919 offload = false;
920 break; 920 break;
921 } 921 }
922 922
923 if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) 923 if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0)
924 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; 924 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
925 925
926 vlan_macip_lens |= ip_hlen; 926 vlan_macip_lens |= ip_hlen;
927 927
928 /* No support for offloads for non-L4 next headers */ 928 /* No support for offloads for non-L4 next headers */
929 switch (ipproto) { 929 switch (ipproto) {
930 case IPPROTO_TCP: 930 case IPPROTO_TCP:
931 if (mp->m_pkthdr.csum_flags & 931 if (mp->m_pkthdr.csum_flags &
932 (M_CSUM_TCPv4 | M_CSUM_TCPv6)) 932 (M_CSUM_TCPv4 | M_CSUM_TCPv6))
933 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 933 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
934 else 934 else
935 offload = false; 935 offload = false;
936 break; 936 break;
937 case IPPROTO_UDP: 937 case IPPROTO_UDP:
938 if (mp->m_pkthdr.csum_flags & 938 if (mp->m_pkthdr.csum_flags &
939 (M_CSUM_UDPv4 | M_CSUM_UDPv6)) 939 (M_CSUM_UDPv4 | M_CSUM_UDPv6))
940 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; 940 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
941 else 941 else
942 offload = false; 942 offload = false;
943 break; 943 break;
944 default: 944 default:
945 offload = false; 945 offload = false;
946 break; 946 break;
947 } 947 }
948 948
949 if (offload) /* Insert L4 checksum into data descriptors */ 949 if (offload) /* Insert L4 checksum into data descriptors */
950 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; 950 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
951 951
952no_offloads: 952no_offloads:
953 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 953 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
954 954
955 /* Now copy bits into descriptor */ 955 /* Now copy bits into descriptor */
956 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 956 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
957 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 957 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
958 TXD->seqnum_seed = htole32(0); 958 TXD->seqnum_seed = htole32(0);
959 TXD->mss_l4len_idx = htole32(0); 959 TXD->mss_l4len_idx = htole32(0);
960 960
961 /* We've consumed the first desc, adjust counters */ 961 /* We've consumed the first desc, adjust counters */
962 if (++ctxd == txr->num_desc) 962 if (++ctxd == txr->num_desc)
963 ctxd = 0; 963 ctxd = 0;
964 txr->next_avail_desc = ctxd; 964 txr->next_avail_desc = ctxd;
965 --txr->tx_avail; 965 --txr->tx_avail;
966 966
967 return (0); 967 return (0);
968} /* ixgbe_tx_ctx_setup */ 968} /* ixgbe_tx_ctx_setup */
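
The header walk at the top of ixgbe_tx_ctx_setup() only has to answer two questions: where the L3 header starts and what its ethertype is, with at most one VLAN tag in the way. A self-contained sketch of that walk over a raw frame buffer (the constants are redefined locally so the sketch compiles on its own):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs() */

#define ETHER_ADDR_LEN		6
#define ETHER_HDR_LEN		14
#define ETHER_VLAN_ENCAP_LEN	4
#define ETHERTYPE_VLAN		0x8100

/*
 * Compute where the L3 header starts, skipping one VLAN tag if
 * present, as ixgbe_tx_ctx_setup() does on the first mbuf.
 * 'frame' must hold at least 18 bytes here.
 */
static int
ether_header_len(const uint8_t *frame, uint16_t *etype)
{
	uint16_t encap;

	memcpy(&encap, frame + 2 * ETHER_ADDR_LEN, sizeof(encap));
	if (ntohs(encap) == ETHERTYPE_VLAN) {
		memcpy(etype, frame + ETHER_HDR_LEN + 2, sizeof(*etype));
		*etype = ntohs(*etype);
		return ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	}
	*etype = ntohs(encap);
	return ETHER_HDR_LEN;
}

int
main(void)
{
	uint8_t frame[18] = { 0 };
	uint16_t etype;

	/* Tagged frame: TPID 0x8100 at offset 12, inner type IPv4. */
	frame[12] = 0x81; frame[13] = 0x00;
	frame[16] = 0x08; frame[17] = 0x00;

	int len = ether_header_len(frame, &etype);
	printf("ehdrlen=%d etype=0x%04x\n", len, etype); /* 18, 0x0800 */
	return 0;
}
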
969 969
970/************************************************************************ 970/************************************************************************
971 * ixgbe_tso_setup 971 * ixgbe_tso_setup
972 * 972 *
973 * Setup work for hardware segmentation offload (TSO) on 973 * Setup work for hardware segmentation offload (TSO) on
974 * adapters using advanced tx descriptors 974 * adapters using advanced tx descriptors
975 ************************************************************************/ 975 ************************************************************************/
976static int 976static int
977ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len, 977ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len,
978 u32 *olinfo_status) 978 u32 *olinfo_status)
979{ 979{
980 struct ixgbe_adv_tx_context_desc *TXD; 980 struct ixgbe_adv_tx_context_desc *TXD;
981 struct ether_vlan_header *eh; 981 struct ether_vlan_header *eh;
982#ifdef INET6 982#ifdef INET6
983 struct ip6_hdr *ip6; 983 struct ip6_hdr *ip6;
984#endif 984#endif
985#ifdef INET 985#ifdef INET
986 struct ip *ip; 986 struct ip *ip;
987#endif 987#endif
988 struct tcphdr *th; 988 struct tcphdr *th;
989 int ctxd, ehdrlen, ip_hlen, tcp_hlen; 989 int ctxd, ehdrlen, ip_hlen, tcp_hlen;
990 u32 vlan_macip_lens = 0; 990 u32 vlan_macip_lens = 0;
991 u32 type_tucmd_mlhl = 0; 991 u32 type_tucmd_mlhl = 0;
992 u32 mss_l4len_idx = 0, paylen; 992 u32 mss_l4len_idx = 0, paylen;
993 u16 vtag = 0, eh_type; 993 u16 vtag = 0, eh_type;
994 994
995 /* 995 /*
996 * Determine where frame payload starts. 996 * Determine where frame payload starts.
997 * Jump over vlan headers if already present 997 * Jump over vlan headers if already present
998 */ 998 */
999 eh = mtod(mp, struct ether_vlan_header *); 999 eh = mtod(mp, struct ether_vlan_header *);
1000 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1000 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1001 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1001 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1002 eh_type = eh->evl_proto; 1002 eh_type = eh->evl_proto;
1003 } else { 1003 } else {
1004 ehdrlen = ETHER_HDR_LEN; 1004 ehdrlen = ETHER_HDR_LEN;
1005 eh_type = eh->evl_encap_proto; 1005 eh_type = eh->evl_encap_proto;
1006 } 1006 }
1007 1007
1008 switch (ntohs(eh_type)) { 1008 switch (ntohs(eh_type)) {
1009#ifdef INET 1009#ifdef INET
1010 case ETHERTYPE_IP: 1010 case ETHERTYPE_IP:
1011 ip = (struct ip *)(mp->m_data + ehdrlen); 1011 ip = (struct ip *)(mp->m_data + ehdrlen);
1012 if (ip->ip_p != IPPROTO_TCP) 1012 if (ip->ip_p != IPPROTO_TCP)
1013 return (ENXIO); 1013 return (ENXIO);
1014 ip->ip_sum = 0; 1014 ip->ip_sum = 0;
1015 ip_hlen = ip->ip_hl << 2; 1015 ip_hlen = ip->ip_hl << 2;
1016 th = (struct tcphdr *)((char *)ip + ip_hlen); 1016 th = (struct tcphdr *)((char *)ip + ip_hlen);
1017 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1017 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1018 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1018 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1019 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 1019 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
1020 /* Tell transmit desc to also do IPv4 checksum. */ 1020 /* Tell transmit desc to also do IPv4 checksum. */
1021 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; 1021 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1022 break; 1022 break;
1023#endif 1023#endif
1024#ifdef INET6 1024#ifdef INET6
1025 case ETHERTYPE_IPV6: 1025 case ETHERTYPE_IPV6:
1026 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1026 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1027 /* XXX-BZ For now we do not pretend to support ext. hdrs. */ 1027 /* XXX-BZ For now we do not pretend to support ext. hdrs. */
1028 if (ip6->ip6_nxt != IPPROTO_TCP) 1028 if (ip6->ip6_nxt != IPPROTO_TCP)
1029 return (ENXIO); 1029 return (ENXIO);
1030 ip_hlen = sizeof(struct ip6_hdr); 1030 ip_hlen = sizeof(struct ip6_hdr);
1031 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1031 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1032 th = (struct tcphdr *)((char *)ip6 + ip_hlen); 1032 th = (struct tcphdr *)((char *)ip6 + ip_hlen);
1033 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 1033 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1034 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 1034 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1035 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; 1035 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
1036 break; 1036 break;
1037#endif 1037#endif
1038 default: 1038 default:
1039 panic("%s: CSUM_TSO but no supported IP version (0x%04x)", 1039 panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
1040 __func__, ntohs(eh_type)); 1040 __func__, ntohs(eh_type));
1041 break; 1041 break;
1042 } 1042 }
1043 1043
1044 ctxd = txr->next_avail_desc; 1044 ctxd = txr->next_avail_desc;
1045 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd]; 1045 TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
1046 1046
1047 tcp_hlen = th->th_off << 2; 1047 tcp_hlen = th->th_off << 2;
1048 1048
1049 /* This is used in the transmit desc in encap */ 1049 /* This is used in the transmit desc in encap */
1050 paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen; 1050 paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
1051 1051
1052 /* VLAN MACLEN IPLEN */ 1052 /* VLAN MACLEN IPLEN */
1053 if (vlan_has_tag(mp)) { 1053 if (vlan_has_tag(mp)) {
1054 vtag = htole16(vlan_get_tag(mp)); 1054 vtag = htole16(vlan_get_tag(mp));
1055 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); 1055 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
1056 } 1056 }
1057 1057
1058 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; 1058 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
1059 vlan_macip_lens |= ip_hlen; 1059 vlan_macip_lens |= ip_hlen;
1060 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 1060 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
1061 1061
1062 /* ADV DTYPE TUCMD */ 1062 /* ADV DTYPE TUCMD */
1063 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 1063 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1064 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 1064 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
1065 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 1065 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
1066 1066
1067 /* MSS L4LEN IDX */ 1067 /* MSS L4LEN IDX */
1068 mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT); 1068 mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT);
1069 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); 1069 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
1070 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 1070 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
1071 1071
1072 TXD->seqnum_seed = htole32(0); 1072 TXD->seqnum_seed = htole32(0);
1073 1073
1074 if (++ctxd == txr->num_desc) 1074 if (++ctxd == txr->num_desc)
1075 ctxd = 0; 1075 ctxd = 0;
1076 1076
1077 txr->tx_avail--; 1077 txr->tx_avail--;
1078 txr->next_avail_desc = ctxd; 1078 txr->next_avail_desc = ctxd;
1079 *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 1079 *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1080 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; 1080 *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1081 *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; 1081 *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1082 ++txr->tso_tx.ev_count; 1082 ++txr->tso_tx.ev_count;
1083 1083
1084 return (0); 1084 return (0);
1085} /* ixgbe_tso_setup */ 1085} /* ixgbe_tso_setup */
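
The th_sum seeding in the TSO path relies on one's-complement arithmetic: sum the pseudo-header 16-bit words, fold the carries back in, and leave the length field zero for the hardware to add per segment. A sketch of just that folding arithmetic; byte-order handling is elided here, and returning the uncomplemented folded sum is an assumption of convenience, not a statement about what in_cksum_phdr() returns:

#include <stdint.h>
#include <stdio.h>

/*
 * One's-complement folding over the IPv4 pseudo header {src, dst,
 * proto} with a zero length field; during TSO the hardware adds
 * the per-segment length itself.
 */
static uint16_t
phdr_sum(uint32_t src, uint32_t dst, uint16_t proto)
{
	uint32_t sum = (src >> 16) + (src & 0xffff)
	    + (dst >> 16) + (dst & 0xffff)
	    + proto;

	while (sum >> 16)	/* fold carries back into 16 bits */
		sum = (sum >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}

int
main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, protocol TCP (6). */
	printf("seed: 0x%04x\n", phdr_sum(0xc0000201, 0xc0000202, 6));
	return 0;
}
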
1086 1086
1087 1087
1088/************************************************************************ 1088/************************************************************************
1089 * ixgbe_txeof 1089 * ixgbe_txeof
1090 * 1090 *
1091 * Examine each tx_buffer in the used queue. If the hardware is done 1091 * Examine each tx_buffer in the used queue. If the hardware is done
1092 * processing the packet then free associated resources. The 1092 * processing the packet then free associated resources. The
1093 * tx_buffer is put back on the free queue. 1093 * tx_buffer is put back on the free queue.
1094 ************************************************************************/ 1094 ************************************************************************/
1095bool 1095bool
1096ixgbe_txeof(struct tx_ring *txr) 1096ixgbe_txeof(struct tx_ring *txr)
1097{ 1097{
1098 struct adapter *adapter = txr->adapter; 1098 struct adapter *adapter = txr->adapter;
1099 struct ifnet *ifp = adapter->ifp; 1099 struct ifnet *ifp = adapter->ifp;
1100 struct ixgbe_tx_buf *buf; 1100 struct ixgbe_tx_buf *buf;
1101 union ixgbe_adv_tx_desc *txd; 1101 union ixgbe_adv_tx_desc *txd;
1102 u32 work, processed = 0; 1102 u32 work, processed = 0;
1103 u32 limit = adapter->tx_process_limit; 1103 u32 limit = adapter->tx_process_limit;
1104 1104
1105 KASSERT(mutex_owned(&txr->tx_mtx)); 1105 KASSERT(mutex_owned(&txr->tx_mtx));
1106 1106
1107#ifdef DEV_NETMAP 1107#ifdef DEV_NETMAP
1108 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 1108 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1109 (adapter->ifp->if_capenable & IFCAP_NETMAP)) { 1109 (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
1110 struct netmap_adapter *na = NA(adapter->ifp); 1110 struct netmap_adapter *na = NA(adapter->ifp);
1111 struct netmap_kring *kring = na->tx_rings[txr->me]; 1111 struct netmap_kring *kring = na->tx_rings[txr->me];
1112 txd = txr->tx_base; 1112 txd = txr->tx_base;
1113 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 1113 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1114 BUS_DMASYNC_POSTREAD); 1114 BUS_DMASYNC_POSTREAD);
1115 /* 1115 /*
1116 * In netmap mode, all the work is done in the context 1116 * In netmap mode, all the work is done in the context
1117 * of the client thread. Interrupt handlers only wake up 1117 * of the client thread. Interrupt handlers only wake up
1118 * clients, which may be sleeping on individual rings 1118 * clients, which may be sleeping on individual rings
1119 * or on a global resource for all rings. 1119 * or on a global resource for all rings.
1120 * To implement tx interrupt mitigation, we wake up the client 1120 * To implement tx interrupt mitigation, we wake up the client
1121 * thread roughly every half ring, even if the NIC interrupts 1121 * thread roughly every half ring, even if the NIC interrupts
1122 * more frequently. This is implemented as follows: 1122 * more frequently. This is implemented as follows:
1123 * - ixgbe_txsync() sets kring->nr_kflags with the index of 1123 * - ixgbe_txsync() sets kring->nr_kflags with the index of
1124 * the slot that should wake up the thread (nkr_num_slots 1124 * the slot that should wake up the thread (nkr_num_slots
1125 * means the user thread should not be woken up); 1125 * means the user thread should not be woken up);
1126 * - the driver ignores tx interrupts unless netmap_mitigate=0 1126 * - the driver ignores tx interrupts unless netmap_mitigate=0
1127 * or the slot has the DD bit set. 1127 * or the slot has the DD bit set.
1128 */ 1128 */
1129 if (kring->nr_kflags < kring->nkr_num_slots && 1129 if (kring->nr_kflags < kring->nkr_num_slots &&
1130 txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD) { 1130 txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD) {
1131 netmap_tx_irq(ifp, txr->me); 1131 netmap_tx_irq(ifp, txr->me);
1132 } 1132 }
1133 return false; 1133 return false;
1134 } 1134 }
1135#endif /* DEV_NETMAP */ 1135#endif /* DEV_NETMAP */
1136 1136
1137 if (txr->tx_avail == txr->num_desc) { 1137 if (txr->tx_avail == txr->num_desc) {
1138 txr->busy = 0; 1138 txr->busy = 0;
1139 return false; 1139 return false;
1140 } 1140 }
1141 1141
1142 /* Get work starting point */ 1142 /* Get work starting point */
1143 work = txr->next_to_clean; 1143 work = txr->next_to_clean;
1144 buf = &txr->tx_buffers[work]; 1144 buf = &txr->tx_buffers[work];
1145 txd = &txr->tx_base[work]; 1145 txd = &txr->tx_base[work];
1146 work -= txr->num_desc; /* The distance to ring end */ 1146 work -= txr->num_desc; /* The distance to ring end */
1147 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 1147 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1148 BUS_DMASYNC_POSTREAD); 1148 BUS_DMASYNC_POSTREAD);
1149 1149
1150 do { 1150 do {
1151 union ixgbe_adv_tx_desc *eop = buf->eop; 1151 union ixgbe_adv_tx_desc *eop = buf->eop;
1152 if (eop == NULL) /* No work */ 1152 if (eop == NULL) /* No work */
1153 break; 1153 break;
1154 1154
1155 if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0) 1155 if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
1156 break; /* I/O not complete */ 1156 break; /* I/O not complete */
1157 1157
1158 if (buf->m_head) { 1158 if (buf->m_head) {
1159 txr->bytes += buf->m_head->m_pkthdr.len; 1159 txr->bytes += buf->m_head->m_pkthdr.len;
1160 bus_dmamap_sync(txr->txtag->dt_dmat, buf->map, 1160 bus_dmamap_sync(txr->txtag->dt_dmat, buf->map,
1161 0, buf->m_head->m_pkthdr.len, 1161 0, buf->m_head->m_pkthdr.len,
1162 BUS_DMASYNC_POSTWRITE); 1162 BUS_DMASYNC_POSTWRITE);
1163 ixgbe_dmamap_unload(txr->txtag, buf->map); 1163 ixgbe_dmamap_unload(txr->txtag, buf->map);
1164 m_freem(buf->m_head); 1164 m_freem(buf->m_head);
1165 buf->m_head = NULL; 1165 buf->m_head = NULL;
1166 } 1166 }
1167 buf->eop = NULL; 1167 buf->eop = NULL;
1168 txr->txr_no_space = false; 1168 txr->txr_no_space = false;
1169 ++txr->tx_avail; 1169 ++txr->tx_avail;
1170 1170
 1171 /* Clean the remaining range for multi-segment packets */ 1171 /* Clean the remaining range for multi-segment packets */
1172 while (txd != eop) { 1172 while (txd != eop) {
1173 ++txd; 1173 ++txd;
1174 ++buf; 1174 ++buf;
1175 ++work; 1175 ++work;
1176 /* wrap the ring? */ 1176 /* wrap the ring? */
1177 if (__predict_false(!work)) { 1177 if (__predict_false(!work)) {
1178 work -= txr->num_desc; 1178 work -= txr->num_desc;
1179 buf = txr->tx_buffers; 1179 buf = txr->tx_buffers;
1180 txd = txr->tx_base; 1180 txd = txr->tx_base;
1181 } 1181 }
1182 if (buf->m_head) { 1182 if (buf->m_head) {
1183 txr->bytes += 1183 txr->bytes +=
1184 buf->m_head->m_pkthdr.len; 1184 buf->m_head->m_pkthdr.len;
1185 bus_dmamap_sync(txr->txtag->dt_dmat, 1185 bus_dmamap_sync(txr->txtag->dt_dmat,
1186 buf->map, 1186 buf->map,
1187 0, buf->m_head->m_pkthdr.len, 1187 0, buf->m_head->m_pkthdr.len,
1188 BUS_DMASYNC_POSTWRITE); 1188 BUS_DMASYNC_POSTWRITE);
1189 ixgbe_dmamap_unload(txr->txtag, 1189 ixgbe_dmamap_unload(txr->txtag,
1190 buf->map); 1190 buf->map);
1191 m_freem(buf->m_head); 1191 m_freem(buf->m_head);
1192 buf->m_head = NULL; 1192 buf->m_head = NULL;
1193 } 1193 }
1194 ++txr->tx_avail; 1194 ++txr->tx_avail;
1195 buf->eop = NULL; 1195 buf->eop = NULL;
1196 1196
1197 } 1197 }
1198 ++txr->packets; 1198 ++txr->packets;
1199 ++processed; 1199 ++processed;
1200 ++ifp->if_opackets; 1200 ++ifp->if_opackets;
1201 1201
1202 /* Try the next packet */ 1202 /* Try the next packet */
1203 ++txd; 1203 ++txd;
1204 ++buf; 1204 ++buf;
1205 ++work; 1205 ++work;
1206 /* reset with a wrap */ 1206 /* reset with a wrap */
1207 if (__predict_false(!work)) { 1207 if (__predict_false(!work)) {
1208 work -= txr->num_desc; 1208 work -= txr->num_desc;
1209 buf = txr->tx_buffers; 1209 buf = txr->tx_buffers;
1210 txd = txr->tx_base; 1210 txd = txr->tx_base;
1211 } 1211 }
1212 prefetch(txd); 1212 prefetch(txd);
1213 } while (__predict_true(--limit)); 1213 } while (__predict_true(--limit));
1214 1214
1215 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 1215 ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1216 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1216 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1217 1217
1218 work += txr->num_desc; 1218 work += txr->num_desc;
1219 txr->next_to_clean = work; 1219 txr->next_to_clean = work;
1220 1220
1221 /* 1221 /*
 1222 * Queue hang detection: we know there is work 1222 * Queue hang detection: we know there is work
 1223 * outstanding or the first early return above 1223 * outstanding or the first early return above
 1224 * would have been taken, so increment busy if 1224 * would have been taken, so increment busy if
 1225 * nothing was cleaned; local_timer then checks 1225 * nothing was cleaned; local_timer then checks
 1226 * it and marks the queue HUNG once it exceeds 1226 * it and marks the queue HUNG once it exceeds
 1227 * the maximum number of attempts. 1227 * the maximum number of attempts.
1228 */ 1228 */
1229 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG)) 1229 if ((processed == 0) && (txr->busy != IXGBE_QUEUE_HUNG))
1230 ++txr->busy; 1230 ++txr->busy;
1231 /* 1231 /*
 1232 * If anything was cleaned, reset the state to 1; 1232 * If anything was cleaned, reset the state to 1;
 1233 * note this clears HUNG if it's set. 1233 * note this clears HUNG if it's set.
1234 */ 1234 */
1235 if (processed) 1235 if (processed)
1236 txr->busy = 1; 1236 txr->busy = 1;
1237 1237
1238 if (txr->tx_avail == txr->num_desc) 1238 if (txr->tx_avail == txr->num_desc)
1239 txr->busy = 0; 1239 txr->busy = 0;
1240 1240
1241 return ((limit > 0) ? false : true); 1241 return ((limit > 0) ? false : true);
1242} /* ixgbe_txeof */ 1242} /* ixgbe_txeof */
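
ixgbe_txeof() biases its ring index negatively ("work -= txr->num_desc", the "distance to ring end") so the wrap test in the hot loop is a compare against zero rather than against the ring size. A standalone sketch of the same walk:

#include <stdio.h>

#define NUM_DESC 8	/* tiny ring for illustration */

int
main(void)
{
	int start = 6;			/* next_to_clean */
	int work = start - NUM_DESC;	/* distance to ring end, <= 0 */

	for (int n = 0; n < 5; n++) {
		/* The real array index is work + NUM_DESC. */
		printf("visiting slot %d\n", work + NUM_DESC);
		++work;
		if (work == 0)		/* hit the ring end: wrap */
			work -= NUM_DESC;
	}
	/* Visits slots 6, 7, 0, 1, 2. */
	return 0;
}
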
1243 1243
1244/************************************************************************ 1244/************************************************************************
1245 * ixgbe_rsc_count 1245 * ixgbe_rsc_count
1246 * 1246 *
1247 * Used to detect a descriptor that has been merged by Hardware RSC. 1247 * Used to detect a descriptor that has been merged by Hardware RSC.
1248 ************************************************************************/ 1248 ************************************************************************/
1249static inline u32 1249static inline u32
1250ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) 1250ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
1251{ 1251{
1252 return (le32toh(rx->wb.lower.lo_dword.data) & 1252 return (le32toh(rx->wb.lower.lo_dword.data) &
1253 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; 1253 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
1254} /* ixgbe_rsc_count */ 1254} /* ixgbe_rsc_count */
1255 1255
1256/************************************************************************ 1256/************************************************************************
1257 * ixgbe_setup_hw_rsc 1257 * ixgbe_setup_hw_rsc
1258 * 1258 *
1259 * Initialize Hardware RSC (LRO) feature on 82599 1259 * Initialize Hardware RSC (LRO) feature on 82599
 1260 * for an RX ring; this is toggled by the LRO capability 1260 * for an RX ring; this is toggled by the LRO capability
1261 * even though it is transparent to the stack. 1261 * even though it is transparent to the stack.
1262 * 1262 *
1263 * NOTE: Since this HW feature only works with IPv4 and 1263 * NOTE: Since this HW feature only works with IPv4 and
1264 * testing has shown soft LRO to be as effective, 1264 * testing has shown soft LRO to be as effective,
1265 * this feature will be disabled by default. 1265 * this feature will be disabled by default.
1266 ************************************************************************/ 1266 ************************************************************************/
1267static void 1267static void
1268ixgbe_setup_hw_rsc(struct rx_ring *rxr) 1268ixgbe_setup_hw_rsc(struct rx_ring *rxr)
1269{ 1269{
1270 struct adapter *adapter = rxr->adapter; 1270 struct adapter *adapter = rxr->adapter;
1271 struct ixgbe_hw *hw = &adapter->hw; 1271 struct ixgbe_hw *hw = &adapter->hw;
1272 u32 rscctrl, rdrxctl; 1272 u32 rscctrl, rdrxctl;
1273 1273
1274 /* If turning LRO/RSC off we need to disable it */ 1274 /* If turning LRO/RSC off we need to disable it */
1275 if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) { 1275 if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
1276 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); 1276 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1277 rscctrl &= ~IXGBE_RSCCTL_RSCEN; 1277 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
1278 return; 1278 return;
1279 } 1279 }
1280 1280
1281 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1281 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1282 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 1282 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1283#ifdef DEV_NETMAP 1283#ifdef DEV_NETMAP
1284 /* Always strip CRC unless Netmap disabled it */ 1284 /* Always strip CRC unless Netmap disabled it */
1285 if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) || 1285 if (!(adapter->feat_en & IXGBE_FEATURE_NETMAP) ||
1286 !(adapter->ifp->if_capenable & IFCAP_NETMAP) || 1286 !(adapter->ifp->if_capenable & IFCAP_NETMAP) ||
1287 ix_crcstrip) 1287 ix_crcstrip)
1288#endif /* DEV_NETMAP */ 1288#endif /* DEV_NETMAP */
1289 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 1289 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
1290 rdrxctl |= IXGBE_RDRXCTL_RSCACKC; 1290 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
1291 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1291 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1292 1292
1293 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); 1293 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
1294 rscctrl |= IXGBE_RSCCTL_RSCEN; 1294 rscctrl |= IXGBE_RSCCTL_RSCEN;
1295 /* 1295 /*
1296 * Limit the total number of descriptors that 1296 * Limit the total number of descriptors that
1297 * can be combined, so it does not exceed 64K 1297 * can be combined, so it does not exceed 64K
1298 */ 1298 */
1299 if (rxr->mbuf_sz == MCLBYTES) 1299 if (rxr->mbuf_sz == MCLBYTES)
1300 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 1300 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
1301 else if (rxr->mbuf_sz == MJUMPAGESIZE) 1301 else if (rxr->mbuf_sz == MJUMPAGESIZE)
1302 rscctrl |= IXGBE_RSCCTL_MAXDESC_8; 1302 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
1303 else if (rxr->mbuf_sz == MJUM9BYTES) 1303 else if (rxr->mbuf_sz == MJUM9BYTES)
1304 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 1304 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
1305 else /* Using 16K cluster */ 1305 else /* Using 16K cluster */
1306 rscctrl |= IXGBE_RSCCTL_MAXDESC_1; 1306 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
1307 1307
1308 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); 1308 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
1309 1309
1310 /* Enable TCP header recognition */ 1310 /* Enable TCP header recognition */
1311 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), 1311 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
1312 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR)); 1312 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | IXGBE_PSRTYPE_TCPHDR));
1313 1313
1314 /* Disable RSC for ACK packets */ 1314 /* Disable RSC for ACK packets */
1315 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 1315 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
1316 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 1316 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
1317 1317
1318 rxr->hw_rsc = TRUE; 1318 rxr->hw_rsc = TRUE;
1319} /* ixgbe_setup_hw_rsc */ 1319} /* ixgbe_setup_hw_rsc */
1320 1320
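The MAXDESC selection above bounds how many receive descriptors HW RSC may coalesce into one frame: the writeback length is a 16-bit field, so (descriptors combined) x (cluster size) has to stay within 64K. A worked sketch of that arithmetic, assuming the usual NetBSD cluster sizes (MCLBYTES = 2KB, MJUMPAGESIZE = one 4KB page, MJUM9BYTES = 9KB, 16KB for the largest cluster); this is illustrative, not driver code:

    #include <stdbool.h>

    /* Hedged sketch: does a MAXDESC choice keep the worst-case
     * coalesced length inside the 16-bit length field? */
    static bool
    rsc_maxdesc_fits(unsigned int maxdesc, unsigned int cluster_sz)
    {
            return (maxdesc * cluster_sz) <= 64U * 1024;
    }

    /* rsc_maxdesc_fits(16, 2048)  -> 16 * 2KB  = 32KB, fits
     * rsc_maxdesc_fits(8,  4096)  ->  8 * 4KB  = 32KB, fits
     * rsc_maxdesc_fits(4,  9216)  ->  4 * 9KB  = 36KB, fits
     * rsc_maxdesc_fits(1, 16384)  ->  1 * 16KB = 16KB, fits */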
1321/************************************************************************ 1321/************************************************************************
1322 * ixgbe_refresh_mbufs 1322 * ixgbe_refresh_mbufs
1323 * 1323 *
1324 * Refresh mbuf buffers for RX descriptor rings 1324 * Refresh mbuf buffers for RX descriptor rings
1325 * - now keeps its own state so discards due to resource 1325 * - now keeps its own state so discards due to resource
1326 * exhaustion are unnecessary, if an mbuf cannot be obtained 1326 * exhaustion are unnecessary, if an mbuf cannot be obtained
1327 * it just returns, keeping its placeholder, thus it can simply 1327 * it just returns, keeping its placeholder, thus it can simply
1328 * be recalled to try again. 1328 * be recalled to try again.
 1329 *
 1330 * XXX NetBSD TODO:
 1331 * - The ixgbe_rxeof() function always preallocates an mbuf cluster (jcl),
 1332 * so the ixgbe_refresh_mbufs() function can be simplified.
 1333 *
1329 ************************************************************************/ 1334 ************************************************************************/
1330static void 1335static void
1331ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) 1336ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
1332{ 1337{
1333 struct adapter *adapter = rxr->adapter; 1338 struct adapter *adapter = rxr->adapter;
1334 struct ixgbe_rx_buf *rxbuf; 1339 struct ixgbe_rx_buf *rxbuf;
1335 struct mbuf *mp; 1340 struct mbuf *mp;
1336 int i, j, error; 1341 int i, j, error;
1337 bool refreshed = false; 1342 bool refreshed = false;
1338 1343
1339 i = j = rxr->next_to_refresh; 1344 i = j = rxr->next_to_refresh;
1340 /* Control the loop with one beyond */ 1345 /* Control the loop with one beyond */
1341 if (++j == rxr->num_desc) 1346 if (++j == rxr->num_desc)
1342 j = 0; 1347 j = 0;
1343 1348
1344 while (j != limit) { 1349 while (j != limit) {
1345 rxbuf = &rxr->rx_buffers[i]; 1350 rxbuf = &rxr->rx_buffers[i];
1346 if (rxbuf->buf == NULL) { 1351 if (rxbuf->buf == NULL) {
1347 mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, 1352 mp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1348 MT_DATA, M_PKTHDR, rxr->mbuf_sz); 1353 MT_DATA, M_PKTHDR, rxr->mbuf_sz);
1349 if (mp == NULL) { 1354 if (mp == NULL) {
1350 rxr->no_jmbuf.ev_count++; 1355 rxr->no_jmbuf.ev_count++;
1351 goto update; 1356 goto update;
1352 } 1357 }
1353 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) 1358 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
1354 m_adj(mp, ETHER_ALIGN); 1359 m_adj(mp, ETHER_ALIGN);
1355 } else 1360 } else
1356 mp = rxbuf->buf; 1361 mp = rxbuf->buf;
1357 1362
1358 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; 1363 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1359 1364
1360 /* If we're dealing with an mbuf that was copied rather 1365 /* If we're dealing with an mbuf that was copied rather
1361 * than replaced, there's no need to go through busdma. 1366 * than replaced, there's no need to go through busdma.
1362 */ 1367 */
1363 if ((rxbuf->flags & IXGBE_RX_COPY) == 0) { 1368 if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
1364 /* Get the memory mapping */ 1369 /* Get the memory mapping */
1365 ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap); 1370 ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap);
1366 error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, 1371 error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat,
1367 rxbuf->pmap, mp, BUS_DMA_NOWAIT); 1372 rxbuf->pmap, mp, BUS_DMA_NOWAIT);
1368 if (error != 0) { 1373 if (error != 0) {
1369 device_printf(adapter->dev, "Refresh mbufs: " 1374 device_printf(adapter->dev, "Refresh mbufs: "
1370 "payload dmamap load failure - %d\n", 1375 "payload dmamap load failure - %d\n",
1371 error); 1376 error);
1372 m_free(mp); 1377 m_free(mp);
1373 rxbuf->buf = NULL; 1378 rxbuf->buf = NULL;
1374 goto update; 1379 goto update;
1375 } 1380 }
1376 rxbuf->buf = mp; 1381 rxbuf->buf = mp;
1377 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, 1382 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1378 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD); 1383 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1379 rxbuf->addr = rxr->rx_base[i].read.pkt_addr = 1384 rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
1380 htole64(rxbuf->pmap->dm_segs[0].ds_addr); 1385 htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1381 } else { 1386 } else {
1382 rxr->rx_base[i].read.pkt_addr = rxbuf->addr; 1387 rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
1383 rxbuf->flags &= ~IXGBE_RX_COPY; 1388 rxbuf->flags &= ~IXGBE_RX_COPY;
1384 } 1389 }
1385 1390
1386 refreshed = true; 1391 refreshed = true;
1387 /* Next is precalculated */ 1392 /* Next is precalculated */
1388 i = j; 1393 i = j;
1389 rxr->next_to_refresh = i; 1394 rxr->next_to_refresh = i;
1390 if (++j == rxr->num_desc) 1395 if (++j == rxr->num_desc)
1391 j = 0; 1396 j = 0;
1392 } 1397 }
1393 1398
1394update: 1399update:
1395 if (refreshed) /* Update hardware tail index */ 1400 if (refreshed) /* Update hardware tail index */
1396 IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh); 1401 IXGBE_WRITE_REG(&adapter->hw, rxr->tail, rxr->next_to_refresh);
1397 1402
1398 return; 1403 return;
1399} /* ixgbe_refresh_mbufs */ 1404} /* ixgbe_refresh_mbufs */
1400 1405
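The loop above is the "control the loop with one beyond" idiom: i is the slot being refreshed, j runs one slot ahead, and the walk stops when j reaches the caller-supplied limit. Because next_to_refresh advances only after a slot is successfully filled, an allocation failure simply leaves the placeholder in place and a later call resumes at the same slot. A minimal standalone sketch of the index pattern, with fill_slot() as a hypothetical stand-in for the getjcl/busdma work:

    #include <stdbool.h>

    extern bool fill_slot(int slot);        /* hypothetical allocator */

    static void
    ring_refresh(int *next_to_refresh, int num_desc, int limit)
    {
            int i, j;

            i = j = *next_to_refresh;
            if (++j == num_desc)            /* j runs one slot ahead of i */
                    j = 0;
            while (j != limit) {
                    if (!fill_slot(i))
                            break;          /* keep placeholder; retry on next call */
                    i = j;                  /* commit progress only on success */
                    *next_to_refresh = i;
                    if (++j == num_desc)
                            j = 0;
            }
    }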
1401/************************************************************************ 1406/************************************************************************
1402 * ixgbe_allocate_receive_buffers 1407 * ixgbe_allocate_receive_buffers
1403 * 1408 *
1404 * Allocate memory for rx_buffer structures. Since we use one 1409 * Allocate memory for rx_buffer structures. Since we use one
1405 * rx_buffer per received packet, the maximum number of rx_buffers 1410 * rx_buffer per received packet, the maximum number of rx_buffers
1406 * that we'll need is equal to the number of receive descriptors 1411 * that we'll need is equal to the number of receive descriptors
1407 * that we've allocated. 1412 * that we've allocated.
1408 ************************************************************************/ 1413 ************************************************************************/
1409static int 1414static int
1410ixgbe_allocate_receive_buffers(struct rx_ring *rxr) 1415ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
1411{ 1416{
1412 struct adapter *adapter = rxr->adapter; 1417 struct adapter *adapter = rxr->adapter;
1413 device_t dev = adapter->dev; 1418 device_t dev = adapter->dev;
1414 struct ixgbe_rx_buf *rxbuf; 1419 struct ixgbe_rx_buf *rxbuf;
1415 int bsize, error; 1420 int bsize, error;
1416 1421
1417 bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc; 1422 bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
1418 rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF, 1423 rxr->rx_buffers = (struct ixgbe_rx_buf *)malloc(bsize, M_DEVBUF,
1419 M_NOWAIT | M_ZERO); 1424 M_NOWAIT | M_ZERO);
1420 if (rxr->rx_buffers == NULL) { 1425 if (rxr->rx_buffers == NULL) {
1421 aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n"); 1426 aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n");
1422 error = ENOMEM; 1427 error = ENOMEM;
1423 goto fail; 1428 goto fail;
1424 } 1429 }
1425 1430
1426 error = ixgbe_dma_tag_create( 1431 error = ixgbe_dma_tag_create(
1427 /* parent */ adapter->osdep.dmat, 1432 /* parent */ adapter->osdep.dmat,
1428 /* alignment */ 1, 1433 /* alignment */ 1,
1429 /* bounds */ 0, 1434 /* bounds */ 0,
1430 /* maxsize */ MJUM16BYTES, 1435 /* maxsize */ MJUM16BYTES,
1431 /* nsegments */ 1, 1436 /* nsegments */ 1,
1432 /* maxsegsize */ MJUM16BYTES, 1437 /* maxsegsize */ MJUM16BYTES,
1433 /* flags */ 0, 1438 /* flags */ 0,
1434 &rxr->ptag); 1439 &rxr->ptag);
1435 if (error != 0) { 1440 if (error != 0) {
1436 aprint_error_dev(dev, "Unable to create RX DMA tag\n"); 1441 aprint_error_dev(dev, "Unable to create RX DMA tag\n");
1437 goto fail; 1442 goto fail;
1438 } 1443 }
1439 1444
1440 for (int i = 0; i < rxr->num_desc; i++, rxbuf++) { 1445 for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
1441 rxbuf = &rxr->rx_buffers[i]; 1446 rxbuf = &rxr->rx_buffers[i];
1442 error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap); 1447 error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
1443 if (error) { 1448 if (error) {
1444 aprint_error_dev(dev, "Unable to create RX dma map\n"); 1449 aprint_error_dev(dev, "Unable to create RX dma map\n");
1445 goto fail; 1450 goto fail;
1446 } 1451 }
1447 } 1452 }
1448 1453
1449 return (0); 1454 return (0);
1450 1455
1451fail: 1456fail:
1452 /* Frees all, but can handle partial completion */ 1457 /* Frees all, but can handle partial completion */
1453 ixgbe_free_receive_structures(adapter); 1458 ixgbe_free_receive_structures(adapter);
1454 1459
1455 return (error); 1460 return (error);
1456} /* ixgbe_allocate_receive_buffers */ 1461} /* ixgbe_allocate_receive_buffers */
1457 1462
1458/************************************************************************ 1463/************************************************************************
1459 * ixgbe_free_receive_ring 1464 * ixgbe_free_receive_ring
1460 ************************************************************************/ 1465 ************************************************************************/
1461static void 1466static void
1462ixgbe_free_receive_ring(struct rx_ring *rxr) 1467ixgbe_free_receive_ring(struct rx_ring *rxr)
1463{ 1468{
1464 for (int i = 0; i < rxr->num_desc; i++) { 1469 for (int i = 0; i < rxr->num_desc; i++) {
1465 ixgbe_rx_discard(rxr, i); 1470 ixgbe_rx_discard(rxr, i);
1466 } 1471 }
1467} /* ixgbe_free_receive_ring */ 1472} /* ixgbe_free_receive_ring */
1468 1473
1469/************************************************************************ 1474/************************************************************************
1470 * ixgbe_setup_receive_ring 1475 * ixgbe_setup_receive_ring
1471 * 1476 *
1472 * Initialize a receive ring and its buffers. 1477 * Initialize a receive ring and its buffers.
1473 ************************************************************************/ 1478 ************************************************************************/
1474static int 1479static int
1475ixgbe_setup_receive_ring(struct rx_ring *rxr) 1480ixgbe_setup_receive_ring(struct rx_ring *rxr)
1476{ 1481{
1477 struct adapter *adapter; 1482 struct adapter *adapter;
1478 struct ixgbe_rx_buf *rxbuf; 1483 struct ixgbe_rx_buf *rxbuf;
1479#ifdef LRO 1484#ifdef LRO
1480 struct ifnet *ifp; 1485 struct ifnet *ifp;
1481 struct lro_ctrl *lro = &rxr->lro; 1486 struct lro_ctrl *lro = &rxr->lro;
1482#endif /* LRO */ 1487#endif /* LRO */
1483#ifdef DEV_NETMAP 1488#ifdef DEV_NETMAP
1484 struct netmap_adapter *na = NA(rxr->adapter->ifp); 1489 struct netmap_adapter *na = NA(rxr->adapter->ifp);
1485 struct netmap_slot *slot; 1490 struct netmap_slot *slot;
1486#endif /* DEV_NETMAP */ 1491#endif /* DEV_NETMAP */
1487 int rsize, error = 0; 1492 int rsize, error = 0;
1488 1493
1489 adapter = rxr->adapter; 1494 adapter = rxr->adapter;
1490#ifdef LRO 1495#ifdef LRO
1491 ifp = adapter->ifp; 1496 ifp = adapter->ifp;
1492#endif /* LRO */ 1497#endif /* LRO */
1493 1498
1494 /* Clear the ring contents */ 1499 /* Clear the ring contents */
1495 IXGBE_RX_LOCK(rxr); 1500 IXGBE_RX_LOCK(rxr);
1496 1501
1497#ifdef DEV_NETMAP 1502#ifdef DEV_NETMAP
1498 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 1503 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1499 slot = netmap_reset(na, NR_RX, rxr->me, 0); 1504 slot = netmap_reset(na, NR_RX, rxr->me, 0);
1500#endif /* DEV_NETMAP */ 1505#endif /* DEV_NETMAP */
1501 1506
1502 rsize = roundup2(adapter->num_rx_desc * 1507 rsize = roundup2(adapter->num_rx_desc *
1503 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); 1508 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
1504 bzero((void *)rxr->rx_base, rsize); 1509 bzero((void *)rxr->rx_base, rsize);
1505 /* Cache the size */ 1510 /* Cache the size */
1506 rxr->mbuf_sz = adapter->rx_mbuf_sz; 1511 rxr->mbuf_sz = adapter->rx_mbuf_sz;
1507 1512
1508 /* Free current RX buffer structs and their mbufs */ 1513 /* Free current RX buffer structs and their mbufs */
1509 ixgbe_free_receive_ring(rxr); 1514 ixgbe_free_receive_ring(rxr);
1510 1515
1511 IXGBE_RX_UNLOCK(rxr); 1516 IXGBE_RX_UNLOCK(rxr);
1512 /* 1517 /*
1513 * Now reinitialize our supply of jumbo mbufs. The number 1518 * Now reinitialize our supply of jumbo mbufs. The number
1514 * or size of jumbo mbufs may have changed. 1519 * or size of jumbo mbufs may have changed.
1515 * Assume all rxr->ptag values are the same. 1520 * Assume all rxr->ptag values are the same.
1516 */ 1521 */
1517 ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr, 1522 ixgbe_jcl_reinit(adapter, rxr->ptag->dt_dmat, rxr,
1518 (2 * adapter->num_rx_desc), adapter->rx_mbuf_sz); 1523 adapter->num_jcl, adapter->rx_mbuf_sz);
1519 1524
1520 IXGBE_RX_LOCK(rxr); 1525 IXGBE_RX_LOCK(rxr);
1521 1526
1522 /* Now replenish the mbufs */ 1527 /* Now replenish the mbufs */
1523 for (int j = 0; j != rxr->num_desc; ++j) { 1528 for (int j = 0; j != rxr->num_desc; ++j) {
1524 struct mbuf *mp; 1529 struct mbuf *mp;
1525 1530
1526 rxbuf = &rxr->rx_buffers[j]; 1531 rxbuf = &rxr->rx_buffers[j];
1527 1532
1528#ifdef DEV_NETMAP 1533#ifdef DEV_NETMAP
1529 /* 1534 /*
1530 * In netmap mode, fill the map and set the buffer 1535 * In netmap mode, fill the map and set the buffer
1531 * address in the NIC ring, considering the offset 1536 * address in the NIC ring, considering the offset
1532 * between the netmap and NIC rings (see comment in 1537 * between the netmap and NIC rings (see comment in
1533 * ixgbe_setup_transmit_ring() ). No need to allocate 1538 * ixgbe_setup_transmit_ring() ). No need to allocate
1534 * an mbuf, so end the block with a continue; 1539 * an mbuf, so end the block with a continue;
1535 */ 1540 */
1536 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) { 1541 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && slot) {
1537 int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j); 1542 int sj = netmap_idx_n2k(na->rx_rings[rxr->me], j);
1538 uint64_t paddr; 1543 uint64_t paddr;
1539 void *addr; 1544 void *addr;
1540 1545
1541 addr = PNMB(na, slot + sj, &paddr); 1546 addr = PNMB(na, slot + sj, &paddr);
1542 netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr); 1547 netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
1543 /* Update descriptor and the cached value */ 1548 /* Update descriptor and the cached value */
1544 rxr->rx_base[j].read.pkt_addr = htole64(paddr); 1549 rxr->rx_base[j].read.pkt_addr = htole64(paddr);
1545 rxbuf->addr = htole64(paddr); 1550 rxbuf->addr = htole64(paddr);
1546 continue; 1551 continue;
1547 } 1552 }
1548#endif /* DEV_NETMAP */ 1553#endif /* DEV_NETMAP */
1549 1554
1550 rxbuf->flags = 0; 1555 rxbuf->flags = 0;
1551 rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, 1556 rxbuf->buf = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT,
1552 MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); 1557 MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz);
1553 if (rxbuf->buf == NULL) { 1558 if (rxbuf->buf == NULL) {
1554 error = ENOBUFS; 1559 error = ENOBUFS;
1555 goto fail; 1560 goto fail;
1556 } 1561 }
1557 mp = rxbuf->buf; 1562 mp = rxbuf->buf;
1558 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; 1563 mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
1559 /* Get the memory mapping */ 1564 /* Get the memory mapping */
1560 error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap, 1565 error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, rxbuf->pmap,
1561 mp, BUS_DMA_NOWAIT); 1566 mp, BUS_DMA_NOWAIT);
1562 if (error != 0) 1567 if (error != 0)
1563 goto fail; 1568 goto fail;
1564 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, 1569 bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
1565 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD); 1570 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD);
1566 /* Update the descriptor and the cached value */ 1571 /* Update the descriptor and the cached value */
1567 rxr->rx_base[j].read.pkt_addr = 1572 rxr->rx_base[j].read.pkt_addr =
1568 htole64(rxbuf->pmap->dm_segs[0].ds_addr); 1573 htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1569 rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr); 1574 rxbuf->addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
1570 } 1575 }
1571 1576
1572 /* Setup our descriptor indices */ 1577 /* Setup our descriptor indices */
1573 rxr->next_to_check = 0; 1578 rxr->next_to_check = 0;
1574 rxr->next_to_refresh = 0; 1579 rxr->next_to_refresh = 0;
1575 rxr->lro_enabled = FALSE; 1580 rxr->lro_enabled = FALSE;
1576 rxr->rx_copies.ev_count = 0; 1581 rxr->rx_copies.ev_count = 0;
1577#if 0 /* NetBSD */ 1582#if 0 /* NetBSD */
1578 rxr->rx_bytes.ev_count = 0; 1583 rxr->rx_bytes.ev_count = 0;
1579#if 1 /* Fix inconsistency */ 1584#if 1 /* Fix inconsistency */
1580 rxr->rx_packets.ev_count = 0; 1585 rxr->rx_packets.ev_count = 0;
1581#endif 1586#endif
1582#endif 1587#endif
1583 rxr->vtag_strip = FALSE; 1588 rxr->vtag_strip = FALSE;
1584 1589
1585 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1590 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1586 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1591 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1587 1592
1588 /* 1593 /*
1589 * Now set up the LRO interface 1594 * Now set up the LRO interface
1590 */ 1595 */
1591 if (ixgbe_rsc_enable) 1596 if (ixgbe_rsc_enable)
1592 ixgbe_setup_hw_rsc(rxr); 1597 ixgbe_setup_hw_rsc(rxr);
1593#ifdef LRO 1598#ifdef LRO
1594 else if (ifp->if_capenable & IFCAP_LRO) { 1599 else if (ifp->if_capenable & IFCAP_LRO) {
1595 device_t dev = adapter->dev; 1600 device_t dev = adapter->dev;
1596 int err = tcp_lro_init(lro); 1601 int err = tcp_lro_init(lro);
1597 if (err) { 1602 if (err) {
1598 device_printf(dev, "LRO Initialization failed!\n"); 1603 device_printf(dev, "LRO Initialization failed!\n");
1599 goto fail; 1604 goto fail;
1600 } 1605 }
1601 INIT_DEBUGOUT("RX Soft LRO Initialized\n"); 1606 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
1602 rxr->lro_enabled = TRUE; 1607 rxr->lro_enabled = TRUE;
1603 lro->ifp = adapter->ifp; 1608 lro->ifp = adapter->ifp;
1604 } 1609 }
1605#endif /* LRO */ 1610#endif /* LRO */
1606 1611
1607 IXGBE_RX_UNLOCK(rxr); 1612 IXGBE_RX_UNLOCK(rxr);
1608 1613
1609 return (0); 1614 return (0);
1610 1615
1611fail: 1616fail:
1612 ixgbe_free_receive_ring(rxr); 1617 ixgbe_free_receive_ring(rxr);
1613 IXGBE_RX_UNLOCK(rxr); 1618 IXGBE_RX_UNLOCK(rxr);
1614 1619
1615 return (error); 1620 return (error);
1616} /* ixgbe_setup_receive_ring */ 1621} /* ixgbe_setup_receive_ring */
1617 1622
1618/************************************************************************ 1623/************************************************************************
1619 * ixgbe_setup_receive_structures - Initialize all receive rings. 1624 * ixgbe_setup_receive_structures - Initialize all receive rings.
1620 ************************************************************************/ 1625 ************************************************************************/
1621int 1626int
1622ixgbe_setup_receive_structures(struct adapter *adapter) 1627ixgbe_setup_receive_structures(struct adapter *adapter)
1623{ 1628{
1624 struct rx_ring *rxr = adapter->rx_rings; 1629 struct rx_ring *rxr = adapter->rx_rings;
1625 int j; 1630 int j;
1626 1631
1627 INIT_DEBUGOUT("ixgbe_setup_receive_structures"); 1632 INIT_DEBUGOUT("ixgbe_setup_receive_structures");
1628 for (j = 0; j < adapter->num_queues; j++, rxr++) 1633 for (j = 0; j < adapter->num_queues; j++, rxr++)
1629 if (ixgbe_setup_receive_ring(rxr)) 1634 if (ixgbe_setup_receive_ring(rxr))
1630 goto fail; 1635 goto fail;
1631 1636
1632 return (0); 1637 return (0);
1633fail: 1638fail:
1634 /* 1639 /*
1635 * Free RX buffers allocated so far; we will only handle 1640 * Free RX buffers allocated so far; we will only handle
1636 * the rings that completed; the failing case will have 1641 * the rings that completed; the failing case will have
1637 * cleaned up for itself. 'j' failed, so it's the terminus. 1642 * cleaned up for itself. 'j' failed, so it's the terminus.
1638 */ 1643 */
1639 for (int i = 0; i < j; ++i) { 1644 for (int i = 0; i < j; ++i) {
1640 rxr = &adapter->rx_rings[i]; 1645 rxr = &adapter->rx_rings[i];
1641 IXGBE_RX_LOCK(rxr); 1646 IXGBE_RX_LOCK(rxr);
1642 ixgbe_free_receive_ring(rxr); 1647 ixgbe_free_receive_ring(rxr);
1643 IXGBE_RX_UNLOCK(rxr); 1648 IXGBE_RX_UNLOCK(rxr);
1644 } 1649 }
1645 1650
1646 return (ENOBUFS); 1651 return (ENOBUFS);
1647} /* ixgbe_setup_receive_structures */ 1652} /* ixgbe_setup_receive_structures */
1648 1653
1649 1654
1650/************************************************************************ 1655/************************************************************************
1651 * ixgbe_free_receive_structures - Free all receive rings. 1656 * ixgbe_free_receive_structures - Free all receive rings.
1652 ************************************************************************/ 1657 ************************************************************************/
1653void 1658void
1654ixgbe_free_receive_structures(struct adapter *adapter) 1659ixgbe_free_receive_structures(struct adapter *adapter)
1655{ 1660{
1656 struct rx_ring *rxr = adapter->rx_rings; 1661 struct rx_ring *rxr = adapter->rx_rings;
1657 1662
1658 INIT_DEBUGOUT("ixgbe_free_receive_structures: begin"); 1663 INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
1659 1664
1660 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1665 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1661 ixgbe_free_receive_buffers(rxr); 1666 ixgbe_free_receive_buffers(rxr);
1662#ifdef LRO 1667#ifdef LRO
1663 /* Free LRO memory */ 1668 /* Free LRO memory */
1664 tcp_lro_free(&rxr->lro); 1669 tcp_lro_free(&rxr->lro);
1665#endif /* LRO */ 1670#endif /* LRO */
1666 /* Free the ring memory as well */ 1671 /* Free the ring memory as well */
1667 ixgbe_dma_free(adapter, &rxr->rxdma); 1672 ixgbe_dma_free(adapter, &rxr->rxdma);
1668 IXGBE_RX_LOCK_DESTROY(rxr); 1673 IXGBE_RX_LOCK_DESTROY(rxr);
1669 } 1674 }
1670 1675
1671 free(adapter->rx_rings, M_DEVBUF); 1676 free(adapter->rx_rings, M_DEVBUF);
1672} /* ixgbe_free_receive_structures */ 1677} /* ixgbe_free_receive_structures */
1673 1678
1674 1679
1675/************************************************************************ 1680/************************************************************************
1676 * ixgbe_free_receive_buffers - Free receive ring data structures 1681 * ixgbe_free_receive_buffers - Free receive ring data structures
1677 ************************************************************************/ 1682 ************************************************************************/
1678static void 1683static void
1679ixgbe_free_receive_buffers(struct rx_ring *rxr) 1684ixgbe_free_receive_buffers(struct rx_ring *rxr)
1680{ 1685{
1681 struct adapter *adapter = rxr->adapter; 1686 struct adapter *adapter = rxr->adapter;
1682 struct ixgbe_rx_buf *rxbuf; 1687 struct ixgbe_rx_buf *rxbuf;
1683 1688
1684 INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin"); 1689 INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
1685 1690
1686 /* Clean up any existing buffers */ 1691 /* Clean up any existing buffers */
1687 if (rxr->rx_buffers != NULL) { 1692 if (rxr->rx_buffers != NULL) {
1688 for (int i = 0; i < adapter->num_rx_desc; i++) { 1693 for (int i = 0; i < adapter->num_rx_desc; i++) {
1689 rxbuf = &rxr->rx_buffers[i]; 1694 rxbuf = &rxr->rx_buffers[i];
1690 ixgbe_rx_discard(rxr, i); 1695 ixgbe_rx_discard(rxr, i);
1691 if (rxbuf->pmap != NULL) { 1696 if (rxbuf->pmap != NULL) {
1692 ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap); 1697 ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap);
1693 rxbuf->pmap = NULL; 1698 rxbuf->pmap = NULL;
1694 } 1699 }
1695 } 1700 }
1696 1701
1697 /* NetBSD specific. See ixgbe_netbsd.c */ 1702 /* NetBSD specific. See ixgbe_netbsd.c */
1698 ixgbe_jcl_destroy(adapter, rxr); 1703 ixgbe_jcl_destroy(adapter, rxr);
1699 1704
1700 if (rxr->rx_buffers != NULL) { 1705 if (rxr->rx_buffers != NULL) {
1701 free(rxr->rx_buffers, M_DEVBUF); 1706 free(rxr->rx_buffers, M_DEVBUF);
1702 rxr->rx_buffers = NULL; 1707 rxr->rx_buffers = NULL;
1703 } 1708 }
1704 } 1709 }
1705 1710
1706 if (rxr->ptag != NULL) { 1711 if (rxr->ptag != NULL) {
1707 ixgbe_dma_tag_destroy(rxr->ptag); 1712 ixgbe_dma_tag_destroy(rxr->ptag);
1708 rxr->ptag = NULL; 1713 rxr->ptag = NULL;
1709 } 1714 }
1710 1715
1711 return; 1716 return;
1712} /* ixgbe_free_receive_buffers */ 1717} /* ixgbe_free_receive_buffers */
1713 1718
1714/************************************************************************ 1719/************************************************************************
1715 * ixgbe_rx_input 1720 * ixgbe_rx_input
1716 ************************************************************************/ 1721 ************************************************************************/
1717static __inline void 1722static __inline void
1718ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, 1723ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m,
1719 u32 ptype) 1724 u32 ptype)
1720{ 1725{
1721 struct adapter *adapter = ifp->if_softc; 1726 struct adapter *adapter = ifp->if_softc;
1722 1727
1723#ifdef LRO 1728#ifdef LRO
1724 struct ethercom *ec = &adapter->osdep.ec; 1729 struct ethercom *ec = &adapter->osdep.ec;
1725 1730
1726 /* 1731 /*
1727 * At the moment LRO is only for IP/TCP packets, and the TCP checksum of the 1732 * At the moment LRO is only for IP/TCP packets, and the TCP checksum of the
1728 * packet should be computed by hardware. Also it should not have a VLAN tag 1733 * packet should be computed by hardware. Also it should not have a VLAN tag
1729 * in the ethernet header. In case of IPv6 we do not yet support ext. hdrs. 1734 * in the ethernet header. In case of IPv6 we do not yet support ext. hdrs.
1730 */ 1735 */
1731 if (rxr->lro_enabled && 1736 if (rxr->lro_enabled &&
1732 (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 && 1737 (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 &&
1733 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && 1738 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
1734 ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == 1739 ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1735 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) || 1740 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
1736 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) == 1741 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
1737 (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) && 1742 (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
1738 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == 1743 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
1739 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { 1744 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
1740 /* 1745 /*
1741 * Send to the stack if: 1746 * Send to the stack if:
1742 ** - LRO not enabled, or 1747 ** - LRO not enabled, or
1743 ** - no LRO resources, or 1748 ** - no LRO resources, or
1744 ** - lro enqueue fails 1749 ** - lro enqueue fails
1745 */ 1750 */
1746 if (rxr->lro.lro_cnt != 0) 1751 if (rxr->lro.lro_cnt != 0)
1747 if (tcp_lro_rx(&rxr->lro, m, 0) == 0) 1752 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
1748 return; 1753 return;
1749 } 1754 }
1750#endif /* LRO */ 1755#endif /* LRO */
1751 1756
1752 if_percpuq_enqueue(adapter->ipq, m); 1757 if_percpuq_enqueue(adapter->ipq, m);
1753} /* ixgbe_rx_input */ 1758} /* ixgbe_rx_input */
1754 1759
1755/************************************************************************ 1760/************************************************************************
1756 * ixgbe_rx_discard 1761 * ixgbe_rx_discard
1757 ************************************************************************/ 1762 ************************************************************************/
1758static __inline void 1763static __inline void
1759ixgbe_rx_discard(struct rx_ring *rxr, int i) 1764ixgbe_rx_discard(struct rx_ring *rxr, int i)
1760{ 1765{
1761 struct ixgbe_rx_buf *rbuf; 1766 struct ixgbe_rx_buf *rbuf;
1762 1767
1763 rbuf = &rxr->rx_buffers[i]; 1768 rbuf = &rxr->rx_buffers[i];
1764 1769
1765 /* 1770 /*
1766 * With advanced descriptors the writeback 1771 * With advanced descriptors the writeback
1767 * clobbers the buffer addrs, so it's easier 1772 * clobbers the buffer addrs, so it's easier
1768 * to just free the existing mbufs and take 1773 * to just free the existing mbufs and take
1769 * the normal refresh path to get new buffers 1774 * the normal refresh path to get new buffers
1770 * and mapping. 1775 * and mapping.
1771 */ 1776 */
1772 1777
1773 if (rbuf->fmp != NULL) { /* Partial chain? */ 1778 if (rbuf->fmp != NULL) { /* Partial chain? */
1774 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, 1779 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1775 rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); 1780 rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1776 m_freem(rbuf->fmp); 1781 m_freem(rbuf->fmp);
1777 rbuf->fmp = NULL; 1782 rbuf->fmp = NULL;
1778 rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */ 1783 rbuf->buf = NULL; /* rbuf->buf is part of fmp's chain */
1779 } else if (rbuf->buf) { 1784 } else if (rbuf->buf) {
1780 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, 1785 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1781 rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); 1786 rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1782 m_free(rbuf->buf); 1787 m_free(rbuf->buf);
1783 rbuf->buf = NULL; 1788 rbuf->buf = NULL;
1784 } 1789 }
1785 ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap); 1790 ixgbe_dmamap_unload(rxr->ptag, rbuf->pmap);
1786 1791
1787 rbuf->flags = 0; 1792 rbuf->flags = 0;
1788 1793
1789 return; 1794 return;
1790} /* ixgbe_rx_discard */ 1795} /* ixgbe_rx_discard */
1791 1796
1792 1797
1793/************************************************************************ 1798/************************************************************************
1794 * ixgbe_rxeof 1799 * ixgbe_rxeof
1795 * 1800 *
1796 * Executes in interrupt context. It replenishes the 1801 * Executes in interrupt context. It replenishes the
1797 * mbufs in the descriptor and sends data which has 1802 * mbufs in the descriptor and sends data which has
1798 * been dma'ed into host memory to the upper layer. 1803 * been dma'ed into host memory to the upper layer.
1799 * 1804 *
1800 * Return TRUE for more work, FALSE for all clean. 1805 * Return TRUE for more work, FALSE for all clean.
1801 ************************************************************************/ 1806 ************************************************************************/
1802bool 1807bool
1803ixgbe_rxeof(struct ix_queue *que) 1808ixgbe_rxeof(struct ix_queue *que)
1804{ 1809{
1805 struct adapter *adapter = que->adapter; 1810 struct adapter *adapter = que->adapter;
1806 struct rx_ring *rxr = que->rxr; 1811 struct rx_ring *rxr = que->rxr;
1807 struct ifnet *ifp = adapter->ifp; 1812 struct ifnet *ifp = adapter->ifp;
1808#ifdef LRO 1813#ifdef LRO
1809 struct lro_ctrl *lro = &rxr->lro; 1814 struct lro_ctrl *lro = &rxr->lro;
1810#endif /* LRO */ 1815#endif /* LRO */
1811 union ixgbe_adv_rx_desc *cur; 1816 union ixgbe_adv_rx_desc *cur;
1812 struct ixgbe_rx_buf *rbuf, *nbuf; 1817 struct ixgbe_rx_buf *rbuf, *nbuf;
1813 int i, nextp, processed = 0; 1818 int i, nextp, processed = 0;
1814 u32 staterr = 0; 1819 u32 staterr = 0;
1815 u32 count = adapter->rx_process_limit; 1820 u32 count = 0;
 1821 u32 limit = adapter->rx_process_limit;
 1822 bool discard_multidesc = false;
1816#ifdef RSS 1823#ifdef RSS
1817 u16 pkt_info; 1824 u16 pkt_info;
1818#endif 1825#endif
1819 1826
1820 IXGBE_RX_LOCK(rxr); 1827 IXGBE_RX_LOCK(rxr);
1821 1828
1822#ifdef DEV_NETMAP 1829#ifdef DEV_NETMAP
1823 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) { 1830 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) {
1824 /* Same as the txeof routine: wakeup clients on intr. */ 1831 /* Same as the txeof routine: wakeup clients on intr. */
1825 if (netmap_rx_irq(ifp, rxr->me, &processed)) { 1832 if (netmap_rx_irq(ifp, rxr->me, &processed)) {
1826 IXGBE_RX_UNLOCK(rxr); 1833 IXGBE_RX_UNLOCK(rxr);
1827 return (FALSE); 1834 return (FALSE);
1828 } 1835 }
1829 } 1836 }
1830#endif /* DEV_NETMAP */ 1837#endif /* DEV_NETMAP */
1831 1838
1832 for (i = rxr->next_to_check; count != 0;) { 1839 /*
 1840 * The max number of loop iterations is rx_process_limit. If
 1841 * discard_multidesc is true, continue processing so as not to send a
 1842 * broken packet to the upper layer.
 1843 */
 1844 for (i = rxr->next_to_check;
 1845 (count < limit) || (discard_multidesc == true);) {
 1846
1833 struct mbuf *sendmp, *mp; 1847 struct mbuf *sendmp, *mp;
 1848 struct mbuf *newmp;
1834 u32 rsc, ptype; 1849 u32 rsc, ptype;
1835 u16 len; 1850 u16 len;
1836 u16 vtag = 0; 1851 u16 vtag = 0;
1837 bool eop; 1852 bool eop;
1838 1853
1839 /* Sync the ring. */ 1854 /* Sync the ring. */
1840 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1855 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1841 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1856 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1842 1857
1843 cur = &rxr->rx_base[i]; 1858 cur = &rxr->rx_base[i];
1844 staterr = le32toh(cur->wb.upper.status_error); 1859 staterr = le32toh(cur->wb.upper.status_error);
1845#ifdef RSS 1860#ifdef RSS
1846 pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info); 1861 pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
1847#endif 1862#endif
1848 1863
1849 if ((staterr & IXGBE_RXD_STAT_DD) == 0) 1864 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
1850 break; 1865 break;
1851 1866
1852 count--; 1867 count++;
1853 sendmp = NULL; 1868 sendmp = NULL;
1854 nbuf = NULL; 1869 nbuf = NULL;
1855 rsc = 0; 1870 rsc = 0;
1856 cur->wb.upper.status_error = 0; 1871 cur->wb.upper.status_error = 0;
1857 rbuf = &rxr->rx_buffers[i]; 1872 rbuf = &rxr->rx_buffers[i];
1858 mp = rbuf->buf; 1873 mp = rbuf->buf;
1859 1874
1860 len = le16toh(cur->wb.upper.length); 1875 len = le16toh(cur->wb.upper.length);
1861 ptype = le32toh(cur->wb.lower.lo_dword.data) & 1876 ptype = le32toh(cur->wb.lower.lo_dword.data) &
1862 IXGBE_RXDADV_PKTTYPE_MASK; 1877 IXGBE_RXDADV_PKTTYPE_MASK;
1863 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); 1878 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
1864 1879
1865 /* Make sure bad packets are discarded */ 1880 /* Make sure bad packets are discarded */
1866 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { 1881 if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
1867#if __FreeBSD_version >= 1100036 1882#if __FreeBSD_version >= 1100036
1868 if (adapter->feat_en & IXGBE_FEATURE_VF) 1883 if (adapter->feat_en & IXGBE_FEATURE_VF)
1869 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1884 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1870#endif 1885#endif
1871 rxr->rx_discarded.ev_count++; 1886 rxr->rx_discarded.ev_count++;
1872 ixgbe_rx_discard(rxr, i); 1887 ixgbe_rx_discard(rxr, i);
 1888 discard_multidesc = false;
1873 goto next_desc; 1889 goto next_desc;
1874 } 1890 }
1875 1891
 1892 /* pre-alloc new mbuf */
 1893 if (!discard_multidesc)
 1894 newmp = ixgbe_getjcl(&rxr->jcl_head, M_NOWAIT, MT_DATA,
 1895 M_PKTHDR, rxr->mbuf_sz);
 1896 else
 1897 newmp = NULL;
 1898 if (newmp == NULL) {
 1899 rxr->no_jmbuf.ev_count++;
 1900 /*
 1901 * Descriptor initialization is already done by the
 1902 * above code (cur->wb.upper.status_error = 0).
 1903 * So we can reuse the current rbuf->buf for the new packet.
 1904 *
 1905 * Rewrite the buffer addr; see the comment in
 1906 * ixgbe_rx_discard().
 1907 */
 1908 cur->read.pkt_addr = rbuf->addr;
 1909 m_freem(rbuf->fmp);
 1910 rbuf->fmp = NULL;
 1911 if (!eop) {
 1912 /* Discard the entire packet. */
 1913 discard_multidesc = true;
 1914 } else
 1915 discard_multidesc = false;
 1916 goto next_desc;
 1917 }
 1918 discard_multidesc = false;
 1919
1876 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0, 1920 bus_dmamap_sync(rxr->ptag->dt_dmat, rbuf->pmap, 0,
1877 rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD); 1921 rbuf->buf->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
1878 1922
1879 /* 1923 /*
1880 * On 82599 which supports a hardware 1924 * On 82599 which supports a hardware
1881 * LRO (called HW RSC), packets need 1925 * LRO (called HW RSC), packets need
1882 * not be fragmented across sequential 1926 * not be fragmented across sequential
1883 * descriptors; rather, the next descriptor 1927 * descriptors; rather, the next descriptor
1884 * is indicated in bits of the descriptor. 1928 * is indicated in bits of the descriptor.
1885 * This also means that we might process 1929 * This also means that we might process
1886 * more than one packet at a time, something 1930 * more than one packet at a time, something
1887 * that has never been true before; it 1931 * that has never been true before; it
1888 * required eliminating global chain pointers 1932 * required eliminating global chain pointers
1889 * in favor of what we are doing here. -jfv 1933 * in favor of what we are doing here. -jfv
1890 */ 1934 */
1891 if (!eop) { 1935 if (!eop) {
1892 /* 1936 /*
1893 * Figure out the next descriptor 1937 * Figure out the next descriptor
1894 * of this frame. 1938 * of this frame.
1895 */ 1939 */
1896 if (rxr->hw_rsc == TRUE) { 1940 if (rxr->hw_rsc == TRUE) {
1897 rsc = ixgbe_rsc_count(cur); 1941 rsc = ixgbe_rsc_count(cur);
1898 rxr->rsc_num += (rsc - 1); 1942 rxr->rsc_num += (rsc - 1);
1899 } 1943 }
1900 if (rsc) { /* Get hardware index */ 1944 if (rsc) { /* Get hardware index */
1901 nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >> 1945 nextp = ((staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1902 IXGBE_RXDADV_NEXTP_SHIFT); 1946 IXGBE_RXDADV_NEXTP_SHIFT);
1903 } else { /* Just sequential */ 1947 } else { /* Just sequential */
1904 nextp = i + 1; 1948 nextp = i + 1;
1905 if (nextp == adapter->num_rx_desc) 1949 if (nextp == adapter->num_rx_desc)
1906 nextp = 0; 1950 nextp = 0;
1907 } 1951 }
1908 nbuf = &rxr->rx_buffers[nextp]; 1952 nbuf = &rxr->rx_buffers[nextp];
1909 prefetch(nbuf); 1953 prefetch(nbuf);
1910 } 1954 }
1911 /* 1955 /*
1912 * Rather than using the fmp/lmp global pointers 1956 * Rather than using the fmp/lmp global pointers
1913 * we now keep the head of a packet chain in the 1957 * we now keep the head of a packet chain in the
1914 * buffer struct and pass this along from one 1958 * buffer struct and pass this along from one
1915 * descriptor to the next, until we get EOP. 1959 * descriptor to the next, until we get EOP.
1916 */ 1960 */
1917 mp->m_len = len; 1961 mp->m_len = len;
1918 /* 1962 /*
1919 * See if there is a stored head 1963 * See if there is a stored head
1920 * that determines what we are 1964 * that determines what we are
1921 */ 1965 */
1922 sendmp = rbuf->fmp; 1966 sendmp = rbuf->fmp;
1923 if (sendmp != NULL) { /* secondary frag */ 1967 if (sendmp != NULL) { /* secondary frag */
1924 rbuf->buf = rbuf->fmp = NULL; 1968 rbuf->buf = newmp;
 1969 rbuf->fmp = NULL;
1925 mp->m_flags &= ~M_PKTHDR; 1970 mp->m_flags &= ~M_PKTHDR;
1926 sendmp->m_pkthdr.len += mp->m_len; 1971 sendmp->m_pkthdr.len += mp->m_len;
1927 } else { 1972 } else {
1928 /* 1973 /*
1929 * Optimize. This might be a small packet, 1974 * Optimize. This might be a small packet,
1930 * maybe just a TCP ACK. Do a fast copy that 1975 * maybe just a TCP ACK. Do a fast copy that
1931 * is cache aligned into a new mbuf, and 1976 * is cache aligned into a new mbuf, and
1932 * leave the old mbuf+cluster for re-use. 1977 * leave the old mbuf+cluster for re-use.
1933 */ 1978 */
1934 if (eop && len <= IXGBE_RX_COPY_LEN) { 1979 if (eop && len <= IXGBE_RX_COPY_LEN) {
1935 sendmp = m_gethdr(M_NOWAIT, MT_DATA); 1980 sendmp = m_gethdr(M_NOWAIT, MT_DATA);
1936 if (sendmp != NULL) { 1981 if (sendmp != NULL) {
1937 sendmp->m_data += IXGBE_RX_COPY_ALIGN; 1982 sendmp->m_data += IXGBE_RX_COPY_ALIGN;
1938 ixgbe_bcopy(mp->m_data, sendmp->m_data, 1983 ixgbe_bcopy(mp->m_data, sendmp->m_data,
1939 len); 1984 len);
1940 sendmp->m_len = len; 1985 sendmp->m_len = len;
1941 rxr->rx_copies.ev_count++; 1986 rxr->rx_copies.ev_count++;
1942 rbuf->flags |= IXGBE_RX_COPY; 1987 rbuf->flags |= IXGBE_RX_COPY;
 1988
 1989 m_freem(newmp);
1943 } 1990 }
1944 } 1991 }
1945 if (sendmp == NULL) { 1992 if (sendmp == NULL) {
1946 rbuf->buf = rbuf->fmp = NULL; 1993 rbuf->buf = newmp;
 1994 rbuf->fmp = NULL;
1947 sendmp = mp; 1995 sendmp = mp;
1948 } 1996 }
1949 1997
1950 /* first desc of a non-ps chain */ 1998 /* first desc of a non-ps chain */
1951 sendmp->m_flags |= M_PKTHDR; 1999 sendmp->m_flags |= M_PKTHDR;
1952 sendmp->m_pkthdr.len = mp->m_len; 2000 sendmp->m_pkthdr.len = mp->m_len;
1953 } 2001 }
1954 ++processed; 2002 ++processed;
1955 2003
1956 /* Pass the head pointer on */ 2004 /* Pass the head pointer on */
1957 if (eop == 0) { 2005 if (eop == 0) {
1958 nbuf->fmp = sendmp; 2006 nbuf->fmp = sendmp;
1959 sendmp = NULL; 2007 sendmp = NULL;
1960 mp->m_next = nbuf->buf; 2008 mp->m_next = nbuf->buf;
1961 } else { /* Sending this frame */ 2009 } else { /* Sending this frame */
1962 m_set_rcvif(sendmp, ifp); 2010 m_set_rcvif(sendmp, ifp);
1963 ++rxr->packets; 2011 ++rxr->packets;
1964 rxr->rx_packets.ev_count++; 2012 rxr->rx_packets.ev_count++;
1965 /* capture data for AIM */ 2013 /* capture data for AIM */
1966 rxr->bytes += sendmp->m_pkthdr.len; 2014 rxr->bytes += sendmp->m_pkthdr.len;
1967 rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len; 2015 rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
1968 /* Process vlan info */ 2016 /* Process vlan info */
1969 if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP)) 2017 if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
1970 vtag = le16toh(cur->wb.upper.vlan); 2018 vtag = le16toh(cur->wb.upper.vlan);
1971 if (vtag) { 2019 if (vtag) {
1972 vlan_set_tag(sendmp, vtag); 2020 vlan_set_tag(sendmp, vtag);
1973 } 2021 }
1974 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 2022 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1975 ixgbe_rx_checksum(staterr, sendmp, ptype, 2023 ixgbe_rx_checksum(staterr, sendmp, ptype,
1976 &adapter->stats.pf); 2024 &adapter->stats.pf);
1977 } 2025 }
1978 2026
1979#if 0 /* FreeBSD */ 2027#if 0 /* FreeBSD */
1980 /* 2028 /*
1981 * In case of multiqueue, we have RXCSUM.PCSD bit set 2029 * In case of multiqueue, we have RXCSUM.PCSD bit set
1982 * and never cleared. This means we have RSS hash 2030 * and never cleared. This means we have RSS hash
1983 * available to be used. 2031 * available to be used.
1984 */ 2032 */
1985 if (adapter->num_queues > 1) { 2033 if (adapter->num_queues > 1) {
1986 sendmp->m_pkthdr.flowid = 2034 sendmp->m_pkthdr.flowid =
1987 le32toh(cur->wb.lower.hi_dword.rss); 2035 le32toh(cur->wb.lower.hi_dword.rss);
1988 switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) { 2036 switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
1989 case IXGBE_RXDADV_RSSTYPE_IPV4: 2037 case IXGBE_RXDADV_RSSTYPE_IPV4:
1990 M_HASHTYPE_SET(sendmp, 2038 M_HASHTYPE_SET(sendmp,
1991 M_HASHTYPE_RSS_IPV4); 2039 M_HASHTYPE_RSS_IPV4);
1992 break; 2040 break;
1993 case IXGBE_RXDADV_RSSTYPE_IPV4_TCP: 2041 case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
1994 M_HASHTYPE_SET(sendmp, 2042 M_HASHTYPE_SET(sendmp,
1995 M_HASHTYPE_RSS_TCP_IPV4); 2043 M_HASHTYPE_RSS_TCP_IPV4);
1996 break; 2044 break;
1997 case IXGBE_RXDADV_RSSTYPE_IPV6: 2045 case IXGBE_RXDADV_RSSTYPE_IPV6:
1998 M_HASHTYPE_SET(sendmp, 2046 M_HASHTYPE_SET(sendmp,
1999 M_HASHTYPE_RSS_IPV6); 2047 M_HASHTYPE_RSS_IPV6);
2000 break; 2048 break;
2001 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP: 2049 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP:
2002 M_HASHTYPE_SET(sendmp, 2050 M_HASHTYPE_SET(sendmp,
2003 M_HASHTYPE_RSS_TCP_IPV6); 2051 M_HASHTYPE_RSS_TCP_IPV6);
2004 break; 2052 break;
2005 case IXGBE_RXDADV_RSSTYPE_IPV6_EX: 2053 case IXGBE_RXDADV_RSSTYPE_IPV6_EX:
2006 M_HASHTYPE_SET(sendmp, 2054 M_HASHTYPE_SET(sendmp,
2007 M_HASHTYPE_RSS_IPV6_EX); 2055 M_HASHTYPE_RSS_IPV6_EX);
2008 break; 2056 break;
2009 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX: 2057 case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX:
2010 M_HASHTYPE_SET(sendmp, 2058 M_HASHTYPE_SET(sendmp,
2011 M_HASHTYPE_RSS_TCP_IPV6_EX); 2059 M_HASHTYPE_RSS_TCP_IPV6_EX);
2012 break; 2060 break;
2013#if __FreeBSD_version > 1100000 2061#if __FreeBSD_version > 1100000
2014 case IXGBE_RXDADV_RSSTYPE_IPV4_UDP: 2062 case IXGBE_RXDADV_RSSTYPE_IPV4_UDP:
2015 M_HASHTYPE_SET(sendmp, 2063 M_HASHTYPE_SET(sendmp,
2016 M_HASHTYPE_RSS_UDP_IPV4); 2064 M_HASHTYPE_RSS_UDP_IPV4);
2017 break; 2065 break;
2018 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP: 2066 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP:
2019 M_HASHTYPE_SET(sendmp, 2067 M_HASHTYPE_SET(sendmp,
2020 M_HASHTYPE_RSS_UDP_IPV6); 2068 M_HASHTYPE_RSS_UDP_IPV6);
2021 break; 2069 break;
2022 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX: 2070 case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX:
2023 M_HASHTYPE_SET(sendmp, 2071 M_HASHTYPE_SET(sendmp,
2024 M_HASHTYPE_RSS_UDP_IPV6_EX); 2072 M_HASHTYPE_RSS_UDP_IPV6_EX);
2025 break; 2073 break;
2026#endif 2074#endif
2027 default: 2075 default:
2028 M_HASHTYPE_SET(sendmp, 2076 M_HASHTYPE_SET(sendmp,
2029 M_HASHTYPE_OPAQUE_HASH); 2077 M_HASHTYPE_OPAQUE_HASH);
2030 } 2078 }
2031 } else { 2079 } else {
2032 sendmp->m_pkthdr.flowid = que->msix; 2080 sendmp->m_pkthdr.flowid = que->msix;
2033 M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); 2081 M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
2034 } 2082 }
2035#endif 2083#endif
2036 } 2084 }
2037next_desc: 2085next_desc:
2038 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 2086 ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2039 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2087 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2040 2088
2041 /* Advance our pointers to the next descriptor. */ 2089 /* Advance our pointers to the next descriptor. */
2042 if (++i == rxr->num_desc) 2090 if (++i == rxr->num_desc)
2043 i = 0; 2091 i = 0;
2044 2092
2045 /* Now send to the stack or do LRO */ 2093 /* Now send to the stack or do LRO */
2046 if (sendmp != NULL) { 2094 if (sendmp != NULL) {
2047 rxr->next_to_check = i; 2095 rxr->next_to_check = i;
2048 IXGBE_RX_UNLOCK(rxr); 2096 IXGBE_RX_UNLOCK(rxr);
2049 ixgbe_rx_input(rxr, ifp, sendmp, ptype); 2097 ixgbe_rx_input(rxr, ifp, sendmp, ptype);
2050 IXGBE_RX_LOCK(rxr); 2098 IXGBE_RX_LOCK(rxr);
2051 i = rxr->next_to_check; 2099 i = rxr->next_to_check;
2052 } 2100 }
2053 2101
2054 /* Every 8 descriptors we go to refresh mbufs */ 2102 /* Every 8 descriptors we go to refresh mbufs */
2055 if (processed == 8) { 2103 if (processed == 8) {
2056 ixgbe_refresh_mbufs(rxr, i); 2104 ixgbe_refresh_mbufs(rxr, i);
2057 processed = 0; 2105 processed = 0;
2058 } 2106 }
2059 } 2107 }
2060 2108
2061 /* Refresh any remaining buf structs */ 2109 /* Refresh any remaining buf structs */
2062 if (ixgbe_rx_unrefreshed(rxr)) 2110 if (ixgbe_rx_unrefreshed(rxr))
2063 ixgbe_refresh_mbufs(rxr, i); 2111 ixgbe_refresh_mbufs(rxr, i);
2064 2112
2065 rxr->next_to_check = i; 2113 rxr->next_to_check = i;
2066 2114
2067 IXGBE_RX_UNLOCK(rxr); 2115 IXGBE_RX_UNLOCK(rxr);
2068 2116
2069#ifdef LRO 2117#ifdef LRO
2070 /* 2118 /*
2071 * Flush any outstanding LRO work 2119 * Flush any outstanding LRO work
2072 */ 2120 */
2073 tcp_lro_flush_all(lro); 2121 tcp_lro_flush_all(lro);
2074#endif /* LRO */ 2122#endif /* LRO */
2075 2123
2076 /* 2124 /*
2077 * Still have cleaning to do? 2125 * Still have cleaning to do?
2078 */ 2126 */
2079 if ((staterr & IXGBE_RXD_STAT_DD) != 0) 2127 if ((staterr & IXGBE_RXD_STAT_DD) != 0)
2080 return (TRUE); 2128 return (TRUE);
2081 2129
2082 return (FALSE); 2130 return (FALSE);
2083} /* ixgbe_rxeof */ 2131} /* ixgbe_rxeof */
2084 2132
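The reworked loop head in ixgbe_rxeof() counts up against rx_process_limit instead of counting a budget down, and the "|| discard_multidesc" term is what lets the loop run past that limit: once one fragment of a multi-descriptor frame has been dropped because no replacement cluster could be allocated, the remaining fragments must be consumed as well, or a truncated frame would be handed to the stack. A condensed sketch of just this control flow, with the helpers as hypothetical stand-ins for the descriptor tests:

    #include <stdbool.h>
    #include <stdint.h>

    extern bool desc_done(int i);           /* hypothetical: DD bit set?    */
    extern bool desc_eop(int i);            /* hypothetical: end of packet? */
    extern bool refill_failed(int i);       /* hypothetical: no new cluster */
    extern int next_index(int i);

    static void
    rx_poll(int *next_to_check, uint32_t limit)
    {
            bool discard_multidesc = false;
            uint32_t count = 0;
            int i = *next_to_check;

            /* Run past the limit only while draining a broken frame. */
            while ((count < limit || discard_multidesc) && desc_done(i)) {
                    count++;
                    if (refill_failed(i)) {
                            /* Reuse the old buffer; drop the frame's rest. */
                            discard_multidesc = !desc_eop(i);
                    } else
                            discard_multidesc = false;
                    i = next_index(i);
            }
            *next_to_check = i;
    }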
2085 2133
2086/************************************************************************ 2134/************************************************************************
2087 * ixgbe_rx_checksum 2135 * ixgbe_rx_checksum
2088 * 2136 *
2089 * Verify that the hardware indicated that the checksum is valid. 2137 * Verify that the hardware indicated that the checksum is valid.
2090 * Inform the stack about the status of checksum so that stack 2138 * Inform the stack about the status of checksum so that stack
2091 * doesn't spend time verifying the checksum. 2139 * doesn't spend time verifying the checksum.
2092 ************************************************************************/ 2140 ************************************************************************/
2093static void 2141static void
2094ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype, 2142ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype,
2095 struct ixgbe_hw_stats *stats) 2143 struct ixgbe_hw_stats *stats)
2096{ 2144{
2097 u16 status = (u16)staterr; 2145 u16 status = (u16)staterr;
2098 u8 errors = (u8)(staterr >> 24); 2146 u8 errors = (u8)(staterr >> 24);
2099#if 0 2147#if 0
2100 bool sctp = false; 2148 bool sctp = false;
2101 2149
2102 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && 2150 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
2103 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) 2151 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
2104 sctp = true; 2152 sctp = true;
2105#endif 2153#endif
2106 2154
2107 /* IPv4 checksum */ 2155 /* IPv4 checksum */
2108 if (status & IXGBE_RXD_STAT_IPCS) { 2156 if (status & IXGBE_RXD_STAT_IPCS) {
2109 stats->ipcs.ev_count++; 2157 stats->ipcs.ev_count++;
2110 if (!(errors & IXGBE_RXD_ERR_IPE)) { 2158 if (!(errors & IXGBE_RXD_ERR_IPE)) {
2111 /* IP Checksum Good */ 2159 /* IP Checksum Good */
2112 mp->m_pkthdr.csum_flags = M_CSUM_IPv4; 2160 mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
2113 } else { 2161 } else {
2114 stats->ipcs_bad.ev_count++; 2162 stats->ipcs_bad.ev_count++;
2115 mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD; 2163 mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
2116 } 2164 }
2117 } 2165 }
2118 /* TCP/UDP/SCTP checksum */ 2166 /* TCP/UDP/SCTP checksum */
2119 if (status & IXGBE_RXD_STAT_L4CS) { 2167 if (status & IXGBE_RXD_STAT_L4CS) {
2120 stats->l4cs.ev_count++; 2168 stats->l4cs.ev_count++;
2121 int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6; 2169 int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
2122 if (!(errors & IXGBE_RXD_ERR_TCPE)) { 2170 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
2123 mp->m_pkthdr.csum_flags |= type; 2171 mp->m_pkthdr.csum_flags |= type;
2124 } else { 2172 } else {
2125 stats->l4cs_bad.ev_count++; 2173 stats->l4cs_bad.ev_count++;
2126 mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD; 2174 mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
2127 } 2175 }
2128 } 2176 }
2129} /* ixgbe_rx_checksum */ 2177} /* ixgbe_rx_checksum */
2130 2178
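ixgbe_rx_checksum() depends on the layout of the descriptor's 32-bit status_error word: the status bits (was an IP/L4 checksum computed at all?) live in the low half, and the error bits (was a computed checksum bad?) live in bits 24-31, which is why the function opens with two casts. A small sketch of that split, under the stated layout assumption (the source variable here is hypothetical):

    uint32_t staterr = le32toh(wb_status_error);    /* hypothetical source   */
    uint16_t status  = (uint16_t)staterr;           /* IXGBE_RXD_STAT_* bits */
    uint8_t  errors  = (uint8_t)(staterr >> 24);    /* IXGBE_RXD_ERR_* bits  */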
2131/************************************************************************ 2179/************************************************************************
2132 * ixgbe_dma_malloc 2180 * ixgbe_dma_malloc
2133 ************************************************************************/ 2181 ************************************************************************/
2134int 2182int
2135ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size, 2183ixgbe_dma_malloc(struct adapter *adapter, const bus_size_t size,
2136 struct ixgbe_dma_alloc *dma, const int mapflags) 2184 struct ixgbe_dma_alloc *dma, const int mapflags)
2137{ 2185{
2138 device_t dev = adapter->dev; 2186 device_t dev = adapter->dev;
2139 int r, rsegs; 2187 int r, rsegs;
2140 2188
2141 r = ixgbe_dma_tag_create( 2189 r = ixgbe_dma_tag_create(
2142 /* parent */ adapter->osdep.dmat, 2190 /* parent */ adapter->osdep.dmat,
2143 /* alignment */ DBA_ALIGN, 2191 /* alignment */ DBA_ALIGN,
2144 /* bounds */ 0, 2192 /* bounds */ 0,
2145 /* maxsize */ size, 2193 /* maxsize */ size,
2146 /* nsegments */ 1, 2194 /* nsegments */ 1,
2147 /* maxsegsize */ size, 2195 /* maxsegsize */ size,
2148 /* flags */ BUS_DMA_ALLOCNOW, 2196 /* flags */ BUS_DMA_ALLOCNOW,
2149 &dma->dma_tag); 2197 &dma->dma_tag);
2150 if (r != 0) { 2198 if (r != 0) {
2151 aprint_error_dev(dev, 2199 aprint_error_dev(dev,
2152 "%s: ixgbe_dma_tag_create failed; error %d\n", __func__, 2200 "%s: ixgbe_dma_tag_create failed; error %d\n", __func__,
2153 r); 2201 r);
2154 goto fail_0; 2202 goto fail_0;
2155 } 2203 }
2156 2204
2157 r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size, 2205 r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, size,
2158 dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary, 2206 dma->dma_tag->dt_alignment, dma->dma_tag->dt_boundary,
2159 &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT); 2207 &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT);
2160 if (r != 0) { 2208 if (r != 0) {
2161 aprint_error_dev(dev, 2209 aprint_error_dev(dev,
2162 "%s: bus_dmamem_alloc failed; error %d\n", __func__, r); 2210 "%s: bus_dmamem_alloc failed; error %d\n", __func__, r);
2163 goto fail_1; 2211 goto fail_1;
2164 } 2212 }
2165 2213
2166 r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs, 2214 r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs,
2167 size, &dma->dma_vaddr, BUS_DMA_NOWAIT); 2215 size, &dma->dma_vaddr, BUS_DMA_NOWAIT);
2168 if (r != 0) { 2216 if (r != 0) {
2169 aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n", 2217 aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n",
2170 __func__, r); 2218 __func__, r);
2171 goto fail_2; 2219 goto fail_2;
2172 } 2220 }
2173 2221
2174 r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map); 2222 r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map);
2175 if (r != 0) { 2223 if (r != 0) {
2176 aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n", 2224 aprint_error_dev(dev, "%s: ixgbe_dmamap_create failed; error %d\n",
2177 __func__, r); 2225 __func__, r);
2178 goto fail_3; 2226 goto fail_3;
2179 } 2227 }
2180 2228
2181 r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, 2229 r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map,
2182 dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT); 2230 dma->dma_vaddr, size, NULL, mapflags | BUS_DMA_NOWAIT);
2183 if (r != 0) { 2231 if (r != 0) {
2184 aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n", 2232 aprint_error_dev(dev, "%s: bus_dmamap_load failed; error %d\n",
2185 __func__, r); 2233 __func__, r);
2186 goto fail_4; 2234 goto fail_4;
2187 } 2235 }
2188 dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; 2236 dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
2189 dma->dma_size = size; 2237 dma->dma_size = size;
2190 return 0; 2238 return 0;
2191fail_4: 2239fail_4:
2192 ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map); 2240 ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map);
2193fail_3: 2241fail_3:
2194 bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size); 2242 bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size);
2195fail_2: 2243fail_2:
2196 bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs); 2244 bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs);
2197fail_1: 2245fail_1:
2198 ixgbe_dma_tag_destroy(dma->dma_tag); 2246 ixgbe_dma_tag_destroy(dma->dma_tag);
2199fail_0: 2247fail_0:
2200 2248
2201 return (r); 2249 return (r);
2202} /* ixgbe_dma_malloc */ 2250} /* ixgbe_dma_malloc */
2203 2251
2204/************************************************************************ 2252/************************************************************************
2205 * ixgbe_dma_free 2253 * ixgbe_dma_free
2206 ************************************************************************/ 2254 ************************************************************************/
2207void 2255void
2208ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma) 2256ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2209{ 2257{
2210 bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size, 2258 bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size,
2211 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2259 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2212 ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map); 2260 ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map);
2213 bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1); 2261 bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1);
2214 ixgbe_dma_tag_destroy(dma->dma_tag); 2262 ixgbe_dma_tag_destroy(dma->dma_tag);
2215} /* ixgbe_dma_free */ 2263} /* ixgbe_dma_free */
2216 2264
2217 2265
2218/************************************************************************ 2266/************************************************************************
2219 * ixgbe_allocate_queues 2267 * ixgbe_allocate_queues
2220 * 2268 *
2221 * Allocate memory for the transmit and receive rings, and then 2269 * Allocate memory for the transmit and receive rings, and then
2222 * the descriptors associated with each, called only once at attach. 2270 * the descriptors associated with each, called only once at attach.
2223 ************************************************************************/ 2271 ************************************************************************/
2224int 2272int
2225ixgbe_allocate_queues(struct adapter *adapter) 2273ixgbe_allocate_queues(struct adapter *adapter)
2226{ 2274{
2227 device_t dev = adapter->dev; 2275 device_t dev = adapter->dev;
2228 struct ix_queue *que; 2276 struct ix_queue *que;
2229 struct tx_ring *txr; 2277 struct tx_ring *txr;
2230 struct rx_ring *rxr; 2278 struct rx_ring *rxr;
2231 int rsize, tsize, error = IXGBE_SUCCESS; 2279 int rsize, tsize, error = IXGBE_SUCCESS;
2232 int txconf = 0, rxconf = 0; 2280 int txconf = 0, rxconf = 0;
2233 2281
2234 /* First, allocate the top level queue structs */ 2282 /* First, allocate the top level queue structs */
2235 adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) * 2283 adapter->queues = (struct ix_queue *)malloc(sizeof(struct ix_queue) *
2236 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO); 2284 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2237 if (adapter->queues == NULL) { 2285 if (adapter->queues == NULL) {
2238 aprint_error_dev(dev, "Unable to allocate queue memory\n"); 2286 aprint_error_dev(dev, "Unable to allocate queue memory\n");
2239 error = ENOMEM; 2287 error = ENOMEM;
2240 goto fail; 2288 goto fail;
2241 } 2289 }
2242 2290
2243 /* Second, allocate the TX ring struct memory */ 2291 /* Second, allocate the TX ring struct memory */
2244 adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) * 2292 adapter->tx_rings = (struct tx_ring *)malloc(sizeof(struct tx_ring) *
2245 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO); 2293 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2246 if (adapter->tx_rings == NULL) { 2294 if (adapter->tx_rings == NULL) {
2247 aprint_error_dev(dev, "Unable to allocate TX ring memory\n"); 2295 aprint_error_dev(dev, "Unable to allocate TX ring memory\n");
2248 error = ENOMEM; 2296 error = ENOMEM;
2249 goto tx_fail; 2297 goto tx_fail;
2250 } 2298 }
2251 2299
2252 /* Third, allocate the RX ring */ 2300 /* Third, allocate the RX ring */
2253 adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) * 2301 adapter->rx_rings = (struct rx_ring *)malloc(sizeof(struct rx_ring) *
2254 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO); 2302 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO);
2255 if (adapter->rx_rings == NULL) { 2303 if (adapter->rx_rings == NULL) {
2256 aprint_error_dev(dev, "Unable to allocate RX ring memory\n"); 2304 aprint_error_dev(dev, "Unable to allocate RX ring memory\n");
2257 error = ENOMEM; 2305 error = ENOMEM;
2258 goto rx_fail; 2306 goto rx_fail;
2259 } 2307 }
2260 2308
2261 /* For the ring itself */ 2309 /* For the ring itself */
2262 tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc), 2310 tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc),
2263 DBA_ALIGN); 2311 DBA_ALIGN);
2264 2312
2265 /* 2313 /*
2266 * Now set up the TX queues, txconf is needed to handle the 2314 * Now set up the TX queues, txconf is needed to handle the
2267 * possibility that things fail midcourse and we need to 2315 * possibility that things fail midcourse and we need to
2268 * undo memory gracefully 2316 * undo memory gracefully
2269 */ 2317 */
2270 for (int i = 0; i < adapter->num_queues; i++, txconf++) { 2318 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2271 /* Set up some basics */ 2319 /* Set up some basics */
2272 txr = &adapter->tx_rings[i]; 2320 txr = &adapter->tx_rings[i];
2273 txr->adapter = adapter; 2321 txr->adapter = adapter;
2274 txr->txr_interq = NULL; 2322 txr->txr_interq = NULL;
2275 /* In case SR-IOV is enabled, align the index properly */ 2323 /* In case SR-IOV is enabled, align the index properly */
2276#ifdef PCI_IOV 2324#ifdef PCI_IOV
2277 txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, 2325 txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2278 i); 2326 i);
2279#else 2327#else
2280 txr->me = i; 2328 txr->me = i;
2281#endif 2329#endif
2282 txr->num_desc = adapter->num_tx_desc; 2330 txr->num_desc = adapter->num_tx_desc;
2283 2331
2284 /* Initialize the TX side lock */ 2332 /* Initialize the TX side lock */
2285 mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET); 2333 mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET);
2286 2334
2287 if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma, 2335 if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma,
2288 BUS_DMA_NOWAIT)) { 2336 BUS_DMA_NOWAIT)) {
2289 aprint_error_dev(dev, 2337 aprint_error_dev(dev,
2290 "Unable to allocate TX Descriptor memory\n"); 2338 "Unable to allocate TX Descriptor memory\n");
2291 error = ENOMEM; 2339 error = ENOMEM;
2292 goto err_tx_desc; 2340 goto err_tx_desc;
2293 } 2341 }
2294 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; 2342 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2295 bzero((void *)txr->tx_base, tsize); 2343 bzero((void *)txr->tx_base, tsize);
2296 2344
2297 /* Now allocate transmit buffers for the ring */ 2345 /* Now allocate transmit buffers for the ring */
2298 if (ixgbe_allocate_transmit_buffers(txr)) { 2346 if (ixgbe_allocate_transmit_buffers(txr)) {
2299 aprint_error_dev(dev, 2347 aprint_error_dev(dev,
2300 "Critical Failure setting up transmit buffers\n"); 2348 "Critical Failure setting up transmit buffers\n");
2301 error = ENOMEM; 2349 error = ENOMEM;
2302 goto err_tx_desc; 2350 goto err_tx_desc;
2303 } 2351 }
2304 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 2352 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
2305 /* Allocate a buf ring */ 2353 /* Allocate a buf ring */
2306 txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP); 2354 txr->txr_interq = pcq_create(IXGBE_BR_SIZE, KM_SLEEP);
2307 if (txr->txr_interq == NULL) { 2355 if (txr->txr_interq == NULL) {
2308 aprint_error_dev(dev, 2356 aprint_error_dev(dev,
2309 "Critical Failure setting up buf ring\n"); 2357 "Critical Failure setting up buf ring\n");
2310 error = ENOMEM; 2358 error = ENOMEM;
2311 goto err_tx_desc; 2359 goto err_tx_desc;
2312 } 2360 }
2313 } 2361 }
2314 } 2362 }
2315 2363
2316 /* 2364 /*
2317 * Next the RX queues... 2365 * Next the RX queues...
2318 */ 2366 */
2319 rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc), 2367 rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc),
2320 DBA_ALIGN); 2368 DBA_ALIGN);
2321 for (int i = 0; i < adapter->num_queues; i++, rxconf++) { 2369 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2322 rxr = &adapter->rx_rings[i]; 2370 rxr = &adapter->rx_rings[i];
2323 /* Set up some basics */ 2371 /* Set up some basics */
2324 rxr->adapter = adapter; 2372 rxr->adapter = adapter;
2325#ifdef PCI_IOV 2373#ifdef PCI_IOV
2326 /* In case SR-IOV is enabled, align the index properly */ 2374 /* In case SR-IOV is enabled, align the index properly */
2327 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, 2375 rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
2328 i); 2376 i);
2329#else 2377#else
2330 rxr->me = i; 2378 rxr->me = i;
2331#endif 2379#endif
2332 rxr->num_desc = adapter->num_rx_desc; 2380 rxr->num_desc = adapter->num_rx_desc;
2333 2381
2334 /* Initialize the RX side lock */ 2382 /* Initialize the RX side lock */
2335 mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET); 2383 mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET);
2336 2384
2337 if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma, 2385 if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma,
2338 BUS_DMA_NOWAIT)) { 2386 BUS_DMA_NOWAIT)) {
2339 aprint_error_dev(dev, 2387 aprint_error_dev(dev,
2340 "Unable to allocate RxDescriptor memory\n"); 2388 "Unable to allocate RxDescriptor memory\n");
2341 error = ENOMEM; 2389 error = ENOMEM;
2342 goto err_rx_desc; 2390 goto err_rx_desc;
2343 } 2391 }
2344 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; 2392 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2345 bzero((void *)rxr->rx_base, rsize); 2393 bzero((void *)rxr->rx_base, rsize);
2346 2394
2347 /* Allocate receive buffers for the ring */ 2395 /* Allocate receive buffers for the ring */
2348 if (ixgbe_allocate_receive_buffers(rxr)) { 2396 if (ixgbe_allocate_receive_buffers(rxr)) {
2349 aprint_error_dev(dev, 2397 aprint_error_dev(dev,
2350 "Critical Failure setting up receive buffers\n"); 2398 "Critical Failure setting up receive buffers\n");
2351 error = ENOMEM; 2399 error = ENOMEM;
2352 goto err_rx_desc; 2400 goto err_rx_desc;
2353 } 2401 }
2354 } 2402 }
2355 2403
2356 /* 2404 /*
2357 * Finally set up the queue holding structs 2405 * Finally set up the queue holding structs
2358 */ 2406 */
2359 for (int i = 0; i < adapter->num_queues; i++) { 2407 for (int i = 0; i < adapter->num_queues; i++) {
2360 que = &adapter->queues[i]; 2408 que = &adapter->queues[i];
2361 que->adapter = adapter; 2409 que->adapter = adapter;
2362 que->me = i; 2410 que->me = i;
2363 que->txr = &adapter->tx_rings[i]; 2411 que->txr = &adapter->tx_rings[i];
2364 que->rxr = &adapter->rx_rings[i]; 2412 que->rxr = &adapter->rx_rings[i];
2365 2413
2366 mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET); 2414 mutex_init(&que->dc_mtx, MUTEX_DEFAULT, IPL_NET);
2367 que->disabled_count = 0; 2415 que->disabled_count = 0;
2368 } 2416 }
2369 2417
2370 return (0); 2418 return (0);
2371 2419
2372err_rx_desc: 2420err_rx_desc:
2373 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) 2421 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2374 ixgbe_dma_free(adapter, &rxr->rxdma); 2422 ixgbe_dma_free(adapter, &rxr->rxdma);
2375err_tx_desc: 2423err_tx_desc:
2376 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) 2424 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2377 ixgbe_dma_free(adapter, &txr->txdma); 2425 ixgbe_dma_free(adapter, &txr->txdma);
2378 free(adapter->rx_rings, M_DEVBUF); 2426 free(adapter->rx_rings, M_DEVBUF);
2379rx_fail: 2427rx_fail:
2380 free(adapter->tx_rings, M_DEVBUF); 2428 free(adapter->tx_rings, M_DEVBUF);
2381tx_fail: 2429tx_fail:
2382 free(adapter->queues, M_DEVBUF); 2430 free(adapter->queues, M_DEVBUF);
2383fail: 2431fail:
2384 return (error); 2432 return (error);
2385} /* ixgbe_allocate_queues */ 2433} /* ixgbe_allocate_queues */
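
The txconf/rxconf counters above implement a common partial-unwind idiom:
each counts how many rings completed setup, so the error path frees
exactly that many and no more. A self-contained sketch of the same idiom
(ring_setup()/ring_teardown() are hypothetical stand-ins):

#include <stdio.h>

static int  ring_setup(int i)    { return (i < 3) ? 0 : -1; /* fail at i == 3 */ }
static void ring_teardown(int i) { printf("teardown ring %d\n", i); }

int
main(void)
{
	int nrings = 8, conf = 0, error = 0;

	for (int i = 0; i < nrings; i++, conf++) {
		if (ring_setup(i) != 0) {
			error = 1;
			goto err;	/* conf == rings fully set up */
		}
	}
	return 0;

err:
	/* Unwind only the rings that were actually configured. */
	for (int i = 0; conf > 0; i++, conf--)
		ring_teardown(i);
	return error;
}

Here setup fails at ring 3, so the error path tears down rings 0-2 and
leaves the never-configured rings alone, exactly as err_tx_desc and
err_rx_desc do above.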
2386 2434
2387/************************************************************************ 2435/************************************************************************
2388 * ixgbe_free_queues 2436 * ixgbe_free_queues
2389 * 2437 *
2390 * Free descriptors for the transmit and receive rings, and then 2438 * Free descriptors for the transmit and receive rings, and then
2391 * the memory associated with each. 2439 * the memory associated with each.
2392 ************************************************************************/ 2440 ************************************************************************/
2393void 2441void
2394ixgbe_free_queues(struct adapter *adapter) 2442ixgbe_free_queues(struct adapter *adapter)
2395{ 2443{
2396 struct ix_queue *que; 2444 struct ix_queue *que;
2397 int i; 2445 int i;
2398 2446
2399 ixgbe_free_transmit_structures(adapter); 2447 ixgbe_free_transmit_structures(adapter);
2400 ixgbe_free_receive_structures(adapter); 2448 ixgbe_free_receive_structures(adapter);
2401 for (i = 0; i < adapter->num_queues; i++) { 2449 for (i = 0; i < adapter->num_queues; i++) {
2402 que = &adapter->queues[i]; 2450 que = &adapter->queues[i];
2403 mutex_destroy(&que->dc_mtx); 2451 mutex_destroy(&que->dc_mtx);
2404 } 2452 }
2405 free(adapter->queues, M_DEVBUF); 2453 free(adapter->queues, M_DEVBUF);
2406} /* ixgbe_free_queues */ 2454} /* ixgbe_free_queues */

cvs diff -r1.199.2.12 -r1.199.2.13 src/sys/dev/pci/ixgbe/ixgbe.c

--- src/sys/dev/pci/ixgbe/ixgbe.c 2020/09/02 12:34:55 1.199.2.12
+++ src/sys/dev/pci/ixgbe/ixgbe.c 2021/03/11 16:00:24 1.199.2.13
@@ -1,4299 +1,4309 @@ @@ -1,4299 +1,4309 @@
1/* $NetBSD: ixgbe.c,v 1.199.2.12 2020/09/02 12:34:55 martin Exp $ */ 1/* $NetBSD: ixgbe.c,v 1.199.2.13 2021/03/11 16:00:24 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37/* 37/*
38 * Copyright (c) 2011 The NetBSD Foundation, Inc. 38 * Copyright (c) 2011 The NetBSD Foundation, Inc.
39 * All rights reserved. 39 * All rights reserved.
40 * 40 *
41 * This code is derived from software contributed to The NetBSD Foundation 41 * This code is derived from software contributed to The NetBSD Foundation
42 * by Coyote Point Systems, Inc. 42 * by Coyote Point Systems, Inc.
43 * 43 *
44 * Redistribution and use in source and binary forms, with or without 44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions 45 * modification, are permitted provided that the following conditions
46 * are met: 46 * are met:
47 * 1. Redistributions of source code must retain the above copyright 47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer. 48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright 49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the 50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution. 51 * documentation and/or other materials provided with the distribution.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 53 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE. 63 * POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#ifdef _KERNEL_OPT 66#ifdef _KERNEL_OPT
67#include "opt_inet.h" 67#include "opt_inet.h"
68#include "opt_inet6.h" 68#include "opt_inet6.h"
69#include "opt_net_mpsafe.h" 69#include "opt_net_mpsafe.h"
 70#include "opt_ixgbe.h"
70#endif 71#endif
71 72
72#include "ixgbe.h" 73#include "ixgbe.h"
73#include "ixgbe_sriov.h" 74#include "ixgbe_sriov.h"
74#include "vlan.h" 75#include "vlan.h"
75 76
76#include <sys/cprng.h> 77#include <sys/cprng.h>
77#include <dev/mii/mii.h> 78#include <dev/mii/mii.h>
78#include <dev/mii/miivar.h> 79#include <dev/mii/miivar.h>
79 80
80/************************************************************************ 81/************************************************************************
81 * Driver version 82 * Driver version
82 ************************************************************************/ 83 ************************************************************************/
83static const char ixgbe_driver_version[] = "4.0.1-k"; 84static const char ixgbe_driver_version[] = "4.0.1-k";
84/* XXX NetBSD: + 3.3.10 */ 85/* XXX NetBSD: + 3.3.10 */
85 86
86/************************************************************************ 87/************************************************************************
87 * PCI Device ID Table 88 * PCI Device ID Table
88 * 89 *
89 * Used by probe to select devices to load on 90 * Used by probe to select devices to load on
90 * Last field stores an index into ixgbe_strings 91 * Last field stores an index into ixgbe_strings
91 * Last entry must be all 0s 92 * Last entry must be all 0s
92 * 93 *
93 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 94 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
94 ************************************************************************/ 95 ************************************************************************/
95static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] = 96static const ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
96{ 97{
97 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, 98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
98 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, 99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
99 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, 100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
100 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, 101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
101 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, 102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
102 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, 103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
103 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0}, 104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX, 0, 0, 0},
104 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, 105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
105 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, 106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
106 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, 107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
107 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, 108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
108 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, 109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
109 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0}, 110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR, 0, 0, 0},
110 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, 111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
111 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, 112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
112 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, 113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
113 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0}, 114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM, 0, 0, 0},
114 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, 115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
115 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, 116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
116 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, 117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
117 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, 118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
118 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, 119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
119 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0}, 120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
120 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, 121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
121 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, 122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
122 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0}, 123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
123 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0}, 124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, 0, 0, 0},
124 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, 125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
125 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0}, 126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
126 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0}, 127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0},
127 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0}, 128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, 0, 0, 0},
128 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0}, 129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, 0, 0, 0},
129 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0}, 130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, 0, 0, 0},
130 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0}, 131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, 0, 0, 0},
131 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0}, 132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, 0, 0, 0},
132 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0}, 133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, 0, 0, 0},
133 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0}, 134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI, 0, 0, 0},
134 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0}, 135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, 0, 0, 0},
135 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0}, 136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, 0, 0, 0},
136 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0}, 137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP, 0, 0, 0},
137 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0}, 138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N, 0, 0, 0},
138 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0}, 139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, 0, 0, 0},
139 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0}, 140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, 0, 0, 0},
140 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0}, 141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, 0, 0, 0},
141 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0}, 142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, 0, 0, 0},
142 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0}, 143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, 0, 0, 0},
143 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0}, 144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, 0, 0, 0},
144 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0}, 145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, 0, 0, 0},
145 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0}, 146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, 0, 0, 0},
146 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0}, 147 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, 0, 0, 0},
147 /* required last entry */ 148 /* required last entry */
148 {0, 0, 0, 0, 0} 149 {0, 0, 0, 0, 0}
149}; 150};
150 151
151/************************************************************************ 152/************************************************************************
152 * Table of branding strings 153 * Table of branding strings
153 ************************************************************************/ 154 ************************************************************************/
154static const char *ixgbe_strings[] = { 155static const char *ixgbe_strings[] = {
155 "Intel(R) PRO/10GbE PCI-Express Network Driver" 156 "Intel(R) PRO/10GbE PCI-Express Network Driver"
156}; 157};
157 158
158/************************************************************************ 159/************************************************************************
159 * Function prototypes 160 * Function prototypes
160 ************************************************************************/ 161 ************************************************************************/
161static int ixgbe_probe(device_t, cfdata_t, void *); 162static int ixgbe_probe(device_t, cfdata_t, void *);
162static void ixgbe_attach(device_t, device_t, void *); 163static void ixgbe_attach(device_t, device_t, void *);
163static int ixgbe_detach(device_t, int); 164static int ixgbe_detach(device_t, int);
164#if 0 165#if 0
165static int ixgbe_shutdown(device_t); 166static int ixgbe_shutdown(device_t);
166#endif 167#endif
167static bool ixgbe_suspend(device_t, const pmf_qual_t *); 168static bool ixgbe_suspend(device_t, const pmf_qual_t *);
168static bool ixgbe_resume(device_t, const pmf_qual_t *); 169static bool ixgbe_resume(device_t, const pmf_qual_t *);
169static int ixgbe_ifflags_cb(struct ethercom *); 170static int ixgbe_ifflags_cb(struct ethercom *);
170static int ixgbe_ioctl(struct ifnet *, u_long, void *); 171static int ixgbe_ioctl(struct ifnet *, u_long, void *);
171static int ixgbe_init(struct ifnet *); 172static int ixgbe_init(struct ifnet *);
172static void ixgbe_init_locked(struct adapter *); 173static void ixgbe_init_locked(struct adapter *);
173static void ixgbe_ifstop(struct ifnet *, int); 174static void ixgbe_ifstop(struct ifnet *, int);
174static void ixgbe_stop(void *); 175static void ixgbe_stop(void *);
175static void ixgbe_init_device_features(struct adapter *); 176static void ixgbe_init_device_features(struct adapter *);
176static void ixgbe_check_fan_failure(struct adapter *, u32, bool); 177static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
177static void ixgbe_add_media_types(struct adapter *); 178static void ixgbe_add_media_types(struct adapter *);
178static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); 179static void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
179static int ixgbe_media_change(struct ifnet *); 180static int ixgbe_media_change(struct ifnet *);
180static int ixgbe_allocate_pci_resources(struct adapter *, 181static int ixgbe_allocate_pci_resources(struct adapter *,
181 const struct pci_attach_args *); 182 const struct pci_attach_args *);
182static void ixgbe_free_softint(struct adapter *); 183static void ixgbe_free_softint(struct adapter *);
183static void ixgbe_get_slot_info(struct adapter *); 184static void ixgbe_get_slot_info(struct adapter *);
184static int ixgbe_allocate_msix(struct adapter *, 185static int ixgbe_allocate_msix(struct adapter *,
185 const struct pci_attach_args *); 186 const struct pci_attach_args *);
186static int ixgbe_allocate_legacy(struct adapter *, 187static int ixgbe_allocate_legacy(struct adapter *,
187 const struct pci_attach_args *); 188 const struct pci_attach_args *);
188static int ixgbe_configure_interrupts(struct adapter *); 189static int ixgbe_configure_interrupts(struct adapter *);
189static void ixgbe_free_pciintr_resources(struct adapter *); 190static void ixgbe_free_pciintr_resources(struct adapter *);
190static void ixgbe_free_pci_resources(struct adapter *); 191static void ixgbe_free_pci_resources(struct adapter *);
191static void ixgbe_local_timer(void *); 192static void ixgbe_local_timer(void *);
192static void ixgbe_local_timer1(void *); 193static void ixgbe_local_timer1(void *);
193static void ixgbe_recovery_mode_timer(void *); 194static void ixgbe_recovery_mode_timer(void *);
194static int ixgbe_setup_interface(device_t, struct adapter *); 195static int ixgbe_setup_interface(device_t, struct adapter *);
195static void ixgbe_config_gpie(struct adapter *); 196static void ixgbe_config_gpie(struct adapter *);
196static void ixgbe_config_dmac(struct adapter *); 197static void ixgbe_config_dmac(struct adapter *);
197static void ixgbe_config_delay_values(struct adapter *); 198static void ixgbe_config_delay_values(struct adapter *);
198static void ixgbe_config_link(struct adapter *); 199static void ixgbe_config_link(struct adapter *);
199static void ixgbe_check_wol_support(struct adapter *); 200static void ixgbe_check_wol_support(struct adapter *);
200static int ixgbe_setup_low_power_mode(struct adapter *); 201static int ixgbe_setup_low_power_mode(struct adapter *);
201#if 0 202#if 0
202static void ixgbe_rearm_queues(struct adapter *, u64); 203static void ixgbe_rearm_queues(struct adapter *, u64);
203#endif 204#endif
204 205
205static void ixgbe_initialize_transmit_units(struct adapter *); 206static void ixgbe_initialize_transmit_units(struct adapter *);
206static void ixgbe_initialize_receive_units(struct adapter *); 207static void ixgbe_initialize_receive_units(struct adapter *);
207static void ixgbe_enable_rx_drop(struct adapter *); 208static void ixgbe_enable_rx_drop(struct adapter *);
208static void ixgbe_disable_rx_drop(struct adapter *); 209static void ixgbe_disable_rx_drop(struct adapter *);
209static void ixgbe_initialize_rss_mapping(struct adapter *); 210static void ixgbe_initialize_rss_mapping(struct adapter *);
210 211
211static void ixgbe_enable_intr(struct adapter *); 212static void ixgbe_enable_intr(struct adapter *);
212static void ixgbe_disable_intr(struct adapter *); 213static void ixgbe_disable_intr(struct adapter *);
213static void ixgbe_update_stats_counters(struct adapter *); 214static void ixgbe_update_stats_counters(struct adapter *);
214static void ixgbe_set_rxfilter(struct adapter *); 215static void ixgbe_set_rxfilter(struct adapter *);
215static void ixgbe_update_link_status(struct adapter *); 216static void ixgbe_update_link_status(struct adapter *);
216static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); 217static void ixgbe_set_ivar(struct adapter *, u8, u8, s8);
217static void ixgbe_configure_ivars(struct adapter *); 218static void ixgbe_configure_ivars(struct adapter *);
218static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 219static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
219static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t); 220static void ixgbe_eitr_write(struct adapter *, uint32_t, uint32_t);
220 221
221static void ixgbe_setup_vlan_hw_tagging(struct adapter *); 222static void ixgbe_setup_vlan_hw_tagging(struct adapter *);
222static void ixgbe_setup_vlan_hw_support(struct adapter *); 223static void ixgbe_setup_vlan_hw_support(struct adapter *);
223static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool); 224static int ixgbe_vlan_cb(struct ethercom *, uint16_t, bool);
224static int ixgbe_register_vlan(struct adapter *, u16); 225static int ixgbe_register_vlan(struct adapter *, u16);
225static int ixgbe_unregister_vlan(struct adapter *, u16); 226static int ixgbe_unregister_vlan(struct adapter *, u16);
226 227
227static void ixgbe_add_device_sysctls(struct adapter *); 228static void ixgbe_add_device_sysctls(struct adapter *);
228static void ixgbe_add_hw_stats(struct adapter *); 229static void ixgbe_add_hw_stats(struct adapter *);
229static void ixgbe_clear_evcnt(struct adapter *); 230static void ixgbe_clear_evcnt(struct adapter *);
230static int ixgbe_set_flowcntl(struct adapter *, int); 231static int ixgbe_set_flowcntl(struct adapter *, int);
231static int ixgbe_set_advertise(struct adapter *, int); 232static int ixgbe_set_advertise(struct adapter *, int);
232static int ixgbe_get_advertise(struct adapter *); 233static int ixgbe_get_advertise(struct adapter *);
233 234
234/* Sysctl handlers */ 235/* Sysctl handlers */
235static void ixgbe_set_sysctl_value(struct adapter *, const char *, 236static void ixgbe_set_sysctl_value(struct adapter *, const char *,
236 const char *, int *, int); 237 const char *, int *, int);
237static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO); 238static int ixgbe_sysctl_flowcntl(SYSCTLFN_PROTO);
238static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO); 239static int ixgbe_sysctl_advertise(SYSCTLFN_PROTO);
239static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); 240static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
240static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO); 241static int ixgbe_sysctl_dmac(SYSCTLFN_PROTO);
241static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO); 242static int ixgbe_sysctl_phy_temp(SYSCTLFN_PROTO);
242static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO); 243static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_PROTO);
243#ifdef IXGBE_DEBUG 244#ifdef IXGBE_DEBUG
244static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO); 245static int ixgbe_sysctl_power_state(SYSCTLFN_PROTO);
245static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO); 246static int ixgbe_sysctl_print_rss_config(SYSCTLFN_PROTO);
246#endif 247#endif
247static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO); 248static int ixgbe_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
248static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO); 249static int ixgbe_sysctl_rdh_handler(SYSCTLFN_PROTO);
249static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO); 250static int ixgbe_sysctl_rdt_handler(SYSCTLFN_PROTO);
250static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO); 251static int ixgbe_sysctl_tdt_handler(SYSCTLFN_PROTO);
251static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO); 252static int ixgbe_sysctl_tdh_handler(SYSCTLFN_PROTO);
252static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO); 253static int ixgbe_sysctl_eee_state(SYSCTLFN_PROTO);
253static int ixgbe_sysctl_debug(SYSCTLFN_PROTO); 254static int ixgbe_sysctl_debug(SYSCTLFN_PROTO);
254static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO); 255static int ixgbe_sysctl_wol_enable(SYSCTLFN_PROTO);
255static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO); 256static int ixgbe_sysctl_wufc(SYSCTLFN_PROTO);
256 257
257/* Support for pluggable optic modules */ 258/* Support for pluggable optic modules */
258static bool ixgbe_sfp_probe(struct adapter *); 259static bool ixgbe_sfp_probe(struct adapter *);
259 260
260/* Legacy (single vector) interrupt handler */ 261/* Legacy (single vector) interrupt handler */
261static int ixgbe_legacy_irq(void *); 262static int ixgbe_legacy_irq(void *);
262 263
263/* The MSI/MSI-X Interrupt handlers */ 264/* The MSI/MSI-X Interrupt handlers */
264static int ixgbe_msix_que(void *); 265static int ixgbe_msix_que(void *);
265static int ixgbe_msix_link(void *); 266static int ixgbe_msix_link(void *);
266 267
267/* Software interrupts for deferred work */ 268/* Software interrupts for deferred work */
268static void ixgbe_handle_que(void *); 269static void ixgbe_handle_que(void *);
269static void ixgbe_handle_link(void *); 270static void ixgbe_handle_link(void *);
270static void ixgbe_handle_msf(void *); 271static void ixgbe_handle_msf(void *);
271static void ixgbe_handle_mod(void *); 272static void ixgbe_handle_mod(void *);
272static void ixgbe_handle_phy(void *); 273static void ixgbe_handle_phy(void *);
273 274
274/* Workqueue handler for deferred work */ 275/* Workqueue handler for deferred work */
275static void ixgbe_handle_que_work(struct work *, void *); 276static void ixgbe_handle_que_work(struct work *, void *);
276 277
277static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *); 278static const ixgbe_vendor_info_t *ixgbe_lookup(const struct pci_attach_args *);
278 279
279/************************************************************************ 280/************************************************************************
280 * NetBSD Device Interface Entry Points 281 * NetBSD Device Interface Entry Points
281 ************************************************************************/ 282 ************************************************************************/
282CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter), 283CFATTACH_DECL3_NEW(ixg, sizeof(struct adapter),
283 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL, 284 ixgbe_probe, ixgbe_attach, ixgbe_detach, NULL, NULL, NULL,
284 DVF_DETACH_SHUTDOWN); 285 DVF_DETACH_SHUTDOWN);
285 286
286#if 0 287#if 0
287devclass_t ix_devclass; 288devclass_t ix_devclass;
288DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); 289DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
289 290
290MODULE_DEPEND(ix, pci, 1, 1, 1); 291MODULE_DEPEND(ix, pci, 1, 1, 1);
291MODULE_DEPEND(ix, ether, 1, 1, 1); 292MODULE_DEPEND(ix, ether, 1, 1, 1);
292#ifdef DEV_NETMAP 293#ifdef DEV_NETMAP
293MODULE_DEPEND(ix, netmap, 1, 1, 1); 294MODULE_DEPEND(ix, netmap, 1, 1, 1);
294#endif 295#endif
295#endif 296#endif
296 297
297/* 298/*
298 * TUNEABLE PARAMETERS: 299 * TUNEABLE PARAMETERS:
299 */ 300 */
300 301
301/* 302/*
302 * AIM: Adaptive Interrupt Moderation 303 * AIM: Adaptive Interrupt Moderation
303 * which means that the interrupt rate 304 * which means that the interrupt rate
304 * is varied over time based on the 305 * is varied over time based on the
305 * traffic for that interrupt vector 306 * traffic for that interrupt vector
306 */ 307 */
307static bool ixgbe_enable_aim = true; 308static bool ixgbe_enable_aim = true;
308#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7) 309#define SYSCTL_INT(_a1, _a2, _a3, _a4, _a5, _a6, _a7)
309SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0, 310SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RDTUN, &ixgbe_enable_aim, 0,
310 "Enable adaptive interrupt moderation"); 311 "Enable adaptive interrupt moderation");
311 312
312static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 313static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
313SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, 314SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
314 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); 315 &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
315 316
316/* How many packets rxeof tries to clean at a time */ 317/* How many packets rxeof tries to clean at a time */
317static int ixgbe_rx_process_limit = 256; 318static int ixgbe_rx_process_limit = 256;
318SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, 319SYSCTL_INT(_hw_ix, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
319 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); 320 &ixgbe_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited");
320 321
321/* How many packets txeof tries to clean at a time */ 322/* How many packets txeof tries to clean at a time */
322static int ixgbe_tx_process_limit = 256; 323static int ixgbe_tx_process_limit = 256;
323SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, 324SYSCTL_INT(_hw_ix, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN,
324 &ixgbe_tx_process_limit, 0, 325 &ixgbe_tx_process_limit, 0,
325 "Maximum number of sent packets to process at a time, -1 means unlimited"); 326 "Maximum number of sent packets to process at a time, -1 means unlimited");
326 327
327/* Flow control setting, default to full */ 328/* Flow control setting, default to full */
328static int ixgbe_flow_control = ixgbe_fc_full; 329static int ixgbe_flow_control = ixgbe_fc_full;
329SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, 330SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN,
330 &ixgbe_flow_control, 0, "Default flow control used for all adapters"); 331 &ixgbe_flow_control, 0, "Default flow control used for all adapters");
331 332
332/* Which packet processing uses workqueue or softint */ 333/* Which packet processing uses workqueue or softint */
333static bool ixgbe_txrx_workqueue = false; 334static bool ixgbe_txrx_workqueue = false;
334 335
335/* 336/*
336 * Smart speed setting, default to on 337 * Smart speed setting, default to on
337 * this only works as a compile option 338 * this only works as a compile option
338 * right now as it's during attach, set 339 * right now as it's during attach, set
339 * this to 'ixgbe_smart_speed_off' to 340 * this to 'ixgbe_smart_speed_off' to
340 * disable. 341 * disable.
341 */ 342 */
342static int ixgbe_smart_speed = ixgbe_smart_speed_on; 343static int ixgbe_smart_speed = ixgbe_smart_speed_on;
343 344
344/* 345/*
345 * MSI-X should be the default for best performance, 346 * MSI-X should be the default for best performance,
346 * but this allows it to be forced off for testing. 347 * but this allows it to be forced off for testing.
347 */ 348 */
348static int ixgbe_enable_msix = 1; 349static int ixgbe_enable_msix = 1;
349SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, 350SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
350 "Enable MSI-X interrupts"); 351 "Enable MSI-X interrupts");
351 352
352/* 353/*
353 * Number of Queues, can be set to 0, 354 * Number of Queues, can be set to 0,
354 * it then autoconfigures based on the 355 * it then autoconfigures based on the
355 * number of cpus with a max of 8. This 356 * number of cpus with a max of 8. This
356 * can be overridden manually here. 357 * can be overridden manually here.
357 */ 358 */
358static int ixgbe_num_queues = 0; 359static int ixgbe_num_queues = 0;
359SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0, 360SYSCTL_INT(_hw_ix, OID_AUTO, num_queues, CTLFLAG_RDTUN, &ixgbe_num_queues, 0,
360 "Number of queues to configure, 0 indicates autoconfigure"); 361 "Number of queues to configure, 0 indicates autoconfigure");
361 362
362/* 363/*
363 * Number of TX descriptors per ring, 364 * Number of TX descriptors per ring,
364 * setting higher than RX as this seems 365 * setting higher than RX as this seems
365 * the better performing choice. 366 * the better performing choice.
366 */ 367 */
367static int ixgbe_txd = PERFORM_TXD; 368static int ixgbe_txd = PERFORM_TXD;
368SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0, 369SYSCTL_INT(_hw_ix, OID_AUTO, txd, CTLFLAG_RDTUN, &ixgbe_txd, 0,
369 "Number of transmit descriptors per queue"); 370 "Number of transmit descriptors per queue");
370 371
371/* Number of RX descriptors per ring */ 372/* Number of RX descriptors per ring */
372static int ixgbe_rxd = PERFORM_RXD; 373static int ixgbe_rxd = PERFORM_RXD;
373SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0, 374SYSCTL_INT(_hw_ix, OID_AUTO, rxd, CTLFLAG_RDTUN, &ixgbe_rxd, 0,
374 "Number of receive descriptors per queue"); 375 "Number of receive descriptors per queue");
375 376
376/* 377/*
377 * Defining this on will allow the use 378 * Defining this on will allow the use
378 * of unsupported SFP+ modules, note that 379 * of unsupported SFP+ modules, note that
379 * doing so you are on your own :) 380 * doing so you are on your own :)
380 */ 381 */
381static int allow_unsupported_sfp = false; 382static int allow_unsupported_sfp = false;
382#define TUNABLE_INT(__x, __y) 383#define TUNABLE_INT(__x, __y)
383TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp); 384TUNABLE_INT("hw.ix.unsupported_sfp", &allow_unsupported_sfp);
384 385
385/* 386/*
386 * Not sure if Flow Director is fully baked, 387 * Not sure if Flow Director is fully baked,
387 * so we'll default to turning it off. 388 * so we'll default to turning it off.
388 */ 389 */
389static int ixgbe_enable_fdir = 0; 390static int ixgbe_enable_fdir = 0;
390SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, 391SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
391 "Enable Flow Director"); 392 "Enable Flow Director");
392 393
393/* Legacy Transmit (single queue) */ 394/* Legacy Transmit (single queue) */
394static int ixgbe_enable_legacy_tx = 0; 395static int ixgbe_enable_legacy_tx = 0;
395SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN, 396SYSCTL_INT(_hw_ix, OID_AUTO, enable_legacy_tx, CTLFLAG_RDTUN,
396 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow"); 397 &ixgbe_enable_legacy_tx, 0, "Enable Legacy TX flow");
397 398
398/* Receive-Side Scaling */ 399/* Receive-Side Scaling */
399static int ixgbe_enable_rss = 1; 400static int ixgbe_enable_rss = 1;
400SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, 401SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
401 "Enable Receive-Side Scaling (RSS)"); 402 "Enable Receive-Side Scaling (RSS)");
402 403
403#if 0 404#if 0
404static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *); 405static int (*ixgbe_start_locked)(struct ifnet *, struct tx_ring *);
405static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *); 406static int (*ixgbe_ring_empty)(struct ifnet *, pcq_t *);
406#endif 407#endif
407 408
408#ifdef NET_MPSAFE 409#ifdef NET_MPSAFE
409#define IXGBE_MPSAFE 1 410#define IXGBE_MPSAFE 1
410#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 411#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
411#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE 412#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
412#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 413#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
413#else 414#else
414#define IXGBE_CALLOUT_FLAGS 0 415#define IXGBE_CALLOUT_FLAGS 0
415#define IXGBE_SOFTINT_FLAGS 0 416#define IXGBE_SOFTINT_FLAGS 0
416#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 417#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
417#endif 418#endif
418#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 419#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
419 420
420/************************************************************************ 421/************************************************************************
421 * ixgbe_initialize_rss_mapping 422 * ixgbe_initialize_rss_mapping
422 ************************************************************************/ 423 ************************************************************************/
423static void 424static void
424ixgbe_initialize_rss_mapping(struct adapter *adapter) 425ixgbe_initialize_rss_mapping(struct adapter *adapter)
425{ 426{
426 struct ixgbe_hw *hw = &adapter->hw; 427 struct ixgbe_hw *hw = &adapter->hw;
427 u32 reta = 0, mrqc, rss_key[10]; 428 u32 reta = 0, mrqc, rss_key[10];
428 int queue_id, table_size, index_mult; 429 int queue_id, table_size, index_mult;
429 int i, j; 430 int i, j;
430 u32 rss_hash_config; 431 u32 rss_hash_config;
431 432
432 /* Force use of the default RSS key. */ 433 /* Force use of the default RSS key. */
433#ifdef __NetBSD__ 434#ifdef __NetBSD__
434 rss_getkey((uint8_t *) &rss_key); 435 rss_getkey((uint8_t *) &rss_key);
435#else 436#else
436 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 437 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
437 /* Fetch the configured RSS key */ 438 /* Fetch the configured RSS key */
438 rss_getkey((uint8_t *) &rss_key); 439 rss_getkey((uint8_t *) &rss_key);
439 } else { 440 } else {
440 /* set up random bits */ 441 /* set up random bits */
441 cprng_fast(&rss_key, sizeof(rss_key)); 442 cprng_fast(&rss_key, sizeof(rss_key));
442 } 443 }
443#endif 444#endif
444 445
445 /* Set multiplier for RETA setup and table size based on MAC */ 446 /* Set multiplier for RETA setup and table size based on MAC */
446 index_mult = 0x1; 447 index_mult = 0x1;
447 table_size = 128; 448 table_size = 128;
448 switch (adapter->hw.mac.type) { 449 switch (adapter->hw.mac.type) {
449 case ixgbe_mac_82598EB: 450 case ixgbe_mac_82598EB:
450 index_mult = 0x11; 451 index_mult = 0x11;
451 break; 452 break;
452 case ixgbe_mac_X550: 453 case ixgbe_mac_X550:
453 case ixgbe_mac_X550EM_x: 454 case ixgbe_mac_X550EM_x:
454 case ixgbe_mac_X550EM_a: 455 case ixgbe_mac_X550EM_a:
455 table_size = 512; 456 table_size = 512;
456 break; 457 break;
457 default: 458 default:
458 break; 459 break;
459 } 460 }
460 461
461 /* Set up the redirection table */ 462 /* Set up the redirection table */
462 for (i = 0, j = 0; i < table_size; i++, j++) { 463 for (i = 0, j = 0; i < table_size; i++, j++) {
463 if (j == adapter->num_queues) 464 if (j == adapter->num_queues)
464 j = 0; 465 j = 0;
465 466
466 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 467 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
467 /* 468 /*
468 * Fetch the RSS bucket id for the given indirection 469 * Fetch the RSS bucket id for the given indirection
469 * entry. Cap it at the number of configured buckets 470 * entry. Cap it at the number of configured buckets
470 * (which is num_queues.) 471 * (which is num_queues.)
471 */ 472 */
472 queue_id = rss_get_indirection_to_bucket(i); 473 queue_id = rss_get_indirection_to_bucket(i);
473 queue_id = queue_id % adapter->num_queues; 474 queue_id = queue_id % adapter->num_queues;
474 } else 475 } else
475 queue_id = (j * index_mult); 476 queue_id = (j * index_mult);
476 477
477 /* 478 /*
478 * The low 8 bits are for hash value (n+0); 479 * The low 8 bits are for hash value (n+0);
479 * The next 8 bits are for hash value (n+1), etc. 480 * The next 8 bits are for hash value (n+1), etc.
480 */ 481 */
481 reta = reta >> 8; 482 reta = reta >> 8;
482 reta = reta | (((uint32_t) queue_id) << 24); 483 reta = reta | (((uint32_t) queue_id) << 24);
483 if ((i & 3) == 3) { 484 if ((i & 3) == 3) {
484 if (i < 128) 485 if (i < 128)
485 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 486 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
486 else 487 else
487 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 488 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
488 reta); 489 reta);
489 reta = 0; 490 reta = 0;
490 } 491 }
491 } 492 }
492 493
493 /* Now fill our hash function seeds */ 494 /* Now fill our hash function seeds */
494 for (i = 0; i < 10; i++) 495 for (i = 0; i < 10; i++)
495 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); 496 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
496 497
497 /* Perform hash on these packet types */ 498 /* Perform hash on these packet types */
498 if (adapter->feat_en & IXGBE_FEATURE_RSS) 499 if (adapter->feat_en & IXGBE_FEATURE_RSS)
499 rss_hash_config = rss_gethashconfig(); 500 rss_hash_config = rss_gethashconfig();
500 else { 501 else {
501 /* 502 /*
502 * Disable UDP - IP fragments aren't currently being handled 503 * Disable UDP - IP fragments aren't currently being handled
503 * and so we end up with a mix of 2-tuple and 4-tuple 504 * and so we end up with a mix of 2-tuple and 4-tuple
504 * traffic. 505 * traffic.
505 */ 506 */
506 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 507 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
507 | RSS_HASHTYPE_RSS_TCP_IPV4 508 | RSS_HASHTYPE_RSS_TCP_IPV4
508 | RSS_HASHTYPE_RSS_IPV6 509 | RSS_HASHTYPE_RSS_IPV6
509 | RSS_HASHTYPE_RSS_TCP_IPV6 510 | RSS_HASHTYPE_RSS_TCP_IPV6
510 | RSS_HASHTYPE_RSS_IPV6_EX 511 | RSS_HASHTYPE_RSS_IPV6_EX
511 | RSS_HASHTYPE_RSS_TCP_IPV6_EX; 512 | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
512 } 513 }
513 514
514 mrqc = IXGBE_MRQC_RSSEN; 515 mrqc = IXGBE_MRQC_RSSEN;
515 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 516 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
516 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 517 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
517 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 518 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
518 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 519 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
519 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 520 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
520 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 521 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
521 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 522 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
522 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 523 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
523 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 524 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
524 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; 525 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
525 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 526 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
526 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; 527 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
527 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 528 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
528 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 529 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
529 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 530 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
530 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 531 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
531 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 532 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
532 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 533 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
533 mrqc |= ixgbe_get_mrqc(adapter->iov_mode); 534 mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
534 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 535 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
535} /* ixgbe_initialize_rss_mapping */ 536} /* ixgbe_initialize_rss_mapping */
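The RETA loop above packs one 8-bit queue index per byte into each 32-bit
redirection-table register, flushing every fourth entry (indices of 128 and
above spill into the extended ERETA registers). A minimal standalone sketch
of the same packing, with a hypothetical table[] and the register write
replaced by printf():

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Hypothetical queue assignments for the first 8 RETA slots. */
	uint8_t table[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	uint32_t reta = 0;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		/* Shift right, insert the new id into the top byte. */
		reta = (reta >> 8) | ((uint32_t)table[i] << 24);
		if ((i & 3) == 3) {	/* every 4th entry: flush */
			printf("RETA(%u) = 0x%08x\n", i >> 2, reta);
			reta = 0;
		}
	}
	return 0;
}

Entry i lands in byte i % 4 of its register, so this prints
RETA(0) = 0x03020100 and RETA(1) = 0x03020100 for queues 0-3.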
536 537
537/************************************************************************ 538/************************************************************************
538 * ixgbe_initialize_receive_units - Setup receive registers and features. 539 * ixgbe_initialize_receive_units - Setup receive registers and features.
539 ************************************************************************/ 540 ************************************************************************/
540#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) 541#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
541 542
542static void 543static void
543ixgbe_initialize_receive_units(struct adapter *adapter) 544ixgbe_initialize_receive_units(struct adapter *adapter)
544{ 545{
545 struct rx_ring *rxr = adapter->rx_rings; 546 struct rx_ring *rxr = adapter->rx_rings;
546 struct ixgbe_hw *hw = &adapter->hw; 547 struct ixgbe_hw *hw = &adapter->hw;
547 struct ifnet *ifp = adapter->ifp; 548 struct ifnet *ifp = adapter->ifp;
548 int i, j; 549 int i, j;
549 u32 bufsz, fctrl, srrctl, rxcsum; 550 u32 bufsz, fctrl, srrctl, rxcsum;
550 u32 hlreg; 551 u32 hlreg;
551 552
552 /* 553 /*
553 * Make sure receives are disabled while 554 * Make sure receives are disabled while
554 * setting up the descriptor ring 555 * setting up the descriptor ring
555 */ 556 */
556 ixgbe_disable_rx(hw); 557 ixgbe_disable_rx(hw);
557 558
558 /* Enable broadcasts */ 559 /* Enable broadcasts */
559 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 560 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
560 fctrl |= IXGBE_FCTRL_BAM; 561 fctrl |= IXGBE_FCTRL_BAM;
561 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 562 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
562 fctrl |= IXGBE_FCTRL_DPF; 563 fctrl |= IXGBE_FCTRL_DPF;
563 fctrl |= IXGBE_FCTRL_PMCF; 564 fctrl |= IXGBE_FCTRL_PMCF;
564 } 565 }
565 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 566 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
566 567
567 /* Set for Jumbo Frames? */ 568 /* Set for Jumbo Frames? */
568 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); 569 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
569 if (ifp->if_mtu > ETHERMTU) 570 if (ifp->if_mtu > ETHERMTU)
570 hlreg |= IXGBE_HLREG0_JUMBOEN; 571 hlreg |= IXGBE_HLREG0_JUMBOEN;
571 else 572 else
572 hlreg &= ~IXGBE_HLREG0_JUMBOEN; 573 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
573 574
574#ifdef DEV_NETMAP 575#ifdef DEV_NETMAP
575 /* CRC stripping is conditional in Netmap */ 576 /* CRC stripping is conditional in Netmap */
576 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 577 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
577 (ifp->if_capenable & IFCAP_NETMAP) && 578 (ifp->if_capenable & IFCAP_NETMAP) &&
578 !ix_crcstrip) 579 !ix_crcstrip)
579 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP; 580 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
580 else 581 else
581#endif /* DEV_NETMAP */ 582#endif /* DEV_NETMAP */
582 hlreg |= IXGBE_HLREG0_RXCRCSTRP; 583 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
583 584
584 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); 585 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
585 586
586 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> 587 bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
587 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 588 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
588 589
589 for (i = 0; i < adapter->num_queues; i++, rxr++) { 590 for (i = 0; i < adapter->num_queues; i++, rxr++) {
590 u64 rdba = rxr->rxdma.dma_paddr; 591 u64 rdba = rxr->rxdma.dma_paddr;
591 u32 reg; 592 u32 reg;
592 int regnum = i / 4; /* 1 register per 4 queues */ 593 int regnum = i / 4; /* 1 register per 4 queues */
593 int regshift = i % 4; /* 8 bits per 1 queue */ 594 int regshift = i % 4; /* 8 bits per 1 queue */
594 j = rxr->me; 595 j = rxr->me;
595 596
596 /* Setup the Base and Length of the Rx Descriptor Ring */ 597 /* Setup the Base and Length of the Rx Descriptor Ring */
597 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), 598 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
598 (rdba & 0x00000000ffffffffULL)); 599 (rdba & 0x00000000ffffffffULL));
599 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 600 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
600 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), 601 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
601 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 602 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
602 603
603 /* Set up the SRRCTL register */ 604 /* Set up the SRRCTL register */
604 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); 605 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
605 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 606 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
606 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 607 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
607 srrctl |= bufsz; 608 srrctl |= bufsz;
608 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 609 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
609 610
610 /* Set RQSMR (Receive Queue Statistic Mapping) register */ 611 /* Set RQSMR (Receive Queue Statistic Mapping) register */
611 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum)); 612 reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(regnum));
612 reg &= ~(0x000000ffUL << (regshift * 8)); 613 reg &= ~(0x000000ffUL << (regshift * 8));
613 reg |= i << (regshift * 8); 614 reg |= i << (regshift * 8);
614 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg); 615 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(regnum), reg);
615 616
616 /* 617 /*
617 * Set DROP_EN iff we have no flow control and >1 queue. 618 * Set DROP_EN iff we have no flow control and >1 queue.
618 * Note that srrctl was cleared shortly before during reset, 619 * Note that srrctl was cleared shortly before during reset,
619 * so we do not need to clear the bit, but do it just in case 620 * so we do not need to clear the bit, but do it just in case
620 * this code is moved elsewhere. 621 * this code is moved elsewhere.
621 */ 622 */
622 if (adapter->num_queues > 1 && 623 if (adapter->num_queues > 1 &&
623 adapter->hw.fc.requested_mode == ixgbe_fc_none) { 624 adapter->hw.fc.requested_mode == ixgbe_fc_none) {
624 srrctl |= IXGBE_SRRCTL_DROP_EN; 625 srrctl |= IXGBE_SRRCTL_DROP_EN;
625 } else { 626 } else {
626 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 627 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
627 } 628 }
628 629
629 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); 630 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
630 631
631 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 632 /* Setup the HW Rx Head and Tail Descriptor Pointers */
632 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 633 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
633 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 634 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
634 635
635 /* Set the driver rx tail address */ 636 /* Set the driver rx tail address */
636 rxr->tail = IXGBE_RDT(rxr->me); 637 rxr->tail = IXGBE_RDT(rxr->me);
637 } 638 }
638 639
639 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 640 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
640 u32 psrtype = IXGBE_PSRTYPE_TCPHDR 641 u32 psrtype = IXGBE_PSRTYPE_TCPHDR
641 | IXGBE_PSRTYPE_UDPHDR 642 | IXGBE_PSRTYPE_UDPHDR
642 | IXGBE_PSRTYPE_IPV4HDR 643 | IXGBE_PSRTYPE_IPV4HDR
643 | IXGBE_PSRTYPE_IPV6HDR; 644 | IXGBE_PSRTYPE_IPV6HDR;
644 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 645 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
645 } 646 }
646 647
647 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 648 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
648 649
649 ixgbe_initialize_rss_mapping(adapter); 650 ixgbe_initialize_rss_mapping(adapter);
650 651
651 if (adapter->num_queues > 1) { 652 if (adapter->num_queues > 1) {
652 /* RSS and RX IPP Checksum are mutually exclusive */ 653 /* RSS and RX IPP Checksum are mutually exclusive */
653 rxcsum |= IXGBE_RXCSUM_PCSD; 654 rxcsum |= IXGBE_RXCSUM_PCSD;
654 } 655 }
655 656
656 if (ifp->if_capenable & IFCAP_RXCSUM) 657 if (ifp->if_capenable & IFCAP_RXCSUM)
657 rxcsum |= IXGBE_RXCSUM_PCSD; 658 rxcsum |= IXGBE_RXCSUM_PCSD;
658 659
659 /* This is useful for calculating UDP/IP fragment checksums */ 660 /* This is useful for calculating UDP/IP fragment checksums */
660 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) 661 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
661 rxcsum |= IXGBE_RXCSUM_IPPCSE; 662 rxcsum |= IXGBE_RXCSUM_IPPCSE;
662 663
663 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 664 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
664 665
665} /* ixgbe_initialize_receive_units */ 666} /* ixgbe_initialize_receive_units */
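SRRCTL encodes the receive buffer size in 1 KB units (IXGBE_SRRCTL_BSIZEPKT_SHIFT
is 10), so the BSIZEPKT_ROUNDUP term rounds the mbuf cluster size up to the next
whole unit before shifting. A short sketch of the arithmetic, assuming the shift
value of 10:

#define BSIZEPKT_SHIFT	 10			/* IXGBE_SRRCTL_BSIZEPKT_SHIFT */
#define BSIZEPKT_ROUNDUP ((1 << BSIZEPKT_SHIFT) - 1)	/* 1023 */

static inline unsigned int
srrctl_bsizepkt(unsigned int bufbytes)
{
	/* Round up to the next 1 KB boundary, then express in KB. */
	return (bufbytes + BSIZEPKT_ROUNDUP) >> BSIZEPKT_SHIFT;
}

/*
 * (2048 + 1023) >> 10 == 2  -> 2 KB clusters encode as 2
 * (4096 + 1023) >> 10 == 4  -> 4 KB clusters encode as 4
 * (9216 + 1023) >> 10 == 9  -> 9 KB jumbo clusters encode as 9
 */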
666 667
667/************************************************************************ 668/************************************************************************
668 * ixgbe_initialize_transmit_units - Enable transmit units. 669 * ixgbe_initialize_transmit_units - Enable transmit units.
669 ************************************************************************/ 670 ************************************************************************/
670static void 671static void
671ixgbe_initialize_transmit_units(struct adapter *adapter) 672ixgbe_initialize_transmit_units(struct adapter *adapter)
672{ 673{
673 struct tx_ring *txr = adapter->tx_rings; 674 struct tx_ring *txr = adapter->tx_rings;
674 struct ixgbe_hw *hw = &adapter->hw; 675 struct ixgbe_hw *hw = &adapter->hw;
675 int i; 676 int i;
676 677
677 INIT_DEBUGOUT("ixgbe_initialize_transmit_units"); 678 INIT_DEBUGOUT("ixgbe_initialize_transmit_units");
678 679
679 /* Setup the Base and Length of the Tx Descriptor Ring */ 680 /* Setup the Base and Length of the Tx Descriptor Ring */
680 for (i = 0; i < adapter->num_queues; i++, txr++) { 681 for (i = 0; i < adapter->num_queues; i++, txr++) {
681 u64 tdba = txr->txdma.dma_paddr; 682 u64 tdba = txr->txdma.dma_paddr;
682 u32 txctrl = 0; 683 u32 txctrl = 0;
683 u32 tqsmreg, reg; 684 u32 tqsmreg, reg;
684 int regnum = i / 4; /* 1 register per 4 queues */ 685 int regnum = i / 4; /* 1 register per 4 queues */
685 int regshift = i % 4; /* 8 bits per 1 queue */ 686 int regshift = i % 4; /* 8 bits per 1 queue */
686 int j = txr->me; 687 int j = txr->me;
687 688
688 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 689 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
689 (tdba & 0x00000000ffffffffULL)); 690 (tdba & 0x00000000ffffffffULL));
690 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 691 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
691 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), 692 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
692 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc)); 693 adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
693 694
694 /* 695 /*
695 * Set TQSMR (Transmit Queue Statistic Mapping) register. 696 * Set TQSMR (Transmit Queue Statistic Mapping) register.
696 * Register location is different between 82598 and others. 697 * Register location is different between 82598 and others.
697 */ 698 */
698 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 699 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
699 tqsmreg = IXGBE_TQSMR(regnum); 700 tqsmreg = IXGBE_TQSMR(regnum);
700 else 701 else
701 tqsmreg = IXGBE_TQSM(regnum); 702 tqsmreg = IXGBE_TQSM(regnum);
702 reg = IXGBE_READ_REG(hw, tqsmreg); 703 reg = IXGBE_READ_REG(hw, tqsmreg);
703 reg &= ~(0x000000ffUL << (regshift * 8)); 704 reg &= ~(0x000000ffUL << (regshift * 8));
704 reg |= i << (regshift * 8); 705 reg |= i << (regshift * 8);
705 IXGBE_WRITE_REG(hw, tqsmreg, reg); 706 IXGBE_WRITE_REG(hw, tqsmreg, reg);
706 707
707 /* Setup the HW Tx Head and Tail descriptor pointers */ 708 /* Setup the HW Tx Head and Tail descriptor pointers */
708 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 709 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
709 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 710 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
710 711
711 /* Cache the tail address */ 712 /* Cache the tail address */
712 txr->tail = IXGBE_TDT(j); 713 txr->tail = IXGBE_TDT(j);
713 714
714 txr->txr_no_space = false; 715 txr->txr_no_space = false;
715 716
716 /* Disable Head Writeback */ 717 /* Disable Head Writeback */
717 /* 718 /*
718 * Note: for X550 series devices, these registers are actually 719 * Note: for X550 series devices, these registers are actually
719 * prefixed with TPH_ isntead of DCA_, but the addresses and 720 * prefixed with TPH_ isntead of DCA_, but the addresses and
720 * fields remain the same. 721 * fields remain the same.
721 */ 722 */
722 switch (hw->mac.type) { 723 switch (hw->mac.type) {
723 case ixgbe_mac_82598EB: 724 case ixgbe_mac_82598EB:
724 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 725 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
725 break; 726 break;
726 default: 727 default:
727 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 728 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
728 break; 729 break;
729 } 730 }
730 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 731 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
731 switch (hw->mac.type) { 732 switch (hw->mac.type) {
732 case ixgbe_mac_82598EB: 733 case ixgbe_mac_82598EB:
733 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 734 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
734 break; 735 break;
735 default: 736 default:
736 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); 737 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
737 break; 738 break;
738 } 739 }
739 740
740 } 741 }
741 742
742 if (hw->mac.type != ixgbe_mac_82598EB) { 743 if (hw->mac.type != ixgbe_mac_82598EB) {
743 u32 dmatxctl, rttdcs; 744 u32 dmatxctl, rttdcs;
744 745
745 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 746 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
746 dmatxctl |= IXGBE_DMATXCTL_TE; 747 dmatxctl |= IXGBE_DMATXCTL_TE;
747 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 748 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
748 /* Disable arbiter to set MTQC */ 749 /* Disable arbiter to set MTQC */
749 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 750 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
750 rttdcs |= IXGBE_RTTDCS_ARBDIS; 751 rttdcs |= IXGBE_RTTDCS_ARBDIS;
751 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 752 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
752 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 753 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
753 ixgbe_get_mtqc(adapter->iov_mode)); 754 ixgbe_get_mtqc(adapter->iov_mode));
754 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 755 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
755 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 756 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
756 } 757 }
757 758
758 return; 759 return;
759} /* ixgbe_initialize_transmit_units */ 760} /* ixgbe_initialize_transmit_units */
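Both the RQSMR loop in the receive setup and the TQSM(R) loop above use the
same packing: four queue-to-statistics-counter mappings per 32-bit register,
one byte each, updated read-modify-write. A sketch of the field arithmetic,
with read_reg()/write_reg() as hypothetical stand-ins for
IXGBE_READ_REG/IXGBE_WRITE_REG:

#include <stdint.h>

uint32_t read_reg(int regnum);		/* hypothetical accessor */
void write_reg(int regnum, uint32_t v);	/* hypothetical accessor */

/* Map queue i to statistics counter i; one 8-bit field per queue. */
void
set_stat_mapping(unsigned int i)
{
	int regnum   = i / 4;	/* 4 mappings per 32-bit register */
	int regshift = i % 4;	/* byte position within the register */
	uint32_t reg = read_reg(regnum);

	reg &= ~(0x000000ffUL << (regshift * 8));	/* clear old field */
	reg |= (uint32_t)i << (regshift * 8);		/* install new id */
	write_reg(regnum, reg);
}

/* e.g. queue 6 -> register 1, bits 23:16; queue 0 -> register 0, bits 7:0. */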
760 761
761/************************************************************************ 762/************************************************************************
762 * ixgbe_attach - Device initialization routine 763 * ixgbe_attach - Device initialization routine
763 * 764 *
764 * Called when the driver is being loaded. 765 * Called when the driver is being loaded.
765 * Identifies the type of hardware, allocates all resources 766 * Identifies the type of hardware, allocates all resources
766 * and initializes the hardware. 767 * and initializes the hardware.
767 * 768 *
768 * return 0 on success, positive on failure 769 * return 0 on success, positive on failure
769 ************************************************************************/ 770 ************************************************************************/
770static void 771static void
771ixgbe_attach(device_t parent, device_t dev, void *aux) 772ixgbe_attach(device_t parent, device_t dev, void *aux)
772{ 773{
773 struct adapter *adapter; 774 struct adapter *adapter;
774 struct ixgbe_hw *hw; 775 struct ixgbe_hw *hw;
775 int error = -1; 776 int error = -1;
776 u32 ctrl_ext; 777 u32 ctrl_ext;
777 u16 high, low, nvmreg; 778 u16 high, low, nvmreg;
778 pcireg_t id, subid; 779 pcireg_t id, subid;
779 const ixgbe_vendor_info_t *ent; 780 const ixgbe_vendor_info_t *ent;
780 struct pci_attach_args *pa = aux; 781 struct pci_attach_args *pa = aux;
781 bool unsupported_sfp = false; 782 bool unsupported_sfp = false;
782 const char *str; 783 const char *str;
783 char buf[256]; 784 char buf[256];
784 785
785 INIT_DEBUGOUT("ixgbe_attach: begin"); 786 INIT_DEBUGOUT("ixgbe_attach: begin");
786 787
787 /* Allocate, clear, and link in our adapter structure */ 788 /* Allocate, clear, and link in our adapter structure */
788 adapter = device_private(dev); 789 adapter = device_private(dev);
789 adapter->hw.back = adapter; 790 adapter->hw.back = adapter;
790 adapter->dev = dev; 791 adapter->dev = dev;
791 hw = &adapter->hw; 792 hw = &adapter->hw;
792 adapter->osdep.pc = pa->pa_pc; 793 adapter->osdep.pc = pa->pa_pc;
793 adapter->osdep.tag = pa->pa_tag; 794 adapter->osdep.tag = pa->pa_tag;
794 if (pci_dma64_available(pa)) 795 if (pci_dma64_available(pa))
795 adapter->osdep.dmat = pa->pa_dmat64; 796 adapter->osdep.dmat = pa->pa_dmat64;
796 else 797 else
797 adapter->osdep.dmat = pa->pa_dmat; 798 adapter->osdep.dmat = pa->pa_dmat;
798 adapter->osdep.attached = false; 799 adapter->osdep.attached = false;
799 800
800 ent = ixgbe_lookup(pa); 801 ent = ixgbe_lookup(pa);
801 802
802 KASSERT(ent != NULL); 803 KASSERT(ent != NULL);
803 804
804 aprint_normal(": %s, Version - %s\n", 805 aprint_normal(": %s, Version - %s\n",
805 ixgbe_strings[ent->index], ixgbe_driver_version); 806 ixgbe_strings[ent->index], ixgbe_driver_version);
806 807
807 /* Core Lock Init */ 808 /* Core Lock Init */
808 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 809 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
809 810
810 /* Set up the timer callout */ 811 /* Set up the timer callout */
811 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 812 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
812 813
813 /* Determine hardware revision */ 814 /* Determine hardware revision */
814 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 815 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
815 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 816 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
816 817
817 hw->vendor_id = PCI_VENDOR(id); 818 hw->vendor_id = PCI_VENDOR(id);
818 hw->device_id = PCI_PRODUCT(id); 819 hw->device_id = PCI_PRODUCT(id);
819 hw->revision_id = 820 hw->revision_id =
820 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 821 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
821 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 822 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
822 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 823 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
823 824
824 /* 825 /*
825 * Make sure BUSMASTER is set 826 * Make sure BUSMASTER is set
826 */ 827 */
827 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 828 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
828 829
829 /* Do base PCI setup - map BAR0 */ 830 /* Do base PCI setup - map BAR0 */
830 if (ixgbe_allocate_pci_resources(adapter, pa)) { 831 if (ixgbe_allocate_pci_resources(adapter, pa)) {
831 aprint_error_dev(dev, "Allocation of PCI resources failed\n"); 832 aprint_error_dev(dev, "Allocation of PCI resources failed\n");
832 error = ENXIO; 833 error = ENXIO;
833 goto err_out; 834 goto err_out;
834 } 835 }
835 836
836 /* let hardware know driver is loaded */ 837 /* let hardware know driver is loaded */
837 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 838 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
838 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 839 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
839 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 840 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
840 841
841 /* 842 /*
842 * Initialize the shared code 843 * Initialize the shared code
843 */ 844 */
844 if (ixgbe_init_shared_code(hw) != 0) { 845 if (ixgbe_init_shared_code(hw) != 0) {
845 aprint_error_dev(dev, "Unable to initialize the shared code\n"); 846 aprint_error_dev(dev, "Unable to initialize the shared code\n");
846 error = ENXIO; 847 error = ENXIO;
847 goto err_out; 848 goto err_out;
848 } 849 }
849 850
850 switch (hw->mac.type) { 851 switch (hw->mac.type) {
851 case ixgbe_mac_82598EB: 852 case ixgbe_mac_82598EB:
852 str = "82598EB"; 853 str = "82598EB";
853 break; 854 break;
854 case ixgbe_mac_82599EB: 855 case ixgbe_mac_82599EB:
855 str = "82599EB"; 856 str = "82599EB";
856 break; 857 break;
857 case ixgbe_mac_X540: 858 case ixgbe_mac_X540:
858 str = "X540"; 859 str = "X540";
859 break; 860 break;
860 case ixgbe_mac_X550: 861 case ixgbe_mac_X550:
861 str = "X550"; 862 str = "X550";
862 break; 863 break;
863 case ixgbe_mac_X550EM_x: 864 case ixgbe_mac_X550EM_x:
864 str = "X550EM X"; 865 str = "X550EM X";
865 break; 866 break;
866 case ixgbe_mac_X550EM_a: 867 case ixgbe_mac_X550EM_a:
867 str = "X550EM A"; 868 str = "X550EM A";
868 break; 869 break;
869 default: 870 default:
870 str = "Unknown"; 871 str = "Unknown";
871 break; 872 break;
872 } 873 }
873 aprint_normal_dev(dev, "device %s\n", str); 874 aprint_normal_dev(dev, "device %s\n", str);
874 875
875 if (hw->mbx.ops.init_params) 876 if (hw->mbx.ops.init_params)
876 hw->mbx.ops.init_params(hw); 877 hw->mbx.ops.init_params(hw);
877 878
878 hw->allow_unsupported_sfp = allow_unsupported_sfp; 879 hw->allow_unsupported_sfp = allow_unsupported_sfp;
879 880
880 /* Pick up the 82599 settings */ 881 /* Pick up the 82599 settings */
881 if (hw->mac.type != ixgbe_mac_82598EB) { 882 if (hw->mac.type != ixgbe_mac_82598EB) {
882 hw->phy.smart_speed = ixgbe_smart_speed; 883 hw->phy.smart_speed = ixgbe_smart_speed;
883 adapter->num_segs = IXGBE_82599_SCATTER; 884 adapter->num_segs = IXGBE_82599_SCATTER;
884 } else 885 } else
885 adapter->num_segs = IXGBE_82598_SCATTER; 886 adapter->num_segs = IXGBE_82598_SCATTER;
886 887
887 /* Ensure SW/FW semaphore is free */ 888 /* Ensure SW/FW semaphore is free */
888 ixgbe_init_swfw_semaphore(hw); 889 ixgbe_init_swfw_semaphore(hw);
889 890
890 hw->mac.ops.set_lan_id(hw); 891 hw->mac.ops.set_lan_id(hw);
891 ixgbe_init_device_features(adapter); 892 ixgbe_init_device_features(adapter);
892 893
893 if (ixgbe_configure_interrupts(adapter)) { 894 if (ixgbe_configure_interrupts(adapter)) {
894 error = ENXIO; 895 error = ENXIO;
895 goto err_out; 896 goto err_out;
896 } 897 }
897 898
898 /* Allocate multicast array memory. */ 899 /* Allocate multicast array memory. */
899 adapter->mta = malloc(sizeof(*adapter->mta) * 900 adapter->mta = malloc(sizeof(*adapter->mta) *
900 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); 901 MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
901 if (adapter->mta == NULL) { 902 if (adapter->mta == NULL) {
902 aprint_error_dev(dev, "Cannot allocate multicast setup array\n"); 903 aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
903 error = ENOMEM; 904 error = ENOMEM;
904 goto err_out; 905 goto err_out;
905 } 906 }
906 907
907 /* Enable WoL (if supported) */ 908 /* Enable WoL (if supported) */
908 ixgbe_check_wol_support(adapter); 909 ixgbe_check_wol_support(adapter);
909 910
910 /* Register for VLAN events */ 911 /* Register for VLAN events */
911 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb); 912 ether_set_vlan_cb(&adapter->osdep.ec, ixgbe_vlan_cb);
912 913
913 /* Verify adapter fan is still functional (if applicable) */ 914 /* Verify adapter fan is still functional (if applicable) */
914 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 915 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
915 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 916 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
916 ixgbe_check_fan_failure(adapter, esdp, FALSE); 917 ixgbe_check_fan_failure(adapter, esdp, FALSE);
917 } 918 }
918 919
919 /* Set an initial default flow control value */ 920 /* Set an initial default flow control value */
920 hw->fc.requested_mode = ixgbe_flow_control; 921 hw->fc.requested_mode = ixgbe_flow_control;
921 922
922 /* Sysctls for limiting the amount of work done in the taskqueues */ 923 /* Sysctls for limiting the amount of work done in the taskqueues */
923 ixgbe_set_sysctl_value(adapter, "rx_processing_limit", 924 ixgbe_set_sysctl_value(adapter, "rx_processing_limit",
924 "max number of rx packets to process", 925 "max number of rx packets to process",
925 &adapter->rx_process_limit, ixgbe_rx_process_limit); 926 &adapter->rx_process_limit, ixgbe_rx_process_limit);
926 927
927 ixgbe_set_sysctl_value(adapter, "tx_processing_limit", 928 ixgbe_set_sysctl_value(adapter, "tx_processing_limit",
928 "max number of tx packets to process", 929 "max number of tx packets to process",
929 &adapter->tx_process_limit, ixgbe_tx_process_limit); 930 &adapter->tx_process_limit, ixgbe_tx_process_limit);
930 931
931 /* Do descriptor calc and sanity checks */ 932 /* Do descriptor calc and sanity checks */
932 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 933 if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
933 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { 934 ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
934 aprint_error_dev(dev, "TXD config issue, using default!\n"); 935 aprint_error_dev(dev, "TXD config issue, using default!\n");
935 adapter->num_tx_desc = DEFAULT_TXD; 936 adapter->num_tx_desc = DEFAULT_TXD;
936 } else 937 } else
937 adapter->num_tx_desc = ixgbe_txd; 938 adapter->num_tx_desc = ixgbe_txd;
938 939
939 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 940 if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
940 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) { 941 ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
941 aprint_error_dev(dev, "RXD config issue, using default!\n"); 942 aprint_error_dev(dev, "RXD config issue, using default!\n");
942 adapter->num_rx_desc = DEFAULT_RXD; 943 adapter->num_rx_desc = DEFAULT_RXD;
943 } else 944 } else
944 adapter->num_rx_desc = ixgbe_rxd; 945 adapter->num_rx_desc = ixgbe_rxd;
945 946
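	/*
	 * A worked example of the descriptor sanity checks above, assuming
	 * DBA_ALIGN is 128 and 16-byte advanced descriptors: the ring size
	 * in bytes must be a multiple of DBA_ALIGN, so
	 *
	 *	2048 descs * 16 = 32768, 32768 % 128 == 0  -> accepted
	 *	1000 descs * 16 = 16000, 16000 % 128 == 0  -> accepted
	 *	1001 descs * 16 = 16016, 16016 % 128 == 16 -> rejected,
	 *	                                              default used
	 *
	 * i.e. any multiple of 8 descriptors passes the alignment test,
	 * and the count must also fall within the MIN/MAX bounds.
	 */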
 947 adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;
 948
946 /* Allocate our TX/RX Queues */ 949 /* Allocate our TX/RX Queues */
947 if (ixgbe_allocate_queues(adapter)) { 950 if (ixgbe_allocate_queues(adapter)) {
948 error = ENOMEM; 951 error = ENOMEM;
949 goto err_out; 952 goto err_out;
950 } 953 }
951 954
952 hw->phy.reset_if_overtemp = TRUE; 955 hw->phy.reset_if_overtemp = TRUE;
953 error = ixgbe_reset_hw(hw); 956 error = ixgbe_reset_hw(hw);
954 hw->phy.reset_if_overtemp = FALSE; 957 hw->phy.reset_if_overtemp = FALSE;
955 if (error == IXGBE_ERR_SFP_NOT_PRESENT) { 958 if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
956 /* 959 /*
957 * No optics in this port, set up 960 * No optics in this port, set up
958 * so the timer routine will probe 961 * so the timer routine will probe
959 * for later insertion. 962 * for later insertion.
960 */ 963 */
961 adapter->sfp_probe = TRUE; 964 adapter->sfp_probe = TRUE;
962 error = IXGBE_SUCCESS; 965 error = IXGBE_SUCCESS;
963 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { 966 } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
964 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n"); 967 aprint_error_dev(dev, "Unsupported SFP+ module detected!\n");
965 unsupported_sfp = true; 968 unsupported_sfp = true;
966 error = IXGBE_SUCCESS; 969 error = IXGBE_SUCCESS;
967 } else if (error) { 970 } else if (error) {
968 aprint_error_dev(dev, "Hardware initialization failed\n"); 971 aprint_error_dev(dev, "Hardware initialization failed\n");
969 error = EIO; 972 error = EIO;
970 goto err_late; 973 goto err_late;
971 } 974 }
972 975
973 /* Make sure we have a good EEPROM before we read from it */ 976 /* Make sure we have a good EEPROM before we read from it */
974 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { 977 if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
975 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n"); 978 aprint_error_dev(dev, "The EEPROM Checksum Is Not Valid\n");
976 error = EIO; 979 error = EIO;
977 goto err_late; 980 goto err_late;
978 } 981 }
979 982
980 aprint_normal("%s:", device_xname(dev)); 983 aprint_normal("%s:", device_xname(dev));
981 /* NVM Image Version */ 984 /* NVM Image Version */
982 high = low = 0; 985 high = low = 0;
983 switch (hw->mac.type) { 986 switch (hw->mac.type) {
984 case ixgbe_mac_X540: 987 case ixgbe_mac_X540:
985 case ixgbe_mac_X550EM_a: 988 case ixgbe_mac_X550EM_a:
986 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 989 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
987 if (nvmreg == 0xffff) 990 if (nvmreg == 0xffff)
988 break; 991 break;
989 high = (nvmreg >> 12) & 0x0f; 992 high = (nvmreg >> 12) & 0x0f;
990 low = (nvmreg >> 4) & 0xff; 993 low = (nvmreg >> 4) & 0xff;
991 id = nvmreg & 0x0f; 994 id = nvmreg & 0x0f;
992 aprint_normal(" NVM Image Version %u.", high); 995 aprint_normal(" NVM Image Version %u.", high);
993 if (hw->mac.type == ixgbe_mac_X540) 996 if (hw->mac.type == ixgbe_mac_X540)
994 str = "%x"; 997 str = "%x";
995 else 998 else
996 str = "%02x"; 999 str = "%02x";
997 aprint_normal(str, low); 1000 aprint_normal(str, low);
998 aprint_normal(" ID 0x%x,", id); 1001 aprint_normal(" ID 0x%x,", id);
999 break; 1002 break;
1000 case ixgbe_mac_X550EM_x: 1003 case ixgbe_mac_X550EM_x:
1001 case ixgbe_mac_X550: 1004 case ixgbe_mac_X550:
1002 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg); 1005 hw->eeprom.ops.read(hw, IXGBE_NVM_IMAGE_VER, &nvmreg);
1003 if (nvmreg == 0xffff) 1006 if (nvmreg == 0xffff)
1004 break; 1007 break;
1005 high = (nvmreg >> 12) & 0x0f; 1008 high = (nvmreg >> 12) & 0x0f;
1006 low = nvmreg & 0xff; 1009 low = nvmreg & 0xff;
1007 aprint_normal(" NVM Image Version %u.%02x,", high, low); 1010 aprint_normal(" NVM Image Version %u.%02x,", high, low);
1008 break; 1011 break;
1009 default: 1012 default:
1010 break; 1013 break;
1011 } 1014 }
1012 hw->eeprom.nvm_image_ver_high = high; 1015 hw->eeprom.nvm_image_ver_high = high;
1013 hw->eeprom.nvm_image_ver_low = low; 1016 hw->eeprom.nvm_image_ver_low = low;
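	/*
	 * A worked example of the version decode above, assuming a
	 * hypothetical nvmreg of 0x2a3c read on X540/X550EM_a (4-bit
	 * major, 8-bit minor, 4-bit ID):
	 *
	 *	high = (0x2a3c >> 12) & 0x0f == 0x2
	 *	low  = (0x2a3c >>  4) & 0xff == 0xa3
	 *	id   =  0x2a3c        & 0x0f == 0xc
	 *
	 * printed as "NVM Image Version 2.a3 ID 0xc".  X550/X550EM_x skip
	 * the ID nibble and take the minor version from the low byte.
	 */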
1014 1017
1015 /* PHY firmware revision */ 1018 /* PHY firmware revision */
1016 switch (hw->mac.type) { 1019 switch (hw->mac.type) {
1017 case ixgbe_mac_X540: 1020 case ixgbe_mac_X540:
1018 case ixgbe_mac_X550: 1021 case ixgbe_mac_X550:
1019 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg); 1022 hw->eeprom.ops.read(hw, IXGBE_PHYFW_REV, &nvmreg);
1020 if (nvmreg == 0xffff) 1023 if (nvmreg == 0xffff)
1021 break; 1024 break;
1022 high = (nvmreg >> 12) & 0x0f; 1025 high = (nvmreg >> 12) & 0x0f;
1023 low = (nvmreg >> 4) & 0xff; 1026 low = (nvmreg >> 4) & 0xff;
1024 id = nvmreg & 0x000f; 1027 id = nvmreg & 0x000f;
1025 aprint_normal(" PHY FW Revision %u.", high); 1028 aprint_normal(" PHY FW Revision %u.", high);
1026 if (hw->mac.type == ixgbe_mac_X540) 1029 if (hw->mac.type == ixgbe_mac_X540)
1027 str = "%x"; 1030 str = "%x";
1028 else 1031 else
1029 str = "%02x"; 1032 str = "%02x";
1030 aprint_normal(str, low); 1033 aprint_normal(str, low);
1031 aprint_normal(" ID 0x%x,", id); 1034 aprint_normal(" ID 0x%x,", id);
1032 break; 1035 break;
1033 default: 1036 default:
1034 break; 1037 break;
1035 } 1038 }
1036 1039
1037 /* NVM Map version & OEM NVM Image version */ 1040 /* NVM Map version & OEM NVM Image version */
1038 switch (hw->mac.type) { 1041 switch (hw->mac.type) {
1039 case ixgbe_mac_X550: 1042 case ixgbe_mac_X550:
1040 case ixgbe_mac_X550EM_x: 1043 case ixgbe_mac_X550EM_x:
1041 case ixgbe_mac_X550EM_a: 1044 case ixgbe_mac_X550EM_a:
1042 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg); 1045 hw->eeprom.ops.read(hw, IXGBE_NVM_MAP_VER, &nvmreg);
1043 if (nvmreg != 0xffff) { 1046 if (nvmreg != 0xffff) {
1044 high = (nvmreg >> 12) & 0x0f; 1047 high = (nvmreg >> 12) & 0x0f;
1045 low = nvmreg & 0x00ff; 1048 low = nvmreg & 0x00ff;
1046 aprint_normal(" NVM Map version %u.%02x,", high, low); 1049 aprint_normal(" NVM Map version %u.%02x,", high, low);
1047 } 1050 }
1048 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg); 1051 hw->eeprom.ops.read(hw, IXGBE_OEM_NVM_IMAGE_VER, &nvmreg);
1049 if (nvmreg != 0xffff) { 1052 if (nvmreg != 0xffff) {
1050 high = (nvmreg >> 12) & 0x0f; 1053 high = (nvmreg >> 12) & 0x0f;
1051 low = nvmreg & 0x00ff; 1054 low = nvmreg & 0x00ff;
1052 aprint_verbose(" OEM NVM Image version %u.%02x,", high, 1055 aprint_verbose(" OEM NVM Image version %u.%02x,", high,
1053 low); 1056 low);
1054 } 1057 }
1055 break; 1058 break;
1056 default: 1059 default:
1057 break; 1060 break;
1058 } 1061 }
1059 1062
1060 /* Print the ETrackID */ 1063 /* Print the ETrackID */
1061 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high); 1064 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_H, &high);
1062 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low); 1065 hw->eeprom.ops.read(hw, IXGBE_ETRACKID_L, &low);
1063 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low); 1066 aprint_normal(" ETrackID %08x\n", ((uint32_t)high << 16) | low);
1064 1067
1065 if (adapter->feat_en & IXGBE_FEATURE_MSIX) { 1068 if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
1066 error = ixgbe_allocate_msix(adapter, pa); 1069 error = ixgbe_allocate_msix(adapter, pa);
1067 if (error) { 1070 if (error) {
1068 /* Free allocated queue structures first */ 1071 /* Free allocated queue structures first */
1069 ixgbe_free_queues(adapter); 1072 ixgbe_free_queues(adapter);
1070 1073
1071 /* Fallback to legacy interrupt */ 1074 /* Fallback to legacy interrupt */
1072 adapter->feat_en &= ~IXGBE_FEATURE_MSIX; 1075 adapter->feat_en &= ~IXGBE_FEATURE_MSIX;
1073 if (adapter->feat_cap & IXGBE_FEATURE_MSI) 1076 if (adapter->feat_cap & IXGBE_FEATURE_MSI)
1074 adapter->feat_en |= IXGBE_FEATURE_MSI; 1077 adapter->feat_en |= IXGBE_FEATURE_MSI;
1075 adapter->num_queues = 1; 1078 adapter->num_queues = 1;
1076 1079
1077 /* Allocate our TX/RX Queues again */ 1080 /* Allocate our TX/RX Queues again */
1078 if (ixgbe_allocate_queues(adapter)) { 1081 if (ixgbe_allocate_queues(adapter)) {
1079 error = ENOMEM; 1082 error = ENOMEM;
1080 goto err_out; 1083 goto err_out;
1081 } 1084 }
1082 } 1085 }
1083 } 1086 }
1084 /* Recovery mode */ 1087 /* Recovery mode */
1085 switch (adapter->hw.mac.type) { 1088 switch (adapter->hw.mac.type) {
1086 case ixgbe_mac_X550: 1089 case ixgbe_mac_X550:
1087 case ixgbe_mac_X550EM_x: 1090 case ixgbe_mac_X550EM_x:
1088 case ixgbe_mac_X550EM_a: 1091 case ixgbe_mac_X550EM_a:
1089 /* >= 2.00 */ 1092 /* >= 2.00 */
1090 if (hw->eeprom.nvm_image_ver_high >= 2) { 1093 if (hw->eeprom.nvm_image_ver_high >= 2) {
1091 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE; 1094 adapter->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
1092 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE; 1095 adapter->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
1093 } 1096 }
1094 break; 1097 break;
1095 default: 1098 default:
1096 break; 1099 break;
1097 } 1100 }
1098 1101
1099 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0) 1102 if ((adapter->feat_en & IXGBE_FEATURE_MSIX) == 0)
1100 error = ixgbe_allocate_legacy(adapter, pa); 1103 error = ixgbe_allocate_legacy(adapter, pa);
1101 if (error) 1104 if (error)
1102 goto err_late; 1105 goto err_late;
1103 1106
1104 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */ 1107 /* Tasklets for Link, SFP, Multispeed Fiber and Flow Director */
1105 adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 1108 adapter->link_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1106 ixgbe_handle_link, adapter); 1109 ixgbe_handle_link, adapter);
1107 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 1110 adapter->mod_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1108 ixgbe_handle_mod, adapter); 1111 ixgbe_handle_mod, adapter);
1109 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 1112 adapter->msf_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1110 ixgbe_handle_msf, adapter); 1113 ixgbe_handle_msf, adapter);
1111 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 1114 adapter->phy_si = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1112 ixgbe_handle_phy, adapter); 1115 ixgbe_handle_phy, adapter);
1113 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 1116 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
1114 adapter->fdir_si = 1117 adapter->fdir_si =
1115 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 1118 softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
1116 ixgbe_reinit_fdir, adapter); 1119 ixgbe_reinit_fdir, adapter);
1117 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL) 1120 if ((adapter->link_si == NULL) || (adapter->mod_si == NULL)
1118 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL) 1121 || (adapter->msf_si == NULL) || (adapter->phy_si == NULL)
1119 || ((adapter->feat_en & IXGBE_FEATURE_FDIR) 1122 || ((adapter->feat_en & IXGBE_FEATURE_FDIR)
1120 && (adapter->fdir_si == NULL))) { 1123 && (adapter->fdir_si == NULL))) {
1121 aprint_error_dev(dev, 1124 aprint_error_dev(dev,
1122 "could not establish software interrupts ()\n"); 1125 "could not establish software interrupts ()\n");
1123 goto err_out; 1126 goto err_out;
1124 } 1127 }
1125 1128
1126 error = ixgbe_start_hw(hw); 1129 error = ixgbe_start_hw(hw);
1127 switch (error) { 1130 switch (error) {
1128 case IXGBE_ERR_EEPROM_VERSION: 1131 case IXGBE_ERR_EEPROM_VERSION:
1129 aprint_error_dev(dev, "This device is a pre-production adapter/" 1132 aprint_error_dev(dev, "This device is a pre-production adapter/"
1130 "LOM. Please be aware there may be issues associated " 1133 "LOM. Please be aware there may be issues associated "
1131 "with your hardware.\nIf you are experiencing problems " 1134 "with your hardware.\nIf you are experiencing problems "
1132 "please contact your Intel or hardware representative " 1135 "please contact your Intel or hardware representative "
1133 "who provided you with this hardware.\n"); 1136 "who provided you with this hardware.\n");
1134 break; 1137 break;
1135 default: 1138 default:
1136 break; 1139 break;
1137 } 1140 }
1138 1141
1139 /* Setup OS specific network interface */ 1142 /* Setup OS specific network interface */
1140 if (ixgbe_setup_interface(dev, adapter) != 0) 1143 if (ixgbe_setup_interface(dev, adapter) != 0)
1141 goto err_late; 1144 goto err_late;
1142 1145
1143 /* 1146 /*
1144 * Print PHY ID only for copper PHY. On devices with an SFP(+) cage 1147 * Print PHY ID only for copper PHY. On devices with an SFP(+) cage
1145 * and a module inserted, phy.id is not an MII PHY id but an SFF 8024 ID. 1148 * and a module inserted, phy.id is not an MII PHY id but an SFF 8024 ID.
1146 */ 1149 */
1147 if (hw->phy.media_type == ixgbe_media_type_copper) { 1150 if (hw->phy.media_type == ixgbe_media_type_copper) {
1148 uint16_t id1, id2; 1151 uint16_t id1, id2;
1149 int oui, model, rev; 1152 int oui, model, rev;
1150 const char *descr; 1153 const char *descr;
1151 1154
1152 id1 = hw->phy.id >> 16; 1155 id1 = hw->phy.id >> 16;
1153 id2 = hw->phy.id & 0xffff; 1156 id2 = hw->phy.id & 0xffff;
1154 oui = MII_OUI(id1, id2); 1157 oui = MII_OUI(id1, id2);
1155 model = MII_MODEL(id2); 1158 model = MII_MODEL(id2);
1156 rev = MII_REV(id2); 1159 rev = MII_REV(id2);
1157 if ((descr = mii_get_descr(oui, model)) != NULL) 1160 if ((descr = mii_get_descr(oui, model)) != NULL)
1158 aprint_normal_dev(dev, 1161 aprint_normal_dev(dev,
1159 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n", 1162 "PHY: %s (OUI 0x%06x, model 0x%04x), rev. %d\n",
1160 descr, oui, model, rev); 1163 descr, oui, model, rev);
1161 else 1164 else
1162 aprint_normal_dev(dev, 1165 aprint_normal_dev(dev,
1163 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n", 1166 "PHY OUI 0x%06x, model 0x%04x, rev. %d\n",
1164 oui, model, rev); 1167 oui, model, rev);
1165 } 1168 }
1166 1169
1167 /* Enable EEE power saving */ 1170 /* Enable EEE power saving */
1168 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 1171 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
1169 hw->mac.ops.setup_eee(hw, 1172 hw->mac.ops.setup_eee(hw,
1170 adapter->feat_en & IXGBE_FEATURE_EEE); 1173 adapter->feat_en & IXGBE_FEATURE_EEE);
1171 1174
1172 /* Enable power to the phy. */ 1175 /* Enable power to the phy. */
1173 if (!unsupported_sfp) { 1176 if (!unsupported_sfp) {
1174 /* Enable the optics for 82599 SFP+ fiber */ 1177 /* Enable the optics for 82599 SFP+ fiber */
1175 ixgbe_enable_tx_laser(hw); 1178 ixgbe_enable_tx_laser(hw);
1176 1179
1177 /* 1180 /*
1178 * XXX Currently, ixgbe_set_phy_power() supports only copper 1181 * XXX Currently, ixgbe_set_phy_power() supports only copper
1179 * PHY, so it's not required to test with !unsupported_sfp. 1182 * PHY, so it's not required to test with !unsupported_sfp.
1180 */ 1183 */
1181 ixgbe_set_phy_power(hw, TRUE); 1184 ixgbe_set_phy_power(hw, TRUE);
1182 } 1185 }
1183 1186
1184 /* Initialize statistics */ 1187 /* Initialize statistics */
1185 ixgbe_update_stats_counters(adapter); 1188 ixgbe_update_stats_counters(adapter);
1186 1189
1187 /* Check PCIE slot type/speed/width */ 1190 /* Check PCIE slot type/speed/width */
1188 ixgbe_get_slot_info(adapter); 1191 ixgbe_get_slot_info(adapter);
1189 1192
1190 /* 1193 /*
1191 * Do time init and sysctl init here, but 1194 * Do time init and sysctl init here, but
1192 * only on the first port of a bypass adapter. 1195 * only on the first port of a bypass adapter.
1193 */ 1196 */
1194 ixgbe_bypass_init(adapter); 1197 ixgbe_bypass_init(adapter);
1195 1198
1196 /* Set an initial dmac value */ 1199 /* Set an initial dmac value */
1197 adapter->dmac = 0; 1200 adapter->dmac = 0;
1198 /* Set initial advertised speeds (if applicable) */ 1201 /* Set initial advertised speeds (if applicable) */
1199 adapter->advertise = ixgbe_get_advertise(adapter); 1202 adapter->advertise = ixgbe_get_advertise(adapter);
1200 1203
1201 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 1204 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
1202 ixgbe_define_iov_schemas(dev, &error); 1205 ixgbe_define_iov_schemas(dev, &error);
1203 1206
1204 /* Add sysctls */ 1207 /* Add sysctls */
1205 ixgbe_add_device_sysctls(adapter); 1208 ixgbe_add_device_sysctls(adapter);
1206 ixgbe_add_hw_stats(adapter); 1209 ixgbe_add_hw_stats(adapter);
1207 1210
1208 /* For Netmap */ 1211 /* For Netmap */
1209 adapter->init_locked = ixgbe_init_locked; 1212 adapter->init_locked = ixgbe_init_locked;
1210 adapter->stop_locked = ixgbe_stop; 1213 adapter->stop_locked = ixgbe_stop;
1211 1214
1212 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 1215 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
1213 ixgbe_netmap_attach(adapter); 1216 ixgbe_netmap_attach(adapter);
1214 1217
1215 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); 1218 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
1216 aprint_verbose_dev(dev, "feature cap %s\n", buf); 1219 aprint_verbose_dev(dev, "feature cap %s\n", buf);
1217 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); 1220 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
1218 aprint_verbose_dev(dev, "feature ena %s\n", buf); 1221 aprint_verbose_dev(dev, "feature ena %s\n", buf);
1219 1222
1220 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume)) 1223 if (pmf_device_register(dev, ixgbe_suspend, ixgbe_resume))
1221 pmf_class_network_register(dev, adapter->ifp); 1224 pmf_class_network_register(dev, adapter->ifp);
1222 else 1225 else
1223 aprint_error_dev(dev, "couldn't establish power handler\n"); 1226 aprint_error_dev(dev, "couldn't establish power handler\n");
1224 1227
1225 /* Init recovery mode timer and state variable */ 1228 /* Init recovery mode timer and state variable */
1226 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) { 1229 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
1227 adapter->recovery_mode = 0; 1230 adapter->recovery_mode = 0;
1228 1231
1229 /* Set up the timer callout */ 1232 /* Set up the timer callout */
1230 callout_init(&adapter->recovery_mode_timer, 1233 callout_init(&adapter->recovery_mode_timer,
1231 IXGBE_CALLOUT_FLAGS); 1234 IXGBE_CALLOUT_FLAGS);
1232 1235
1233 /* Start the task */ 1236 /* Start the task */
1234 callout_reset(&adapter->recovery_mode_timer, hz, 1237 callout_reset(&adapter->recovery_mode_timer, hz,
1235 ixgbe_recovery_mode_timer, adapter); 1238 ixgbe_recovery_mode_timer, adapter);
1236 } 1239 }
1237 1240
1238 INIT_DEBUGOUT("ixgbe_attach: end"); 1241 INIT_DEBUGOUT("ixgbe_attach: end");
1239 adapter->osdep.attached = true; 1242 adapter->osdep.attached = true;
1240 1243
1241 return; 1244 return;
1242 1245
1243err_late: 1246err_late:
1244 ixgbe_free_queues(adapter); 1247 ixgbe_free_queues(adapter);
1245err_out: 1248err_out:
1246 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 1249 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1247 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 1250 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
1248 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 1251 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
1249 ixgbe_free_softint(adapter); 1252 ixgbe_free_softint(adapter);
1250 ixgbe_free_pci_resources(adapter); 1253 ixgbe_free_pci_resources(adapter);
1251 if (adapter->mta != NULL) 1254 if (adapter->mta != NULL)
1252 free(adapter->mta, M_DEVBUF); 1255 free(adapter->mta, M_DEVBUF);
1253 IXGBE_CORE_LOCK_DESTROY(adapter); 1256 IXGBE_CORE_LOCK_DESTROY(adapter);
1254 1257
1255 return; 1258 return;
1256} /* ixgbe_attach */ 1259} /* ixgbe_attach */
1257 1260
1258/************************************************************************ 1261/************************************************************************
1259 * ixgbe_check_wol_support 1262 * ixgbe_check_wol_support
1260 * 1263 *
1261 * Checks whether the adapter's ports are capable of 1264 * Checks whether the adapter's ports are capable of
1262 * Wake On LAN by reading the adapter's NVM. 1265 * Wake On LAN by reading the adapter's NVM.
1263 * 1266 *
1264 * Sets each port's hw->wol_enabled value depending 1267 * Sets each port's hw->wol_enabled value depending
1265 * on the value read here. 1268 * on the value read here.
1266 ************************************************************************/ 1269 ************************************************************************/
1267static void 1270static void
1268ixgbe_check_wol_support(struct adapter *adapter) 1271ixgbe_check_wol_support(struct adapter *adapter)
1269{ 1272{
1270 struct ixgbe_hw *hw = &adapter->hw; 1273 struct ixgbe_hw *hw = &adapter->hw;
1271 u16 dev_caps = 0; 1274 u16 dev_caps = 0;
1272 1275
1273 /* Find out WoL support for port */ 1276 /* Find out WoL support for port */
1274 adapter->wol_support = hw->wol_enabled = 0; 1277 adapter->wol_support = hw->wol_enabled = 0;
1275 ixgbe_get_device_caps(hw, &dev_caps); 1278 ixgbe_get_device_caps(hw, &dev_caps);
1276 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || 1279 if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
1277 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && 1280 ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
1278 hw->bus.func == 0)) 1281 hw->bus.func == 0))
1279 adapter->wol_support = hw->wol_enabled = 1; 1282 adapter->wol_support = hw->wol_enabled = 1;
1280 1283
1281 /* Save initial wake up filter configuration */ 1284 /* Save initial wake up filter configuration */
1282 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); 1285 adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
1283 1286
1284 return; 1287 return;
1285} /* ixgbe_check_wol_support */ 1288} /* ixgbe_check_wol_support */
1286 1289
1287/************************************************************************ 1290/************************************************************************
1288 * ixgbe_setup_interface 1291 * ixgbe_setup_interface
1289 * 1292 *
1290 * Setup networking device structure and register an interface. 1293 * Setup networking device structure and register an interface.
1291 ************************************************************************/ 1294 ************************************************************************/
1292static int 1295static int
1293ixgbe_setup_interface(device_t dev, struct adapter *adapter) 1296ixgbe_setup_interface(device_t dev, struct adapter *adapter)
1294{ 1297{
1295 struct ethercom *ec = &adapter->osdep.ec; 1298 struct ethercom *ec = &adapter->osdep.ec;
1296 struct ifnet *ifp; 1299 struct ifnet *ifp;
1297 int rv; 1300 int rv;
1298 1301
1299 INIT_DEBUGOUT("ixgbe_setup_interface: begin"); 1302 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1300 1303
1301 ifp = adapter->ifp = &ec->ec_if; 1304 ifp = adapter->ifp = &ec->ec_if;
1302 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); 1305 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1303 ifp->if_baudrate = IF_Gbps(10); 1306 ifp->if_baudrate = IF_Gbps(10);
1304 ifp->if_init = ixgbe_init; 1307 ifp->if_init = ixgbe_init;
1305 ifp->if_stop = ixgbe_ifstop; 1308 ifp->if_stop = ixgbe_ifstop;
1306 ifp->if_softc = adapter; 1309 ifp->if_softc = adapter;
1307 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1310 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1308#ifdef IXGBE_MPSAFE 1311#ifdef IXGBE_MPSAFE
1309 ifp->if_extflags = IFEF_MPSAFE; 1312 ifp->if_extflags = IFEF_MPSAFE;
1310#endif 1313#endif
1311 ifp->if_ioctl = ixgbe_ioctl; 1314 ifp->if_ioctl = ixgbe_ioctl;
1312#if __FreeBSD_version >= 1100045 1315#if __FreeBSD_version >= 1100045
1313 /* TSO parameters */ 1316 /* TSO parameters */
1314 ifp->if_hw_tsomax = 65518; 1317 ifp->if_hw_tsomax = 65518;
1315 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER; 1318 ifp->if_hw_tsomaxsegcount = IXGBE_82599_SCATTER;
1316 ifp->if_hw_tsomaxsegsize = 2048; 1319 ifp->if_hw_tsomaxsegsize = 2048;
1317#endif 1320#endif
1318 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { 1321 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1319#if 0 1322#if 0
1320 ixgbe_start_locked = ixgbe_legacy_start_locked; 1323 ixgbe_start_locked = ixgbe_legacy_start_locked;
1321#endif 1324#endif
1322 } else { 1325 } else {
1323 ifp->if_transmit = ixgbe_mq_start; 1326 ifp->if_transmit = ixgbe_mq_start;
1324#if 0 1327#if 0
1325 ixgbe_start_locked = ixgbe_mq_start_locked; 1328 ixgbe_start_locked = ixgbe_mq_start_locked;
1326#endif 1329#endif
1327 } 1330 }
1328 ifp->if_start = ixgbe_legacy_start; 1331 ifp->if_start = ixgbe_legacy_start;
1329 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 1332 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1330 IFQ_SET_READY(&ifp->if_snd); 1333 IFQ_SET_READY(&ifp->if_snd);
1331 1334
1332 rv = if_initialize(ifp); 1335 rv = if_initialize(ifp);
1333 if (rv != 0) { 1336 if (rv != 0) {
1334 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); 1337 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1335 return rv; 1338 return rv;
1336 } 1339 }
1337 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); 1340 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1338 ether_ifattach(ifp, adapter->hw.mac.addr); 1341 ether_ifattach(ifp, adapter->hw.mac.addr);
1339 aprint_normal_dev(dev, "Ethernet address %s\n", 1342 aprint_normal_dev(dev, "Ethernet address %s\n",
1340 ether_sprintf(adapter->hw.mac.addr)); 1343 ether_sprintf(adapter->hw.mac.addr));
1341 /* 1344 /*
1342 * We use a per-TX-queue softint, so if_deferred_start_init() isn't 1345 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
1343 * used. 1346 * used.
1344 */ 1347 */
1345 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb); 1348 ether_set_ifflags_cb(ec, ixgbe_ifflags_cb);
1346 1349
1347 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1350 adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1348 1351
1349 /* 1352 /*
1350 * Tell the upper layer(s) we support long frames. 1353 * Tell the upper layer(s) we support long frames.
1351 */ 1354 */
1352 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1355 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1353 1356
1354 /* Set capability flags */ 1357 /* Set capability flags */
1355 ifp->if_capabilities |= IFCAP_RXCSUM 1358 ifp->if_capabilities |= IFCAP_RXCSUM
1356 | IFCAP_TXCSUM 1359 | IFCAP_TXCSUM
1357 | IFCAP_TSOv4 1360 | IFCAP_TSOv4
1358 | IFCAP_TSOv6; 1361 | IFCAP_TSOv6;
1359 ifp->if_capenable = 0; 1362 ifp->if_capenable = 0;
1360 1363
1361 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING 1364 ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING
1362 | ETHERCAP_VLAN_HWCSUM 1365 | ETHERCAP_VLAN_HWCSUM
1363 | ETHERCAP_JUMBO_MTU 1366 | ETHERCAP_JUMBO_MTU
1364 | ETHERCAP_VLAN_MTU; 1367 | ETHERCAP_VLAN_MTU;
1365 1368
1366 /* Enable the above capabilities by default */ 1369 /* Enable the above capabilities by default */
1367 ec->ec_capenable = ec->ec_capabilities; 1370 ec->ec_capenable = ec->ec_capabilities;
1368 1371
1369 /* 1372 /*
1370 * Don't turn this on by default, if vlans are 1373 * Don't turn this on by default, if vlans are
1371 * created on another pseudo device (eg. lagg) 1374 * created on another pseudo device (eg. lagg)
1372 * then vlan events are not passed thru, breaking 1375 * then vlan events are not passed thru, breaking
1373 * operation, but with HW FILTER off it works. If 1376 * operation, but with HW FILTER off it works. If
1374 * using vlans directly on the ixgbe driver you can 1377 * using vlans directly on the ixgbe driver you can
1375 * enable this and get full hardware tag filtering. 1378 * enable this and get full hardware tag filtering.
1376 */ 1379 */
1377 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; 1380 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1378 1381
1379 /* 1382 /*
1380 * Specify the media types supported by this adapter and register 1383 * Specify the media types supported by this adapter and register
1381 * callbacks to update media and link information 1384 * callbacks to update media and link information
1382 */ 1385 */
1383 ec->ec_ifmedia = &adapter->media; 1386 ec->ec_ifmedia = &adapter->media;
1384 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, 1387 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
1385 ixgbe_media_status); 1388 ixgbe_media_status);
1386 1389
1387 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); 1390 adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
1388 ixgbe_add_media_types(adapter); 1391 ixgbe_add_media_types(adapter);
1389 1392
1390 /* Set autoselect media by default */ 1393 /* Set autoselect media by default */
1391 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1394 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1392 1395
1393 if_register(ifp); 1396 if_register(ifp);
1394 1397
1395 return (0); 1398 return (0);
1396} /* ixgbe_setup_interface */ 1399} /* ixgbe_setup_interface */
1397 1400
1398/************************************************************************ 1401/************************************************************************
1399 * ixgbe_add_media_types 1402 * ixgbe_add_media_types
1400 ************************************************************************/ 1403 ************************************************************************/
1401static void 1404static void
1402ixgbe_add_media_types(struct adapter *adapter) 1405ixgbe_add_media_types(struct adapter *adapter)
1403{ 1406{
1404 struct ixgbe_hw *hw = &adapter->hw; 1407 struct ixgbe_hw *hw = &adapter->hw;
1405 u64 layer; 1408 u64 layer;
1406 1409
1407 layer = adapter->phy_layer; 1410 layer = adapter->phy_layer;
1408 1411
1409#define ADD(mm, dd) \ 1412#define ADD(mm, dd) \
1410 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL); 1413 ifmedia_add(&adapter->media, IFM_ETHER | (mm), (dd), NULL);
1411 1414
1412 ADD(IFM_NONE, 0); 1415 ADD(IFM_NONE, 0);
1413 1416
1414 /* Media types with matching NetBSD media defines */ 1417 /* Media types with matching NetBSD media defines */
1415 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { 1418 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
1416 ADD(IFM_10G_T | IFM_FDX, 0); 1419 ADD(IFM_10G_T | IFM_FDX, 0);
1417 } 1420 }
1418 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { 1421 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
1419 ADD(IFM_1000_T | IFM_FDX, 0); 1422 ADD(IFM_1000_T | IFM_FDX, 0);
1420 } 1423 }
1421 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) { 1424 if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) {
1422 ADD(IFM_100_TX | IFM_FDX, 0); 1425 ADD(IFM_100_TX | IFM_FDX, 0);
1423 } 1426 }
1424 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) { 1427 if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1425 ADD(IFM_10_T | IFM_FDX, 0); 1428 ADD(IFM_10_T | IFM_FDX, 0);
1426 } 1429 }
1427 1430
1428 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || 1431 if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1429 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) { 1432 layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1430 ADD(IFM_10G_TWINAX | IFM_FDX, 0); 1433 ADD(IFM_10G_TWINAX | IFM_FDX, 0);
1431 } 1434 }
1432 1435
1433 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { 1436 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1434 ADD(IFM_10G_LR | IFM_FDX, 0); 1437 ADD(IFM_10G_LR | IFM_FDX, 0);
1435 if (hw->phy.multispeed_fiber) { 1438 if (hw->phy.multispeed_fiber) {
1436 ADD(IFM_1000_LX | IFM_FDX, 0); 1439 ADD(IFM_1000_LX | IFM_FDX, 0);
1437 } 1440 }
1438 } 1441 }
1439 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { 1442 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1440 ADD(IFM_10G_SR | IFM_FDX, 0); 1443 ADD(IFM_10G_SR | IFM_FDX, 0);
1441 if (hw->phy.multispeed_fiber) { 1444 if (hw->phy.multispeed_fiber) {
1442 ADD(IFM_1000_SX | IFM_FDX, 0); 1445 ADD(IFM_1000_SX | IFM_FDX, 0);
1443 } 1446 }
1444 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) { 1447 } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1445 ADD(IFM_1000_SX | IFM_FDX, 0); 1448 ADD(IFM_1000_SX | IFM_FDX, 0);
1446 } 1449 }
1447 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) { 1450 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1448 ADD(IFM_10G_CX4 | IFM_FDX, 0); 1451 ADD(IFM_10G_CX4 | IFM_FDX, 0);
1449 } 1452 }
1450 1453
1451 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { 1454 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1452 ADD(IFM_10G_KR | IFM_FDX, 0); 1455 ADD(IFM_10G_KR | IFM_FDX, 0);
1453 } 1456 }
1454 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { 1457 if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) {
1455 ADD(IFM_10G_KX4 | IFM_FDX, 0); 1458 ADD(IFM_10G_KX4 | IFM_FDX, 0);
1456 } 1459 }
1457 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { 1460 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1458 ADD(IFM_1000_KX | IFM_FDX, 0); 1461 ADD(IFM_1000_KX | IFM_FDX, 0);
1459 } 1462 }
1460 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { 1463 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) {
1461 ADD(IFM_2500_KX | IFM_FDX, 0); 1464 ADD(IFM_2500_KX | IFM_FDX, 0);
1462 } 1465 }
1463 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) { 1466 if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T) {
1464 ADD(IFM_2500_T | IFM_FDX, 0); 1467 ADD(IFM_2500_T | IFM_FDX, 0);
1465 } 1468 }
1466 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) { 1469 if (layer & IXGBE_PHYSICAL_LAYER_5GBASE_T) {
1467 ADD(IFM_5000_T | IFM_FDX, 0); 1470 ADD(IFM_5000_T | IFM_FDX, 0);
1468 } 1471 }
1469 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) 1472 if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
1470 ADD(IFM_1000_BX10 | IFM_FDX, 0); 1473 ADD(IFM_1000_BX10 | IFM_FDX, 0);
1471 /* XXX no ifmedia_set? */ 1474 /* XXX no ifmedia_set? */
1472 1475
1473 ADD(IFM_AUTO, 0); 1476 ADD(IFM_AUTO, 0);
1474 1477
1475#undef ADD 1478#undef ADD
1476} /* ixgbe_add_media_types */ 1479} /* ixgbe_add_media_types */
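
For reference, ADD() is nothing more than ifmedia_add(9) with IFM_ETHER or'd into the media word, so after preprocessing a 10GBASE-T entry and the terminating autoselect entry come out roughly as:

	/* ADD(IFM_10G_T | IFM_FDX, 0) after cpp: */
	ifmedia_add(&adapter->media,
	    IFM_ETHER | (IFM_10G_T | IFM_FDX), 0, NULL);
	/* ADD(IFM_AUTO, 0) after cpp: */
	ifmedia_add(&adapter->media, IFM_ETHER | (IFM_AUTO), 0, NULL);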
1477 1480
1478/************************************************************************ 1481/************************************************************************
1479 * ixgbe_is_sfp 1482 * ixgbe_is_sfp
1480 ************************************************************************/ 1483 ************************************************************************/
1481static inline bool 1484static inline bool
1482ixgbe_is_sfp(struct ixgbe_hw *hw) 1485ixgbe_is_sfp(struct ixgbe_hw *hw)
1483{ 1486{
1484 switch (hw->mac.type) { 1487 switch (hw->mac.type) {
1485 case ixgbe_mac_82598EB: 1488 case ixgbe_mac_82598EB:
1486 if (hw->phy.type == ixgbe_phy_nl) 1489 if (hw->phy.type == ixgbe_phy_nl)
1487 return (TRUE); 1490 return (TRUE);
1488 return (FALSE); 1491 return (FALSE);
1489 case ixgbe_mac_82599EB: 1492 case ixgbe_mac_82599EB:
1490 case ixgbe_mac_X550EM_x: 1493 case ixgbe_mac_X550EM_x:
1491 case ixgbe_mac_X550EM_a: 1494 case ixgbe_mac_X550EM_a:
1492 switch (hw->mac.ops.get_media_type(hw)) { 1495 switch (hw->mac.ops.get_media_type(hw)) {
1493 case ixgbe_media_type_fiber: 1496 case ixgbe_media_type_fiber:
1494 case ixgbe_media_type_fiber_qsfp: 1497 case ixgbe_media_type_fiber_qsfp:
1495 return (TRUE); 1498 return (TRUE);
1496 default: 1499 default:
1497 return (FALSE); 1500 return (FALSE);
1498 } 1501 }
1499 default: 1502 default:
1500 return (FALSE); 1503 return (FALSE);
1501 } 1504 }
1502} /* ixgbe_is_sfp */ 1505} /* ixgbe_is_sfp */
1503 1506
1504/************************************************************************ 1507/************************************************************************
1505 * ixgbe_config_link 1508 * ixgbe_config_link
1506 ************************************************************************/ 1509 ************************************************************************/
1507static void 1510static void
1508ixgbe_config_link(struct adapter *adapter) 1511ixgbe_config_link(struct adapter *adapter)
1509{ 1512{
1510 struct ixgbe_hw *hw = &adapter->hw; 1513 struct ixgbe_hw *hw = &adapter->hw;
1511 u32 autoneg, err = 0; 1514 u32 autoneg, err = 0;
1512 bool sfp, negotiate = false; 1515 bool sfp, negotiate = false;
1513 1516
1514 sfp = ixgbe_is_sfp(hw); 1517 sfp = ixgbe_is_sfp(hw);
1515 1518
1516 if (sfp) { 1519 if (sfp) {
1517 if (hw->phy.multispeed_fiber) { 1520 if (hw->phy.multispeed_fiber) {
1518 ixgbe_enable_tx_laser(hw); 1521 ixgbe_enable_tx_laser(hw);
1519 kpreempt_disable(); 1522 kpreempt_disable();
1520 softint_schedule(adapter->msf_si); 1523 softint_schedule(adapter->msf_si);
1521 kpreempt_enable(); 1524 kpreempt_enable();
1522 } 1525 }
1523 kpreempt_disable(); 1526 kpreempt_disable();
1524 softint_schedule(adapter->mod_si); 1527 softint_schedule(adapter->mod_si);
1525 kpreempt_enable(); 1528 kpreempt_enable();
1526 } else { 1529 } else {
1527 struct ifmedia *ifm = &adapter->media; 1530 struct ifmedia *ifm = &adapter->media;
1528 1531
1529 if (hw->mac.ops.check_link) 1532 if (hw->mac.ops.check_link)
1530 err = ixgbe_check_link(hw, &adapter->link_speed, 1533 err = ixgbe_check_link(hw, &adapter->link_speed,
1531 &adapter->link_up, FALSE); 1534 &adapter->link_up, FALSE);
1532 if (err) 1535 if (err)
1533 return; 1536 return;
1534 1537
1535 /* 1538 /*
 1536 * Check if this is the first call. If it is, get the 1539 * Check if this is the first call. If it is, get the
 1537 * value for autonegotiation. 1540 * value for autonegotiation.
1538 */ 1541 */
1539 autoneg = hw->phy.autoneg_advertised; 1542 autoneg = hw->phy.autoneg_advertised;
1540 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE) 1543 if ((IFM_SUBTYPE(ifm->ifm_cur->ifm_media) != IFM_NONE)
1541 && ((!autoneg) && (hw->mac.ops.get_link_capabilities))) 1544 && ((!autoneg) && (hw->mac.ops.get_link_capabilities)))
1542 err = hw->mac.ops.get_link_capabilities(hw, &autoneg, 1545 err = hw->mac.ops.get_link_capabilities(hw, &autoneg,
1543 &negotiate); 1546 &negotiate);
1544 if (err) 1547 if (err)
1545 return; 1548 return;
1546 if (hw->mac.ops.setup_link) 1549 if (hw->mac.ops.setup_link)
1547 err = hw->mac.ops.setup_link(hw, autoneg, 1550 err = hw->mac.ops.setup_link(hw, autoneg,
1548 adapter->link_up); 1551 adapter->link_up);
1549 } 1552 }
1550 1553
1551} /* ixgbe_config_link */ 1554} /* ixgbe_config_link */
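
The kpreempt_disable()/softint_schedule()/kpreempt_enable() sequence above is the standard softint(9) idiom: softint_schedule() queues the handler on the current CPU, so the calling LWP must not migrate across the call (interrupt context, or preemption disabled as here). A minimal sketch; the xx_* names and the cookie are hypothetical:

	static void *xx_mod_si;			/* softint(9) cookie */

	static void
	xx_mod_softint(void *arg)
	{
		/* deferred work, e.g. probing a newly inserted module */
	}

	/* once, at attach time */
	xx_mod_si = softint_establish(SOFTINT_NET, xx_mod_softint, sc);

	/* later, from thread context */
	kpreempt_disable();
	softint_schedule(xx_mod_si);
	kpreempt_enable();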
1552 1555
1553/************************************************************************ 1556/************************************************************************
1554 * ixgbe_update_stats_counters - Update board statistics counters. 1557 * ixgbe_update_stats_counters - Update board statistics counters.
1555 ************************************************************************/ 1558 ************************************************************************/
1556static void 1559static void
1557ixgbe_update_stats_counters(struct adapter *adapter) 1560ixgbe_update_stats_counters(struct adapter *adapter)
1558{ 1561{
1559 struct ifnet *ifp = adapter->ifp; 1562 struct ifnet *ifp = adapter->ifp;
1560 struct ixgbe_hw *hw = &adapter->hw; 1563 struct ixgbe_hw *hw = &adapter->hw;
1561 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1564 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1562 u32 missed_rx = 0, bprc, lxon, lxoff, total; 1565 u32 missed_rx = 0, bprc, lxon, lxoff, total;
1563 u64 total_missed_rx = 0; 1566 u64 total_missed_rx = 0;
1564 uint64_t crcerrs, rlec; 1567 uint64_t crcerrs, rlec;
1565 unsigned int queue_counters; 1568 unsigned int queue_counters;
1566 int i; 1569 int i;
1567 1570
1568 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS); 1571 crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1569 stats->crcerrs.ev_count += crcerrs; 1572 stats->crcerrs.ev_count += crcerrs;
1570 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 1573 stats->illerrc.ev_count += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
1571 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC); 1574 stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
1572 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC); 1575 stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
1573 if (hw->mac.type >= ixgbe_mac_X550) 1576 if (hw->mac.type >= ixgbe_mac_X550)
1574 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC); 1577 stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
1575 1578
1576 /* 16 registers exist */ 1579 /* 16 registers exist */
1577 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues); 1580 queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
1578 for (i = 0; i < queue_counters; i++) { 1581 for (i = 0; i < queue_counters; i++) {
1579 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 1582 stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1580 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 1583 stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1581 if (hw->mac.type >= ixgbe_mac_82599EB) { 1584 if (hw->mac.type >= ixgbe_mac_82599EB) {
1582 stats->qprdc[i].ev_count 1585 stats->qprdc[i].ev_count
1583 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 1586 += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1584 } 1587 }
1585 } 1588 }
1586 1589
1587 /* 8 registers exist */ 1590 /* 8 registers exist */
1588 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1591 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1589 uint32_t mp; 1592 uint32_t mp;
1590 1593
1591 /* MPC */ 1594 /* MPC */
1592 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 1595 mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
1593 /* global total per queue */ 1596 /* global total per queue */
1594 stats->mpc[i].ev_count += mp; 1597 stats->mpc[i].ev_count += mp;
1595 /* running comprehensive total for stats display */ 1598 /* running comprehensive total for stats display */
1596 total_missed_rx += mp; 1599 total_missed_rx += mp;
1597 1600
1598 if (hw->mac.type == ixgbe_mac_82598EB) 1601 if (hw->mac.type == ixgbe_mac_82598EB)
1599 stats->rnbc[i].ev_count 1602 stats->rnbc[i].ev_count
1600 += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 1603 += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
1601 1604
1602 stats->pxontxc[i].ev_count 1605 stats->pxontxc[i].ev_count
1603 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 1606 += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
1604 stats->pxofftxc[i].ev_count 1607 stats->pxofftxc[i].ev_count
1605 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 1608 += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
1606 if (hw->mac.type >= ixgbe_mac_82599EB) { 1609 if (hw->mac.type >= ixgbe_mac_82599EB) {
1607 stats->pxonrxc[i].ev_count 1610 stats->pxonrxc[i].ev_count
1608 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 1611 += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
1609 stats->pxoffrxc[i].ev_count 1612 stats->pxoffrxc[i].ev_count
1610 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 1613 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
1611 stats->pxon2offc[i].ev_count 1614 stats->pxon2offc[i].ev_count
1612 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 1615 += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
1613 } else { 1616 } else {
1614 stats->pxonrxc[i].ev_count 1617 stats->pxonrxc[i].ev_count
1615 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 1618 += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
1616 stats->pxoffrxc[i].ev_count 1619 stats->pxoffrxc[i].ev_count
1617 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 1620 += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
1618 } 1621 }
1619 } 1622 }
1620 stats->mpctotal.ev_count += total_missed_rx; 1623 stats->mpctotal.ev_count += total_missed_rx;
1621 1624
 1622 /* The datasheet says M[LR]FC are valid only when the link is up at 10Gbps */ 1625 /* The datasheet says M[LR]FC are valid only when the link is up at 10Gbps */
1623 if ((adapter->link_active == LINK_STATE_UP) 1626 if ((adapter->link_active == LINK_STATE_UP)
1624 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) { 1627 && (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
1625 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC); 1628 stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
1626 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC); 1629 stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
1627 } 1630 }
1628 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC); 1631 rlec = IXGBE_READ_REG(hw, IXGBE_RLEC);
1629 stats->rlec.ev_count += rlec; 1632 stats->rlec.ev_count += rlec;
1630 1633
1631 /* Hardware workaround, gprc counts missed packets */ 1634 /* Hardware workaround, gprc counts missed packets */
1632 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx; 1635 stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
1633 1636
1634 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 1637 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
1635 stats->lxontxc.ev_count += lxon; 1638 stats->lxontxc.ev_count += lxon;
1636 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 1639 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
1637 stats->lxofftxc.ev_count += lxoff; 1640 stats->lxofftxc.ev_count += lxoff;
1638 total = lxon + lxoff; 1641 total = lxon + lxoff;
1639 1642
1640 if (hw->mac.type != ixgbe_mac_82598EB) { 1643 if (hw->mac.type != ixgbe_mac_82598EB) {
1641 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) + 1644 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
1642 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 1645 ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
1643 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) + 1646 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
1644 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN; 1647 ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32) - total * ETHER_MIN_LEN;
1645 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) + 1648 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
1646 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 1649 ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
1647 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 1650 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
1648 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 1651 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
1649 } else { 1652 } else {
1650 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 1653 stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
1651 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 1654 stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
1652 /* 82598 only has a counter in the high register */ 1655 /* 82598 only has a counter in the high register */
1653 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH); 1656 stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
1654 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN; 1657 stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH) - total * ETHER_MIN_LEN;
1655 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH); 1658 stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
1656 } 1659 }
1657 1660
1658 /* 1661 /*
1659 * Workaround: mprc hardware is incorrectly counting 1662 * Workaround: mprc hardware is incorrectly counting
1660 * broadcasts, so for now we subtract those. 1663 * broadcasts, so for now we subtract those.
1661 */ 1664 */
1662 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 1665 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1663 stats->bprc.ev_count += bprc; 1666 stats->bprc.ev_count += bprc;
1664 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC) 1667 stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
1665 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0); 1668 - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
1666 1669
1667 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64); 1670 stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
1668 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127); 1671 stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
1669 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255); 1672 stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
1670 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511); 1673 stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
1671 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023); 1674 stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1672 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522); 1675 stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1673 1676
1674 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total; 1677 stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
1675 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total; 1678 stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
1676 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total; 1679 stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
1677 1680
1678 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC); 1681 stats->ruc.ev_count += IXGBE_READ_REG(hw, IXGBE_RUC);
1679 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC); 1682 stats->rfc.ev_count += IXGBE_READ_REG(hw, IXGBE_RFC);
1680 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC); 1683 stats->roc.ev_count += IXGBE_READ_REG(hw, IXGBE_ROC);
1681 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC); 1684 stats->rjc.ev_count += IXGBE_READ_REG(hw, IXGBE_RJC);
1682 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 1685 stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
1683 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 1686 stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
1684 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 1687 stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
1685 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR); 1688 stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
1686 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT); 1689 stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
1687 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127); 1690 stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
1688 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255); 1691 stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
1689 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511); 1692 stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
1690 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023); 1693 stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
1691 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522); 1694 stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
1692 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC); 1695 stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
1693 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC); 1696 stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
1694 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC); 1697 stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
1695 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST); 1698 stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
1696 /* Only read FCOE on 82599 */ 1699 /* Only read FCOE on 82599 */
1697 if (hw->mac.type != ixgbe_mac_82598EB) { 1700 if (hw->mac.type != ixgbe_mac_82598EB) {
1698 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 1701 stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
1699 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 1702 stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
1700 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 1703 stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
1701 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 1704 stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
1702 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 1705 stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
1703 } 1706 }
1704 1707
 1705 /* Fill out the OS statistics structure */ 1708 /* Fill out the OS statistics structure */
 1706 /* 1709 /*
 1707 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with 1710 * NetBSD: Don't override if_{i|o}{packets|bytes|mcasts} with
 1708 * adapter->stats counters. It's required to make ifconfig -z 1711 * adapter->stats counters. It's required to make ifconfig -z
 1709 * (SIOCZIFDATA) work. 1712 * (SIOCZIFDATA) work.
 1710 */ 1713 */
 1711 ifp->if_collisions = 0; 1714 ifp->if_collisions = 0;
1712 1715
1713 /* Rx Errors */ 1716 /* Rx Errors */
1714 ifp->if_iqdrops += total_missed_rx; 1717 ifp->if_iqdrops += total_missed_rx;
1715 ifp->if_ierrors += crcerrs + rlec; 1718 ifp->if_ierrors += crcerrs + rlec;
1716} /* ixgbe_update_stats_counters */ 1719} /* ixgbe_update_stats_counters */
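
Note the pattern used above for GORC/GOTC/TOR on 82599 and newer: each octet counter is split across a low and a high register, so the driver widens both halves before combining them (the 82598 only implements the high register). The combining read, reduced to a sketch using the same macros; xx_read_gorc is a hypothetical name:

	static uint64_t
	xx_read_gorc(struct ixgbe_hw *hw)
	{
		/* read low first, as the driver does above */
		uint64_t lo = IXGBE_READ_REG(hw, IXGBE_GORCL);
		uint64_t hi = IXGBE_READ_REG(hw, IXGBE_GORCH);

		return lo | (hi << 32);
	}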
1717 1720
1718/************************************************************************ 1721/************************************************************************
1719 * ixgbe_add_hw_stats 1722 * ixgbe_add_hw_stats
1720 * 1723 *
1721 * Add sysctl variables, one per statistic, to the system. 1724 * Add sysctl variables, one per statistic, to the system.
1722 ************************************************************************/ 1725 ************************************************************************/
1723static void 1726static void
1724ixgbe_add_hw_stats(struct adapter *adapter) 1727ixgbe_add_hw_stats(struct adapter *adapter)
1725{ 1728{
1726 device_t dev = adapter->dev; 1729 device_t dev = adapter->dev;
1727 const struct sysctlnode *rnode, *cnode; 1730 const struct sysctlnode *rnode, *cnode;
1728 struct sysctllog **log = &adapter->sysctllog; 1731 struct sysctllog **log = &adapter->sysctllog;
1729 struct tx_ring *txr = adapter->tx_rings; 1732 struct tx_ring *txr = adapter->tx_rings;
1730 struct rx_ring *rxr = adapter->rx_rings; 1733 struct rx_ring *rxr = adapter->rx_rings;
1731 struct ixgbe_hw *hw = &adapter->hw; 1734 struct ixgbe_hw *hw = &adapter->hw;
1732 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 1735 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
1733 const char *xname = device_xname(dev); 1736 const char *xname = device_xname(dev);
1734 int i; 1737 int i;
1735 1738
1736 /* Driver Statistics */ 1739 /* Driver Statistics */
1737 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 1740 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
1738 NULL, xname, "Driver tx dma soft fail EFBIG"); 1741 NULL, xname, "Driver tx dma soft fail EFBIG");
1739 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, 1742 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
1740 NULL, xname, "m_defrag() failed"); 1743 NULL, xname, "m_defrag() failed");
1741 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 1744 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
1742 NULL, xname, "Driver tx dma hard fail EFBIG"); 1745 NULL, xname, "Driver tx dma hard fail EFBIG");
1743 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, 1746 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
1744 NULL, xname, "Driver tx dma hard fail EINVAL"); 1747 NULL, xname, "Driver tx dma hard fail EINVAL");
1745 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, 1748 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
1746 NULL, xname, "Driver tx dma hard fail other"); 1749 NULL, xname, "Driver tx dma hard fail other");
1747 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 1750 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
1748 NULL, xname, "Driver tx dma soft fail EAGAIN"); 1751 NULL, xname, "Driver tx dma soft fail EAGAIN");
1749 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 1752 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
1750 NULL, xname, "Driver tx dma soft fail ENOMEM"); 1753 NULL, xname, "Driver tx dma soft fail ENOMEM");
1751 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, 1754 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
1752 NULL, xname, "Watchdog timeouts"); 1755 NULL, xname, "Watchdog timeouts");
1753 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, 1756 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
1754 NULL, xname, "TSO errors"); 1757 NULL, xname, "TSO errors");
1755 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR, 1758 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
1756 NULL, xname, "Link MSI-X IRQ Handled"); 1759 NULL, xname, "Link MSI-X IRQ Handled");
1757 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR, 1760 evcnt_attach_dynamic(&adapter->link_sicount, EVCNT_TYPE_INTR,
1758 NULL, xname, "Link softint"); 1761 NULL, xname, "Link softint");
1759 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR, 1762 evcnt_attach_dynamic(&adapter->mod_sicount, EVCNT_TYPE_INTR,
1760 NULL, xname, "module softint"); 1763 NULL, xname, "module softint");
1761 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR, 1764 evcnt_attach_dynamic(&adapter->msf_sicount, EVCNT_TYPE_INTR,
1762 NULL, xname, "multimode softint"); 1765 NULL, xname, "multimode softint");
1763 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR, 1766 evcnt_attach_dynamic(&adapter->phy_sicount, EVCNT_TYPE_INTR,
1764 NULL, xname, "external PHY softint"); 1767 NULL, xname, "external PHY softint");
1765 1768
 1766 /* The maximum number of traffic classes is 8 */ 1769 /* The maximum number of traffic classes is 8 */
1767 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8); 1770 KASSERT(IXGBE_DCB_MAX_TRAFFIC_CLASS == 8);
1768 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 1771 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
1769 snprintf(adapter->tcs[i].evnamebuf, 1772 snprintf(adapter->tcs[i].evnamebuf,
1770 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d", 1773 sizeof(adapter->tcs[i].evnamebuf), "%s tc%d",
1771 xname, i); 1774 xname, i);
1772 if (i < __arraycount(stats->mpc)) { 1775 if (i < __arraycount(stats->mpc)) {
1773 evcnt_attach_dynamic(&stats->mpc[i], 1776 evcnt_attach_dynamic(&stats->mpc[i],
1774 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1777 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1775 "RX Missed Packet Count"); 1778 "RX Missed Packet Count");
1776 if (hw->mac.type == ixgbe_mac_82598EB) 1779 if (hw->mac.type == ixgbe_mac_82598EB)
1777 evcnt_attach_dynamic(&stats->rnbc[i], 1780 evcnt_attach_dynamic(&stats->rnbc[i],
1778 EVCNT_TYPE_MISC, NULL, 1781 EVCNT_TYPE_MISC, NULL,
1779 adapter->tcs[i].evnamebuf, 1782 adapter->tcs[i].evnamebuf,
1780 "Receive No Buffers"); 1783 "Receive No Buffers");
1781 } 1784 }
1782 if (i < __arraycount(stats->pxontxc)) { 1785 if (i < __arraycount(stats->pxontxc)) {
1783 evcnt_attach_dynamic(&stats->pxontxc[i], 1786 evcnt_attach_dynamic(&stats->pxontxc[i],
1784 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1787 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1785 "pxontxc"); 1788 "pxontxc");
1786 evcnt_attach_dynamic(&stats->pxonrxc[i], 1789 evcnt_attach_dynamic(&stats->pxonrxc[i],
1787 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1790 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1788 "pxonrxc"); 1791 "pxonrxc");
1789 evcnt_attach_dynamic(&stats->pxofftxc[i], 1792 evcnt_attach_dynamic(&stats->pxofftxc[i],
1790 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1793 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1791 "pxofftxc"); 1794 "pxofftxc");
1792 evcnt_attach_dynamic(&stats->pxoffrxc[i], 1795 evcnt_attach_dynamic(&stats->pxoffrxc[i],
1793 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf, 1796 EVCNT_TYPE_MISC, NULL, adapter->tcs[i].evnamebuf,
1794 "pxoffrxc"); 1797 "pxoffrxc");
1795 if (hw->mac.type >= ixgbe_mac_82599EB) 1798 if (hw->mac.type >= ixgbe_mac_82599EB)
1796 evcnt_attach_dynamic(&stats->pxon2offc[i], 1799 evcnt_attach_dynamic(&stats->pxon2offc[i],
1797 EVCNT_TYPE_MISC, NULL, 1800 EVCNT_TYPE_MISC, NULL,
1798 adapter->tcs[i].evnamebuf, 1801 adapter->tcs[i].evnamebuf,
1799 "pxon2offc"); 1802 "pxon2offc");
1800 } 1803 }
1801 } 1804 }
1802 1805
1803 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 1806 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
1804#ifdef LRO 1807#ifdef LRO
1805 struct lro_ctrl *lro = &rxr->lro; 1808 struct lro_ctrl *lro = &rxr->lro;
1806#endif /* LRO */ 1809#endif /* LRO */
1807 1810
1808 snprintf(adapter->queues[i].evnamebuf, 1811 snprintf(adapter->queues[i].evnamebuf,
1809 sizeof(adapter->queues[i].evnamebuf), "%s q%d", 1812 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
1810 xname, i); 1813 xname, i);
1811 snprintf(adapter->queues[i].namebuf, 1814 snprintf(adapter->queues[i].namebuf,
1812 sizeof(adapter->queues[i].namebuf), "q%d", i); 1815 sizeof(adapter->queues[i].namebuf), "q%d", i);
1813 1816
1814 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 1817 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
1815 aprint_error_dev(dev, "could not create sysctl root\n"); 1818 aprint_error_dev(dev, "could not create sysctl root\n");
1816 break; 1819 break;
1817 } 1820 }
1818 1821
1819 if (sysctl_createv(log, 0, &rnode, &rnode, 1822 if (sysctl_createv(log, 0, &rnode, &rnode,
1820 0, CTLTYPE_NODE, 1823 0, CTLTYPE_NODE,
1821 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 1824 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
1822 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 1825 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
1823 break; 1826 break;
1824 1827
1825 if (sysctl_createv(log, 0, &rnode, &cnode, 1828 if (sysctl_createv(log, 0, &rnode, &cnode,
1826 CTLFLAG_READWRITE, CTLTYPE_INT, 1829 CTLFLAG_READWRITE, CTLTYPE_INT,
1827 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 1830 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
1828 ixgbe_sysctl_interrupt_rate_handler, 0, 1831 ixgbe_sysctl_interrupt_rate_handler, 0,
1829 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 1832 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
1830 break; 1833 break;
1831 1834
1832 if (sysctl_createv(log, 0, &rnode, &cnode, 1835 if (sysctl_createv(log, 0, &rnode, &cnode,
1833 CTLFLAG_READONLY, CTLTYPE_INT, 1836 CTLFLAG_READONLY, CTLTYPE_INT,
1834 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 1837 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
1835 ixgbe_sysctl_tdh_handler, 0, (void *)txr, 1838 ixgbe_sysctl_tdh_handler, 0, (void *)txr,
1836 0, CTL_CREATE, CTL_EOL) != 0) 1839 0, CTL_CREATE, CTL_EOL) != 0)
1837 break; 1840 break;
1838 1841
1839 if (sysctl_createv(log, 0, &rnode, &cnode, 1842 if (sysctl_createv(log, 0, &rnode, &cnode,
1840 CTLFLAG_READONLY, CTLTYPE_INT, 1843 CTLFLAG_READONLY, CTLTYPE_INT,
1841 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 1844 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
1842 ixgbe_sysctl_tdt_handler, 0, (void *)txr, 1845 ixgbe_sysctl_tdt_handler, 0, (void *)txr,
1843 0, CTL_CREATE, CTL_EOL) != 0) 1846 0, CTL_CREATE, CTL_EOL) != 0)
1844 break; 1847 break;
1845 1848
1846 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, 1849 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
1847 NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); 1850 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
1848 evcnt_attach_dynamic(&adapter->queues[i].handleq, 1851 evcnt_attach_dynamic(&adapter->queues[i].handleq,
1849 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1852 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1850 "Handled queue in softint"); 1853 "Handled queue in softint");
1851 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, 1854 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
1852 NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); 1855 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
1853 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 1856 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
1854 NULL, adapter->queues[i].evnamebuf, "TSO"); 1857 NULL, adapter->queues[i].evnamebuf, "TSO");
1855 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 1858 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
1856 NULL, adapter->queues[i].evnamebuf, 1859 NULL, adapter->queues[i].evnamebuf,
1857 "Queue No Descriptor Available"); 1860 "TX Queue No Descriptor Available");
1858 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 1861 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
1859 NULL, adapter->queues[i].evnamebuf, 1862 NULL, adapter->queues[i].evnamebuf,
1860 "Queue Packets Transmitted"); 1863 "Queue Packets Transmitted");
1861#ifndef IXGBE_LEGACY_TX 1864#ifndef IXGBE_LEGACY_TX
1862 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 1865 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
1863 NULL, adapter->queues[i].evnamebuf, 1866 NULL, adapter->queues[i].evnamebuf,
1864 "Packets dropped in pcq"); 1867 "Packets dropped in pcq");
1865#endif 1868#endif
1866 1869
1867 if (sysctl_createv(log, 0, &rnode, &cnode, 1870 if (sysctl_createv(log, 0, &rnode, &cnode,
1868 CTLFLAG_READONLY, 1871 CTLFLAG_READONLY,
1869 CTLTYPE_INT, 1872 CTLTYPE_INT,
1870 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"), 1873 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
1871 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 1874 ixgbe_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
1872 CTL_CREATE, CTL_EOL) != 0) 1875 CTL_CREATE, CTL_EOL) != 0)
1873 break; 1876 break;
1874 1877
1875 if (sysctl_createv(log, 0, &rnode, &cnode, 1878 if (sysctl_createv(log, 0, &rnode, &cnode,
1876 CTLFLAG_READONLY, 1879 CTLFLAG_READONLY,
1877 CTLTYPE_INT, 1880 CTLTYPE_INT,
1878 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"), 1881 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
1879 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0, 1882 ixgbe_sysctl_rdh_handler, 0, (void *)rxr, 0,
1880 CTL_CREATE, CTL_EOL) != 0) 1883 CTL_CREATE, CTL_EOL) != 0)
1881 break; 1884 break;
1882 1885
1883 if (sysctl_createv(log, 0, &rnode, &cnode, 1886 if (sysctl_createv(log, 0, &rnode, &cnode,
1884 CTLFLAG_READONLY, 1887 CTLFLAG_READONLY,
1885 CTLTYPE_INT, 1888 CTLTYPE_INT,
1886 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"), 1889 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
1887 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0, 1890 ixgbe_sysctl_rdt_handler, 0, (void *)rxr, 0,
1888 CTL_CREATE, CTL_EOL) != 0) 1891 CTL_CREATE, CTL_EOL) != 0)
1889 break; 1892 break;
1890 1893
1891 if (i < __arraycount(stats->qprc)) { 1894 if (i < __arraycount(stats->qprc)) {
1892 evcnt_attach_dynamic(&stats->qprc[i], 1895 evcnt_attach_dynamic(&stats->qprc[i],
1893 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1896 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1894 "qprc"); 1897 "qprc");
1895 evcnt_attach_dynamic(&stats->qptc[i], 1898 evcnt_attach_dynamic(&stats->qptc[i],
1896 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1899 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1897 "qptc"); 1900 "qptc");
1898 evcnt_attach_dynamic(&stats->qbrc[i], 1901 evcnt_attach_dynamic(&stats->qbrc[i],
1899 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1902 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1900 "qbrc"); 1903 "qbrc");
1901 evcnt_attach_dynamic(&stats->qbtc[i], 1904 evcnt_attach_dynamic(&stats->qbtc[i],
1902 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 1905 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
1903 "qbtc"); 1906 "qbtc");
1904 if (hw->mac.type >= ixgbe_mac_82599EB) 1907 if (hw->mac.type >= ixgbe_mac_82599EB)
1905 evcnt_attach_dynamic(&stats->qprdc[i], 1908 evcnt_attach_dynamic(&stats->qprdc[i],
1906 EVCNT_TYPE_MISC, NULL, 1909 EVCNT_TYPE_MISC, NULL,
1907 adapter->queues[i].evnamebuf, "qprdc"); 1910 adapter->queues[i].evnamebuf, "qprdc");
1908 } 1911 }
1909 1912
1910 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 1913 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
1911 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received"); 1914 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
1912 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 1915 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
1913 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received"); 1916 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
1914 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 1917 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
1915 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); 1918 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
1916 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC, 1919 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
1917 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf"); 1920 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
1918 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 1921 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
1919 NULL, adapter->queues[i].evnamebuf, "Rx discarded"); 1922 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
1920#ifdef LRO 1923#ifdef LRO
1921 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 1924 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
1922 CTLFLAG_RD, &lro->lro_queued, 0, 1925 CTLFLAG_RD, &lro->lro_queued, 0,
1923 "LRO Queued"); 1926 "LRO Queued");
1924 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 1927 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
1925 CTLFLAG_RD, &lro->lro_flushed, 0, 1928 CTLFLAG_RD, &lro->lro_flushed, 0,
1926 "LRO Flushed"); 1929 "LRO Flushed");
1927#endif /* LRO */ 1930#endif /* LRO */
1928 } 1931 }
1929 1932
1930 /* MAC stats get their own sub node */ 1933 /* MAC stats get their own sub node */
1931 1934
1932 snprintf(stats->namebuf, 1935 snprintf(stats->namebuf,
1933 sizeof(stats->namebuf), "%s MAC Statistics", xname); 1936 sizeof(stats->namebuf), "%s MAC Statistics", xname);
1934 1937
1935 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 1938 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
1936 stats->namebuf, "rx csum offload - IP"); 1939 stats->namebuf, "rx csum offload - IP");
1937 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 1940 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
1938 stats->namebuf, "rx csum offload - L4"); 1941 stats->namebuf, "rx csum offload - L4");
1939 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 1942 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
1940 stats->namebuf, "rx csum offload - IP bad"); 1943 stats->namebuf, "rx csum offload - IP bad");
1941 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 1944 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
1942 stats->namebuf, "rx csum offload - L4 bad"); 1945 stats->namebuf, "rx csum offload - L4 bad");
1943 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL, 1946 evcnt_attach_dynamic(&stats->intzero, EVCNT_TYPE_MISC, NULL,
1944 stats->namebuf, "Interrupt conditions zero"); 1947 stats->namebuf, "Interrupt conditions zero");
1945 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL, 1948 evcnt_attach_dynamic(&stats->legint, EVCNT_TYPE_MISC, NULL,
1946 stats->namebuf, "Legacy interrupts"); 1949 stats->namebuf, "Legacy interrupts");
1947 1950
1948 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL, 1951 evcnt_attach_dynamic(&stats->crcerrs, EVCNT_TYPE_MISC, NULL,
1949 stats->namebuf, "CRC Errors"); 1952 stats->namebuf, "CRC Errors");
1950 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL, 1953 evcnt_attach_dynamic(&stats->illerrc, EVCNT_TYPE_MISC, NULL,
1951 stats->namebuf, "Illegal Byte Errors"); 1954 stats->namebuf, "Illegal Byte Errors");
1952 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL, 1955 evcnt_attach_dynamic(&stats->errbc, EVCNT_TYPE_MISC, NULL,
1953 stats->namebuf, "Byte Errors"); 1956 stats->namebuf, "Byte Errors");
1954 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL, 1957 evcnt_attach_dynamic(&stats->mspdc, EVCNT_TYPE_MISC, NULL,
1955 stats->namebuf, "MAC Short Packets Discarded"); 1958 stats->namebuf, "MAC Short Packets Discarded");
1956 if (hw->mac.type >= ixgbe_mac_X550) 1959 if (hw->mac.type >= ixgbe_mac_X550)
1957 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL, 1960 evcnt_attach_dynamic(&stats->mbsdc, EVCNT_TYPE_MISC, NULL,
1958 stats->namebuf, "Bad SFD"); 1961 stats->namebuf, "Bad SFD");
1959 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL, 1962 evcnt_attach_dynamic(&stats->mpctotal, EVCNT_TYPE_MISC, NULL,
1960 stats->namebuf, "Total Packets Missed"); 1963 stats->namebuf, "Total Packets Missed");
1961 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL, 1964 evcnt_attach_dynamic(&stats->mlfc, EVCNT_TYPE_MISC, NULL,
1962 stats->namebuf, "MAC Local Faults"); 1965 stats->namebuf, "MAC Local Faults");
1963 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL, 1966 evcnt_attach_dynamic(&stats->mrfc, EVCNT_TYPE_MISC, NULL,
1964 stats->namebuf, "MAC Remote Faults"); 1967 stats->namebuf, "MAC Remote Faults");
1965 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL, 1968 evcnt_attach_dynamic(&stats->rlec, EVCNT_TYPE_MISC, NULL,
1966 stats->namebuf, "Receive Length Errors"); 1969 stats->namebuf, "Receive Length Errors");
1967 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL, 1970 evcnt_attach_dynamic(&stats->lxontxc, EVCNT_TYPE_MISC, NULL,
1968 stats->namebuf, "Link XON Transmitted"); 1971 stats->namebuf, "Link XON Transmitted");
1969 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL, 1972 evcnt_attach_dynamic(&stats->lxonrxc, EVCNT_TYPE_MISC, NULL,
1970 stats->namebuf, "Link XON Received"); 1973 stats->namebuf, "Link XON Received");
1971 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL, 1974 evcnt_attach_dynamic(&stats->lxofftxc, EVCNT_TYPE_MISC, NULL,
1972 stats->namebuf, "Link XOFF Transmitted"); 1975 stats->namebuf, "Link XOFF Transmitted");
1973 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL, 1976 evcnt_attach_dynamic(&stats->lxoffrxc, EVCNT_TYPE_MISC, NULL,
1974 stats->namebuf, "Link XOFF Received"); 1977 stats->namebuf, "Link XOFF Received");
1975 1978
1976 /* Packet Reception Stats */ 1979 /* Packet Reception Stats */
1977 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL, 1980 evcnt_attach_dynamic(&stats->tor, EVCNT_TYPE_MISC, NULL,
1978 stats->namebuf, "Total Octets Received"); 1981 stats->namebuf, "Total Octets Received");
1979 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL, 1982 evcnt_attach_dynamic(&stats->gorc, EVCNT_TYPE_MISC, NULL,
1980 stats->namebuf, "Good Octets Received"); 1983 stats->namebuf, "Good Octets Received");
1981 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL, 1984 evcnt_attach_dynamic(&stats->tpr, EVCNT_TYPE_MISC, NULL,
1982 stats->namebuf, "Total Packets Received"); 1985 stats->namebuf, "Total Packets Received");
1983 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL, 1986 evcnt_attach_dynamic(&stats->gprc, EVCNT_TYPE_MISC, NULL,
1984 stats->namebuf, "Good Packets Received"); 1987 stats->namebuf, "Good Packets Received");
1985 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL, 1988 evcnt_attach_dynamic(&stats->mprc, EVCNT_TYPE_MISC, NULL,
1986 stats->namebuf, "Multicast Packets Received"); 1989 stats->namebuf, "Multicast Packets Received");
1987 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL, 1990 evcnt_attach_dynamic(&stats->bprc, EVCNT_TYPE_MISC, NULL,
1988 stats->namebuf, "Broadcast Packets Received"); 1991 stats->namebuf, "Broadcast Packets Received");
1989 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL, 1992 evcnt_attach_dynamic(&stats->prc64, EVCNT_TYPE_MISC, NULL,
 1990 stats->namebuf, "64 byte frames received"); 1993 stats->namebuf, "64 byte frames received");
1991 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL, 1994 evcnt_attach_dynamic(&stats->prc127, EVCNT_TYPE_MISC, NULL,
1992 stats->namebuf, "65-127 byte frames received"); 1995 stats->namebuf, "65-127 byte frames received");
1993 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL, 1996 evcnt_attach_dynamic(&stats->prc255, EVCNT_TYPE_MISC, NULL,
1994 stats->namebuf, "128-255 byte frames received"); 1997 stats->namebuf, "128-255 byte frames received");
1995 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL, 1998 evcnt_attach_dynamic(&stats->prc511, EVCNT_TYPE_MISC, NULL,
1996 stats->namebuf, "256-511 byte frames received"); 1999 stats->namebuf, "256-511 byte frames received");
1997 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL, 2000 evcnt_attach_dynamic(&stats->prc1023, EVCNT_TYPE_MISC, NULL,
1998 stats->namebuf, "512-1023 byte frames received"); 2001 stats->namebuf, "512-1023 byte frames received");
1999 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL, 2002 evcnt_attach_dynamic(&stats->prc1522, EVCNT_TYPE_MISC, NULL,
 2000 stats->namebuf, "1024-1522 byte frames received"); 2003 stats->namebuf, "1024-1522 byte frames received");
2001 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL, 2004 evcnt_attach_dynamic(&stats->ruc, EVCNT_TYPE_MISC, NULL,
2002 stats->namebuf, "Receive Undersized"); 2005 stats->namebuf, "Receive Undersized");
2003 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL, 2006 evcnt_attach_dynamic(&stats->rfc, EVCNT_TYPE_MISC, NULL,
 2004 stats->namebuf, "Fragmented Packets Received"); 2007 stats->namebuf, "Fragmented Packets Received");
2005 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL, 2008 evcnt_attach_dynamic(&stats->roc, EVCNT_TYPE_MISC, NULL,
2006 stats->namebuf, "Oversized Packets Received"); 2009 stats->namebuf, "Oversized Packets Received");
2007 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL, 2010 evcnt_attach_dynamic(&stats->rjc, EVCNT_TYPE_MISC, NULL,
2008 stats->namebuf, "Received Jabber"); 2011 stats->namebuf, "Received Jabber");
2009 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL, 2012 evcnt_attach_dynamic(&stats->mngprc, EVCNT_TYPE_MISC, NULL,
2010 stats->namebuf, "Management Packets Received"); 2013 stats->namebuf, "Management Packets Received");
2011 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL, 2014 evcnt_attach_dynamic(&stats->mngpdc, EVCNT_TYPE_MISC, NULL,
2012 stats->namebuf, "Management Packets Dropped"); 2015 stats->namebuf, "Management Packets Dropped");
2013 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL, 2016 evcnt_attach_dynamic(&stats->xec, EVCNT_TYPE_MISC, NULL,
2014 stats->namebuf, "Checksum Errors"); 2017 stats->namebuf, "Checksum Errors");
2015 2018
2016 /* Packet Transmission Stats */ 2019 /* Packet Transmission Stats */
2017 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL, 2020 evcnt_attach_dynamic(&stats->gotc, EVCNT_TYPE_MISC, NULL,
2018 stats->namebuf, "Good Octets Transmitted"); 2021 stats->namebuf, "Good Octets Transmitted");
2019 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL, 2022 evcnt_attach_dynamic(&stats->tpt, EVCNT_TYPE_MISC, NULL,
2020 stats->namebuf, "Total Packets Transmitted"); 2023 stats->namebuf, "Total Packets Transmitted");
2021 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL, 2024 evcnt_attach_dynamic(&stats->gptc, EVCNT_TYPE_MISC, NULL,
2022 stats->namebuf, "Good Packets Transmitted"); 2025 stats->namebuf, "Good Packets Transmitted");
2023 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL, 2026 evcnt_attach_dynamic(&stats->bptc, EVCNT_TYPE_MISC, NULL,
2024 stats->namebuf, "Broadcast Packets Transmitted"); 2027 stats->namebuf, "Broadcast Packets Transmitted");
2025 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL, 2028 evcnt_attach_dynamic(&stats->mptc, EVCNT_TYPE_MISC, NULL,
2026 stats->namebuf, "Multicast Packets Transmitted"); 2029 stats->namebuf, "Multicast Packets Transmitted");
2027 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL, 2030 evcnt_attach_dynamic(&stats->mngptc, EVCNT_TYPE_MISC, NULL,
2028 stats->namebuf, "Management Packets Transmitted"); 2031 stats->namebuf, "Management Packets Transmitted");
2029 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL, 2032 evcnt_attach_dynamic(&stats->ptc64, EVCNT_TYPE_MISC, NULL,
 2030 stats->namebuf, "64 byte frames transmitted"); 2033 stats->namebuf, "64 byte frames transmitted");
2031 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL, 2034 evcnt_attach_dynamic(&stats->ptc127, EVCNT_TYPE_MISC, NULL,
2032 stats->namebuf, "65-127 byte frames transmitted"); 2035 stats->namebuf, "65-127 byte frames transmitted");
2033 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL, 2036 evcnt_attach_dynamic(&stats->ptc255, EVCNT_TYPE_MISC, NULL,
2034 stats->namebuf, "128-255 byte frames transmitted"); 2037 stats->namebuf, "128-255 byte frames transmitted");
2035 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL, 2038 evcnt_attach_dynamic(&stats->ptc511, EVCNT_TYPE_MISC, NULL,
2036 stats->namebuf, "256-511 byte frames transmitted"); 2039 stats->namebuf, "256-511 byte frames transmitted");
2037 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL, 2040 evcnt_attach_dynamic(&stats->ptc1023, EVCNT_TYPE_MISC, NULL,
2038 stats->namebuf, "512-1023 byte frames transmitted"); 2041 stats->namebuf, "512-1023 byte frames transmitted");
2039 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL, 2042 evcnt_attach_dynamic(&stats->ptc1522, EVCNT_TYPE_MISC, NULL,
2040 stats->namebuf, "1024-1522 byte frames transmitted"); 2043 stats->namebuf, "1024-1522 byte frames transmitted");
2041} /* ixgbe_add_hw_stats */ 2044} /* ixgbe_add_hw_stats */
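
All of the statistics above are plain evcnt(9) counters, which is what makes them visible in vmstat -e and resettable below in ixgbe_clear_evcnt(). The lifecycle, reduced to a sketch with hypothetical names:

	static struct evcnt xx_rx_drops;	/* hypothetical counter */

	/* attach once; "xx0"/"rx drops" become the vmstat -e group/name */
	evcnt_attach_dynamic(&xx_rx_drops, EVCNT_TYPE_MISC, NULL,
	    "xx0", "rx drops");

	/* bump from the datapath */
	xx_rx_drops.ev_count++;

	/* detach before the backing storage goes away */
	evcnt_detach(&xx_rx_drops);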
2042 2045
2043static void 2046static void
2044ixgbe_clear_evcnt(struct adapter *adapter) 2047ixgbe_clear_evcnt(struct adapter *adapter)
2045{ 2048{
2046 struct tx_ring *txr = adapter->tx_rings; 2049 struct tx_ring *txr = adapter->tx_rings;
2047 struct rx_ring *rxr = adapter->rx_rings; 2050 struct rx_ring *rxr = adapter->rx_rings;
2048 struct ixgbe_hw *hw = &adapter->hw; 2051 struct ixgbe_hw *hw = &adapter->hw;
2049 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 2052 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
2050 int i; 2053 int i;
2051 2054
2052 adapter->efbig_tx_dma_setup.ev_count = 0; 2055 adapter->efbig_tx_dma_setup.ev_count = 0;
2053 adapter->mbuf_defrag_failed.ev_count = 0; 2056 adapter->mbuf_defrag_failed.ev_count = 0;
2054 adapter->efbig2_tx_dma_setup.ev_count = 0; 2057 adapter->efbig2_tx_dma_setup.ev_count = 0;
2055 adapter->einval_tx_dma_setup.ev_count = 0; 2058 adapter->einval_tx_dma_setup.ev_count = 0;
2056 adapter->other_tx_dma_setup.ev_count = 0; 2059 adapter->other_tx_dma_setup.ev_count = 0;
2057 adapter->eagain_tx_dma_setup.ev_count = 0; 2060 adapter->eagain_tx_dma_setup.ev_count = 0;
2058 adapter->enomem_tx_dma_setup.ev_count = 0; 2061 adapter->enomem_tx_dma_setup.ev_count = 0;
2059 adapter->tso_err.ev_count = 0; 2062 adapter->tso_err.ev_count = 0;
2060 adapter->watchdog_events.ev_count = 0; 2063 adapter->watchdog_events.ev_count = 0;
2061 adapter->link_irq.ev_count = 0; 2064 adapter->link_irq.ev_count = 0;
2062 adapter->link_sicount.ev_count = 0; 2065 adapter->link_sicount.ev_count = 0;
2063 adapter->mod_sicount.ev_count = 0; 2066 adapter->mod_sicount.ev_count = 0;
2064 adapter->msf_sicount.ev_count = 0; 2067 adapter->msf_sicount.ev_count = 0;
2065 adapter->phy_sicount.ev_count = 0; 2068 adapter->phy_sicount.ev_count = 0;
2066 2069
2067 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 2070 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
2068 if (i < __arraycount(stats->mpc)) { 2071 if (i < __arraycount(stats->mpc)) {
2069 stats->mpc[i].ev_count = 0; 2072 stats->mpc[i].ev_count = 0;
2070 if (hw->mac.type == ixgbe_mac_82598EB) 2073 if (hw->mac.type == ixgbe_mac_82598EB)
2071 stats->rnbc[i].ev_count = 0; 2074 stats->rnbc[i].ev_count = 0;
2072 } 2075 }
2073 if (i < __arraycount(stats->pxontxc)) { 2076 if (i < __arraycount(stats->pxontxc)) {
2074 stats->pxontxc[i].ev_count = 0; 2077 stats->pxontxc[i].ev_count = 0;
2075 stats->pxonrxc[i].ev_count = 0; 2078 stats->pxonrxc[i].ev_count = 0;
2076 stats->pxofftxc[i].ev_count = 0; 2079 stats->pxofftxc[i].ev_count = 0;
2077 stats->pxoffrxc[i].ev_count = 0; 2080 stats->pxoffrxc[i].ev_count = 0;
2078 if (hw->mac.type >= ixgbe_mac_82599EB) 2081 if (hw->mac.type >= ixgbe_mac_82599EB)
2079 stats->pxon2offc[i].ev_count = 0; 2082 stats->pxon2offc[i].ev_count = 0;
2080 } 2083 }
2081 } 2084 }
2082 2085
2083 txr = adapter->tx_rings; 2086 txr = adapter->tx_rings;
2084 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2087 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2085 adapter->queues[i].irqs.ev_count = 0; 2088 adapter->queues[i].irqs.ev_count = 0;
2086 adapter->queues[i].handleq.ev_count = 0; 2089 adapter->queues[i].handleq.ev_count = 0;
2087 adapter->queues[i].req.ev_count = 0; 2090 adapter->queues[i].req.ev_count = 0;
2088 txr->no_desc_avail.ev_count = 0; 2091 txr->no_desc_avail.ev_count = 0;
2089 txr->total_packets.ev_count = 0; 2092 txr->total_packets.ev_count = 0;
2090 txr->tso_tx.ev_count = 0; 2093 txr->tso_tx.ev_count = 0;
2091#ifndef IXGBE_LEGACY_TX 2094#ifndef IXGBE_LEGACY_TX
2092 txr->pcq_drops.ev_count = 0; 2095 txr->pcq_drops.ev_count = 0;
2093#endif 2096#endif
2094 txr->q_efbig_tx_dma_setup = 0; 2097 txr->q_efbig_tx_dma_setup = 0;
2095 txr->q_mbuf_defrag_failed = 0; 2098 txr->q_mbuf_defrag_failed = 0;
		txr->q_efbig2_tx_dma_setup = 0;
		txr->q_einval_tx_dma_setup = 0;
		txr->q_other_tx_dma_setup = 0;
		txr->q_eagain_tx_dma_setup = 0;
		txr->q_enomem_tx_dma_setup = 0;
		txr->q_tso_err = 0;

		if (i < __arraycount(stats->qprc)) {
			stats->qprc[i].ev_count = 0;
			stats->qptc[i].ev_count = 0;
			stats->qbrc[i].ev_count = 0;
			stats->qbtc[i].ev_count = 0;
			if (hw->mac.type >= ixgbe_mac_82599EB)
				stats->qprdc[i].ev_count = 0;
		}

		rxr->rx_packets.ev_count = 0;
		rxr->rx_bytes.ev_count = 0;
		rxr->rx_copies.ev_count = 0;
		rxr->no_jmbuf.ev_count = 0;
		rxr->rx_discarded.ev_count = 0;
	}
	stats->ipcs.ev_count = 0;
	stats->l4cs.ev_count = 0;
	stats->ipcs_bad.ev_count = 0;
	stats->l4cs_bad.ev_count = 0;
	stats->intzero.ev_count = 0;
	stats->legint.ev_count = 0;
	stats->crcerrs.ev_count = 0;
	stats->illerrc.ev_count = 0;
	stats->errbc.ev_count = 0;
	stats->mspdc.ev_count = 0;
	if (hw->mac.type >= ixgbe_mac_X550)
		stats->mbsdc.ev_count = 0;
	stats->mpctotal.ev_count = 0;
	stats->mlfc.ev_count = 0;
	stats->mrfc.ev_count = 0;
	stats->rlec.ev_count = 0;
	stats->lxontxc.ev_count = 0;
	stats->lxonrxc.ev_count = 0;
	stats->lxofftxc.ev_count = 0;
	stats->lxoffrxc.ev_count = 0;

	/* Packet Reception Stats */
	stats->tor.ev_count = 0;
	stats->gorc.ev_count = 0;
	stats->tpr.ev_count = 0;
	stats->gprc.ev_count = 0;
	stats->mprc.ev_count = 0;
	stats->bprc.ev_count = 0;
	stats->prc64.ev_count = 0;
	stats->prc127.ev_count = 0;
	stats->prc255.ev_count = 0;
	stats->prc511.ev_count = 0;
	stats->prc1023.ev_count = 0;
	stats->prc1522.ev_count = 0;
	stats->ruc.ev_count = 0;
	stats->rfc.ev_count = 0;
	stats->roc.ev_count = 0;
	stats->rjc.ev_count = 0;
	stats->mngprc.ev_count = 0;
	stats->mngpdc.ev_count = 0;
	stats->xec.ev_count = 0;

	/* Packet Transmission Stats */
	stats->gotc.ev_count = 0;
	stats->tpt.ev_count = 0;
	stats->gptc.ev_count = 0;
	stats->bptc.ev_count = 0;
	stats->mptc.ev_count = 0;
	stats->mngptc.ev_count = 0;
	stats->ptc64.ev_count = 0;
	stats->ptc127.ev_count = 0;
	stats->ptc255.ev_count = 0;
	stats->ptc511.ev_count = 0;
	stats->ptc1023.ev_count = 0;
	stats->ptc1522.ev_count = 0;
}

/************************************************************************
 * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function
 *
 * Retrieves the TDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t val;

	if (!txr)
		return (0);

	adapter = txr->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me));
	node.sysctl_data = &val;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
} /* ixgbe_sysctl_tdh_handler */

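/*
 * For reference, a minimal sketch (not part of this diff) of how a
 * read-only per-ring node like the one above is typically attached
 * with sysctl_createv(9).  The node name, parent nodes and error
 * handling here are illustrative assumptions, not the driver's
 * exact registration code.
 */
	if (sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_READONLY, CTLTYPE_INT, "txd_head",
	    SYSCTL_DESCR("Transmit Descriptor Head"),
	    ixgbe_sysctl_tdh_handler, 0, (void *)txr, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		break;
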
/************************************************************************
 * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function
 *
 * Retrieves the TDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t val;

	if (!txr)
		return (0);

	adapter = txr->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	val = IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me));
	node.sysctl_data = &val;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
} /* ixgbe_sysctl_tdt_handler */

/************************************************************************
 * ixgbe_sysctl_next_to_check_handler - Receive Descriptor next to check
 *                                      handler function
 *
 * Retrieves the next_to_check value
 ************************************************************************/
static int
ixgbe_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t val;

	if (!rxr)
		return (0);

	adapter = rxr->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	val = rxr->next_to_check;
	node.sysctl_data = &val;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
} /* ixgbe_sysctl_next_to_check_handler */

/************************************************************************
 * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function
 *
 * Retrieves the RDH value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t val;

	if (!rxr)
		return (0);

	adapter = rxr->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(rxr->me));
	node.sysctl_data = &val;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
} /* ixgbe_sysctl_rdh_handler */

/************************************************************************
 * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function
 *
 * Retrieves the RDT value from the hardware
 ************************************************************************/
static int
ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
	struct adapter *adapter;
	uint32_t val;

	if (!rxr)
		return (0);

	adapter = rxr->adapter;
	if (ixgbe_fw_recovery_mode_swflag(adapter))
		return (EPERM);

	val = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(rxr->me));
	node.sysctl_data = &val;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
} /* ixgbe_sysctl_rdt_handler */

static int
ixgbe_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
{
	struct ifnet *ifp = &ec->ec_if;
	struct adapter *adapter = ifp->if_softc;
	int rv;

	if (set)
		rv = ixgbe_register_vlan(adapter, vid);
	else
		rv = ixgbe_unregister_vlan(adapter, vid);

	if (rv != 0)
		return rv;

	/*
	 * Control VLAN HW tagging when ec_nvlans changes from 1 to 0
	 * or from 0 to 1.
	 */
	if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
		ixgbe_setup_vlan_hw_tagging(adapter);

	return rv;
}

/************************************************************************
 * ixgbe_register_vlan
 *
 * Run via the vlan config EVENT.  It enables us to use the
 * HW Filter table since we can get the vlan id.  This just
 * creates the entry in the soft version of the VFTA; init
 * will repopulate the real table.
 ************************************************************************/
static int
ixgbe_register_vlan(struct adapter *adapter, u16 vtag)
{
	u16 index, bit;
	int error;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return EINVAL;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= ((u32)1 << bit);
	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, true,
	    true);
	IXGBE_CORE_UNLOCK(adapter);
	if (error != 0)
		error = EACCES;

	return error;
} /* ixgbe_register_vlan */

/************************************************************************
 * ixgbe_unregister_vlan
 *
 * Run via the vlan unconfig EVENT; removes our entry in the soft vfta.
 ************************************************************************/
static int
ixgbe_unregister_vlan(struct adapter *adapter, u16 vtag)
{
	u16 index, bit;
	int error;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return EINVAL;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~((u32)1 << bit);
	error = adapter->hw.mac.ops.set_vfta(&adapter->hw, vtag, 0, false,
	    true);
	IXGBE_CORE_UNLOCK(adapter);
	if (error != 0)
		error = EACCES;

	return error;
} /* ixgbe_unregister_vlan */
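
/*
 * A worked example of the (index, bit) split used above, with an
 * illustrative VLAN id; compilable standalone:
 */
#include <stdio.h>

int
main(void)
{
	unsigned vtag = 1234;			/* example VLAN id */
	unsigned index = (vtag >> 5) & 0x7F;	/* word 38 of shadow_vfta */
	unsigned bit = vtag & 0x1F;		/* bit 18 within that word */

	/* Same mapping as the vid / 32 and vid % 32 forms used in
	 * ixgbe_setup_vlan_hw_support() below. */
	printf("shadow_vfta[%u] |= 1 << %u\n", index, bit);
	return 0;
}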

static void
ixgbe_setup_vlan_hw_tagging(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring *rxr;
	u32 ctrl;
	int i;
	bool hwtagging;

	/* Enable HW tagging only if any vlan is attached */
	hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
	    && VLAN_ATTACHED(ec);

	/* Setup the queues for vlans */
	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/*
		 * On 82599 and later, the VLAN enable is per-queue in
		 * RXDCTL.
		 */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
			if (hwtagging)
				ctrl |= IXGBE_RXDCTL_VME;
			else
				ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
		}
		rxr->vtag_strip = hwtagging ? TRUE : FALSE;
	}

	/* VLAN hw tagging for 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		if (hwtagging)
			ctrl |= IXGBE_VLNCTRL_VME;
		else
			ctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	}
} /* ixgbe_setup_vlan_hw_tagging */
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ethercom *ec = &adapter->osdep.ec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;
	u32 ctrl;
	struct vlanid_list *vlanidp;

	/*
	 * This function is called from both if_init and ifflags_cb()
	 * on NetBSD.
	 */

	/*
	 * Part 1:
	 * Setup VLAN HW tagging
	 */
	ixgbe_setup_vlan_hw_tagging(adapter);

	/*
	 * Part 2:
	 * Setup VLAN HW filter
	 */
	/* Cleanup shadow_vfta */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		adapter->shadow_vfta[i] = 0;
	/* Generate shadow_vfta from ec_vids */
	ETHER_LOCK(ec);
	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
		uint32_t idx;

		idx = vlanidp->vid / 32;
		KASSERT(idx < IXGBE_VFTA_SIZE);
		adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
	}
	ETHER_UNLOCK(ec);
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWFILTER)
		ctrl |= IXGBE_VLNCTRL_VFE;
	else
		ctrl &= ~IXGBE_VLNCTRL_VFE;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
} /* ixgbe_setup_vlan_hw_support */

/************************************************************************
 * ixgbe_get_slot_info
 *
 * Get the width and transaction speed of
 * the slot this adapter is plugged into.
 ************************************************************************/
static void
ixgbe_get_slot_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 offset;
	u16 link;
	int bus_info_valid = TRUE;

	/* Some devices are behind an internal bridge */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP_SF_QP:
	case IXGBE_DEV_ID_82599_QSFP_SF_QP:
		goto get_parent_info;
	default:
		break;
	}

	ixgbe_get_bus_info(hw);

	/*
	 * Some devices don't use PCI-E, so there is no need
	 * to display "Unknown" for bus speed and width.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return;
	default:
		goto display;
	}

get_parent_info:
	/*
	 * For the Quad port adapter we need to parse back
	 * up the PCI tree to find the speed of the expansion
	 * slot into which this adapter is plugged.  A bit more work.
	 */
	dev = device_parent(device_parent(dev));
#if 0
#ifdef IXGBE_DEBUG
	device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
	dev = device_parent(device_parent(dev));
#ifdef IXGBE_DEBUG
	device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev));
#endif
#endif
	/* Now get the PCI Express Capabilities offset */
	if (pci_get_capability(adapter->osdep.pc, adapter->osdep.tag,
	    PCI_CAP_PCIEXPRESS, &offset, NULL)) {
		/*
		 * Hmm...can't get PCI-Express capabilities.
		 * Falling back to default method.
		 */
		bus_info_valid = FALSE;
		ixgbe_get_bus_info(hw);
		goto display;
	}
	/* ...and read the Link Status Register */
	link = pci_conf_read(adapter->osdep.pc, adapter->osdep.tag,
	    offset + PCIE_LCSR) >> 16;
	ixgbe_set_pci_config_data_generic(hw, link);

display:
	device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
	     (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
	     "Unknown"),
	    ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
	     (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
	     "Unknown"));

	if (bus_info_valid) {
		if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
		    (hw->bus.speed == ixgbe_bus_speed_2500))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE, or x4 PCIE Gen2 slot is required.\n");
		}
		if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
		    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
		    (hw->bus.speed < ixgbe_bus_speed_8000))) {
			device_printf(dev, "PCI-Express bandwidth available"
			    " for this card is not sufficient for"
			    " optimal performance.\n");
			device_printf(dev, "For optimal performance a x8 "
			    "PCIE Gen3 slot is required.\n");
		}
	} else
		device_printf(dev, "Unable to determine slot speed/width.  "
		    "The speed/width reported are that of the internal "
		    "switch.\n");

	return;
} /* ixgbe_get_slot_info */
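
/*
 * A minimal sketch of decoding the 16-bit Link Status value read
 * above, using the field layout from the PCIe specification
 * (current link speed in bits 3:0, negotiated link width in bits
 * 9:4); the helper name and example value are illustrative, not
 * driver API.  Compilable standalone:
 */
#include <stdio.h>

static const char *
pcie_speed_str(unsigned link)
{
	switch (link & 0xf) {
	case 1: return "2.5GT/s";
	case 2: return "5.0GT/s";
	case 3: return "8.0GT/s";
	default: return "Unknown";
	}
}

int
main(void)
{
	unsigned link = 0x0042;	/* example: speed code 2, width 4 */

	printf("Speed %s Width x%u\n", pcie_speed_str(link),
	    (link >> 4) & 0x3f);
	return 0;
}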

/************************************************************************
 * ixgbe_enable_queue - MSI-X Interrupt Handlers and Tasklets
 ************************************************************************/
static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = 1ULL << vector;
	u32 mask;

	mutex_enter(&que->dc_mtx);
	if (que->disabled_count > 0 && --que->disabled_count > 0)
		goto out;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_enable_queue */

/************************************************************************
 * ixgbe_disable_queue_internal
 ************************************************************************/
static inline void
ixgbe_disable_queue_internal(struct adapter *adapter, u32 vector, bool nestok)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = &adapter->queues[vector];
	u64 queue = 1ULL << vector;
	u32 mask;

	mutex_enter(&que->dc_mtx);

	if (que->disabled_count > 0) {
		if (nestok)
			que->disabled_count++;
		goto out;
	}
	que->disabled_count++;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
out:
	mutex_exit(&que->dc_mtx);
} /* ixgbe_disable_queue_internal */

/************************************************************************
 * ixgbe_disable_queue
 ************************************************************************/
static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{

	ixgbe_disable_queue_internal(adapter, vector, true);
} /* ixgbe_disable_queue */
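
/*
 * The disabled_count above makes disable/enable nest; a sketch of
 * the resulting rule (adapter and vector v assumed in scope):
 *
 *	ixgbe_disable_queue(adapter, v);  count 0 -> 1, EIMC written
 *	ixgbe_disable_queue(adapter, v);  count 1 -> 2, no HW access
 *	ixgbe_enable_queue(adapter, v);   count 2 -> 1, still masked
 *	ixgbe_enable_queue(adapter, v);   count 1 -> 0, EIMS written
 *
 * Only the outermost enable actually unmasks the queue interrupt.
 */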

/************************************************************************
 * ixgbe_sched_handle_que - schedule deferred packet processing
 ************************************************************************/
static inline void
ixgbe_sched_handle_que(struct adapter *adapter, struct ix_queue *que)
{

	if (que->txrx_use_workqueue) {
		/*
		 * adapter->que_wq is bound to each CPU instead of each
		 * NIC queue to reduce the number of workqueue kthreads.
		 * Because this function must take interrupt affinity
		 * into account, the workqueue kthread must be WQ_PERCPU.
		 * If we created a WQ_PERCPU workqueue kthread for each
		 * NIC queue, the number of kthreads would be (number of
		 * used NIC queues) * (number of CPUs), which is (number
		 * of CPUs) ^ 2 most often.
		 *
		 * Repeat interrupts from the same NIC queue are avoided
		 * by masking the queue's interrupt, and different NIC
		 * queues use different struct work (que->wq_cookie).
		 * So an "enqueued" flag to avoid calling
		 * workqueue_enqueue() twice is not required.
		 */
		workqueue_enqueue(adapter->que_wq, &que->wq_cookie, curcpu());
	} else {
		softint_schedule(que->que_si);
	}
}

/************************************************************************
 * ixgbe_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixgbe_msix_que(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct ifnet *ifp = adapter->ifp;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	bool more;
	u32 newitr = 0;

	/* Protect against spurious interrupts */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs.ev_count;

	/*
	 * Don't change "que->txrx_use_workqueue" from this point to avoid
	 * flip-flopping softint/workqueue mode in one deferred processing.
	 */
	que->txrx_use_workqueue = adapter->txrx_use_workqueue;

#ifdef __NetBSD__
	/* Don't run ixgbe_rxeof in interrupt context */
	more = true;
#else
	more = ixgbe_rxeof(que);
#endif

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (adapter->enable_aim == false)
		goto no_calc;
	/*
	 * Do Adaptive Interrupt Moderation:
	 *  - Write out last calculated setting
	 *  - Calculate based on average size over
	 *    the last interval.
	 */
	if (que->eitr_setting)
		ixgbe_eitr_write(adapter, que->msix, que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = uimax(newitr, (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = uimin(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/*
	 * When RSC is used, the ITR interval must be larger than
	 * RSC_DELAY.  Currently, we use 2us for RSC_DELAY.  The minimum
	 * value is always greater than 2us on 100M (and on 10M, though
	 * that is not documented), but it is not on 1G and higher.
	 */
	if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
	    && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
		if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
			newitr = IXGBE_MIN_RSC_EITR_10G1G;
	}

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		ixgbe_sched_handle_que(adapter, que);
	else
		ixgbe_enable_queue(adapter, que->msix);

	return 1;
} /* ixgbe_msix_que */
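
/*
 * A worked AIM example with illustrative traffic (average frame
 * sizes of 600 bytes TX and 1500 bytes RX over the last interval):
 *
 *	newitr = max(600, 1500) + 24 = 1524	(frame + CRC overhead)
 *	newitr = min(1524, 3000)     = 1524	(upper bound)
 *	1524 outside (300, 1200)    -> 1524 / 2 = 762
 *
 * so 762 is written to EITR on the next interrupt: larger average
 * frames yield a longer moderation interval.  Whether the RSC floor
 * then raises it depends on IXGBE_MIN_RSC_EITR_10G1G and the link
 * speed.
 */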

/************************************************************************
 * ixgbe_media_status - Media Ioctl callback
 *
 * Called whenever the user queries the status of
 * the interface using ifconfig.
 ************************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ixgbe_hw *hw = &adapter->hw;
	int layer;

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (adapter->link_active != LINK_STATE_UP) {
		ifmr->ifm_active |= IFM_NONE;
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	layer = adapter->phy_layer;

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_5GBASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
			break;
		}
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
			break;
		}
	/*
	 * XXX: These need to use the proper media types once
	 * they're added.
	 */
	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}
	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
			break;
		}

	/* If nothing is recognized... */
#if 0
	if (IFM_SUBTYPE(ifmr->ifm_active) == 0)
		ifmr->ifm_active |= IFM_UNKNOWN;
#endif

	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);

	/* Display current flow control setting used on link */
	if (hw->fc.current_mode == ixgbe_fc_rx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
	if (hw->fc.current_mode == ixgbe_fc_tx_pause ||
	    hw->fc.current_mode == ixgbe_fc_full)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;

	IXGBE_CORE_UNLOCK(adapter);

	return;
} /* ixgbe_media_status */

/************************************************************************
 * ixgbe_media_change - Media Ioctl callback
 *
 * Called when the user changes speed/duplex using
 * the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed speed = 0;
	ixgbe_link_speed link_caps = 0;
	bool negotiate = false;
	s32 err = IXGBE_NOT_IMPLEMENTED;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (hw->phy.media_type == ixgbe_media_type_backplane)
		return (EPERM);

	IXGBE_CORE_LOCK(adapter);
	/*
	 * We don't actually need to check against the supported
	 * media types of the adapter; ifmedia will take care of
	 * that for us.
	 */
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
		    &negotiate);
		if (err != IXGBE_SUCCESS) {
			device_printf(adapter->dev, "Unable to determine "
			    "supported advertise speeds\n");
			IXGBE_CORE_UNLOCK(adapter);
			return (ENODEV);
		}
		speed |= link_caps;
		break;
	case IFM_10G_T:
	case IFM_10G_LRM:
	case IFM_10G_LR:
	case IFM_10G_TWINAX:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_KR:
	case IFM_10G_KX4:
		speed |= IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IFM_5000_T:
		speed |= IXGBE_LINK_SPEED_5GB_FULL;
		break;
	case IFM_2500_T:
	case IFM_2500_KX:
		speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		break;
	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_KX:
		speed |= IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IFM_100_TX:
		speed |= IXGBE_LINK_SPEED_100_FULL;
		break;
	case IFM_10_T:
		speed |= IXGBE_LINK_SPEED_10_FULL;
		break;
	case IFM_NONE:
		break;
	default:
		goto invalid;
	}

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE);
	adapter->advertise = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
		if ((speed & IXGBE_LINK_SPEED_10GB_FULL) != 0)
			adapter->advertise |= 1 << 2;
		if ((speed & IXGBE_LINK_SPEED_1GB_FULL) != 0)
			adapter->advertise |= 1 << 1;
		if ((speed & IXGBE_LINK_SPEED_100_FULL) != 0)
			adapter->advertise |= 1 << 0;
		if ((speed & IXGBE_LINK_SPEED_10_FULL) != 0)
			adapter->advertise |= 1 << 3;
		if ((speed & IXGBE_LINK_SPEED_2_5GB_FULL) != 0)
			adapter->advertise |= 1 << 4;
		if ((speed & IXGBE_LINK_SPEED_5GB_FULL) != 0)
			adapter->advertise |= 1 << 5;
	}

	IXGBE_CORE_UNLOCK(adapter);
	return (0);

invalid:
	device_printf(adapter->dev, "Invalid media type!\n");
	IXGBE_CORE_UNLOCK(adapter);

	return (EINVAL);
} /* ixgbe_media_change */
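
/*
 * The adapter->advertise bitmap rebuilt above matches the bit
 * assignments of the hw.ixgN.advertise_speed sysctl mentioned in
 * this pull-up:
 *
 *	bit 0 (0x01) = 100M	bit 1 (0x02) = 1G
 *	bit 2 (0x04) = 10G	bit 3 (0x08) = 10M
 *	bit 4 (0x10) = 2.5G	bit 5 (0x20) = 5G
 *
 * e.g. selecting a fixed 1G medium via ifconfig (illustrative:
 * "ifconfig ixg0 media 1000baseT") leaves only 0x02 set, while
 * IFM_AUTO leaves the bitmap 0.
 */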
3042 3045
3043/************************************************************************ 3046/************************************************************************
3044 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) 3047 * ixgbe_msix_link - Link status change ISR (MSI/MSI-X)
3045 ************************************************************************/ 3048 ************************************************************************/
3046static int 3049static int
3047ixgbe_msix_link(void *arg) 3050ixgbe_msix_link(void *arg)
3048{ 3051{
3049 struct adapter *adapter = arg; 3052 struct adapter *adapter = arg;
3050 struct ixgbe_hw *hw = &adapter->hw; 3053 struct ixgbe_hw *hw = &adapter->hw;
3051 u32 eicr, eicr_mask; 3054 u32 eicr, eicr_mask;
3052 s32 retval; 3055 s32 retval;
3053 3056
3054 ++adapter->link_irq.ev_count; 3057 ++adapter->link_irq.ev_count;
3055 3058
3056 /* Pause other interrupts */ 3059 /* Pause other interrupts */
3057 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); 3060 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
3058 3061
3059 /* First get the cause */ 3062 /* First get the cause */
3060 /* 3063 /*
 3061 * The specifications for the 82598, 82599, X540 and X550 say the 3064 * The specifications for the 82598, 82599, X540 and X550 say the
 3062 * EICS register is write-only. However, Linux reads EICS instead 3065 * EICS register is write-only. However, Linux reads EICS instead
 3063 * of EICR as a workaround for silicon errata: there appears to be 3066 * of EICR as a workaround for silicon errata: there appears to be
 3064 * a problem with the read-to-clear mechanism of the EICR register. 3067 * a problem with the read-to-clear mechanism of the EICR register.
3065 */ 3068 */
3066 eicr = IXGBE_READ_REG(hw, IXGBE_EICS); 3069 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
3067 /* Be sure the queue bits are not cleared */ 3070 /* Be sure the queue bits are not cleared */
3068 eicr &= ~IXGBE_EICR_RTX_QUEUE; 3071 eicr &= ~IXGBE_EICR_RTX_QUEUE;
3069 /* Clear interrupt with write */ 3072 /* Clear interrupt with write */
3070 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3073 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3071 3074
3072 if (ixgbe_is_sfp(hw)) { 3075 if (ixgbe_is_sfp(hw)) {
3073 /* Pluggable optics-related interrupt */ 3076 /* Pluggable optics-related interrupt */
3074 if (hw->mac.type >= ixgbe_mac_X540) 3077 if (hw->mac.type >= ixgbe_mac_X540)
3075 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 3078 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
3076 else 3079 else
3077 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 3080 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
3078 3081
3079 /* 3082 /*
3080 * An interrupt might not arrive when a module is inserted. 3083 * An interrupt might not arrive when a module is inserted.
 3081 * When a link status change interrupt occurs and the driver 3084 * When a link status change interrupt occurs and the driver
 3082 * still regards the SFP as unplugged, issue the module softint 3085 * still regards the SFP as unplugged, issue the module softint
 3083 * and then the LSC interrupt. 3086 * and then the LSC interrupt.
3084 */ 3087 */
3085 if ((eicr & eicr_mask) 3088 if ((eicr & eicr_mask)
3086 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present) 3089 || ((hw->phy.sfp_type == ixgbe_sfp_type_not_present)
3087 && (eicr & IXGBE_EICR_LSC))) { 3090 && (eicr & IXGBE_EICR_LSC))) {
3088 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 3091 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
3089 softint_schedule(adapter->mod_si); 3092 softint_schedule(adapter->mod_si);
3090 } 3093 }
3091 3094
3092 if ((hw->mac.type == ixgbe_mac_82599EB) && 3095 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3093 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 3096 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
3094 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3097 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3095 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3098 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3096 softint_schedule(adapter->msf_si); 3099 softint_schedule(adapter->msf_si);
3097 } 3100 }
3098 } 3101 }
3099 3102
3100 /* Link status change */ 3103 /* Link status change */
3101 if (eicr & IXGBE_EICR_LSC) { 3104 if (eicr & IXGBE_EICR_LSC) {
3102 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 3105 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
3103 softint_schedule(adapter->link_si); 3106 softint_schedule(adapter->link_si);
3104 } 3107 }
3105 3108
3106 if (adapter->hw.mac.type != ixgbe_mac_82598EB) { 3109 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
3107 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && 3110 if ((adapter->feat_en & IXGBE_FEATURE_FDIR) &&
3108 (eicr & IXGBE_EICR_FLOW_DIR)) { 3111 (eicr & IXGBE_EICR_FLOW_DIR)) {
3109 /* This is probably overkill :) */ 3112 /* This is probably overkill :) */
3110 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1)) 3113 if (!atomic_cas_uint(&adapter->fdir_reinit, 0, 1))
3111 return 1; 3114 return 1;
3112 /* Disable the interrupt */ 3115 /* Disable the interrupt */
3113 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); 3116 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
3114 softint_schedule(adapter->fdir_si); 3117 softint_schedule(adapter->fdir_si);
3115 } 3118 }
3116 3119
3117 if (eicr & IXGBE_EICR_ECC) { 3120 if (eicr & IXGBE_EICR_ECC) {
3118 device_printf(adapter->dev, 3121 device_printf(adapter->dev,
3119 "CRITICAL: ECC ERROR!! Please Reboot!!\n"); 3122 "CRITICAL: ECC ERROR!! Please Reboot!!\n");
3120 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); 3123 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
3121 } 3124 }
3122 3125
3123 /* Check for over temp condition */ 3126 /* Check for over temp condition */
3124 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { 3127 if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) {
3125 switch (adapter->hw.mac.type) { 3128 switch (adapter->hw.mac.type) {
3126 case ixgbe_mac_X550EM_a: 3129 case ixgbe_mac_X550EM_a:
3127 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) 3130 if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a))
3128 break; 3131 break;
3129 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 3132 IXGBE_WRITE_REG(hw, IXGBE_EIMC,
3130 IXGBE_EICR_GPI_SDP0_X550EM_a); 3133 IXGBE_EICR_GPI_SDP0_X550EM_a);
3131 IXGBE_WRITE_REG(hw, IXGBE_EICR, 3134 IXGBE_WRITE_REG(hw, IXGBE_EICR,
3132 IXGBE_EICR_GPI_SDP0_X550EM_a); 3135 IXGBE_EICR_GPI_SDP0_X550EM_a);
3133 retval = hw->phy.ops.check_overtemp(hw); 3136 retval = hw->phy.ops.check_overtemp(hw);
3134 if (retval != IXGBE_ERR_OVERTEMP) 3137 if (retval != IXGBE_ERR_OVERTEMP)
3135 break; 3138 break;
3136 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 3139 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3137 device_printf(adapter->dev, "System shutdown required!\n"); 3140 device_printf(adapter->dev, "System shutdown required!\n");
3138 break; 3141 break;
3139 default: 3142 default:
3140 if (!(eicr & IXGBE_EICR_TS)) 3143 if (!(eicr & IXGBE_EICR_TS))
3141 break; 3144 break;
3142 retval = hw->phy.ops.check_overtemp(hw); 3145 retval = hw->phy.ops.check_overtemp(hw);
3143 if (retval != IXGBE_ERR_OVERTEMP) 3146 if (retval != IXGBE_ERR_OVERTEMP)
3144 break; 3147 break;
3145 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); 3148 device_printf(adapter->dev, "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
3146 device_printf(adapter->dev, "System shutdown required!\n"); 3149 device_printf(adapter->dev, "System shutdown required!\n");
3147 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); 3150 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
3148 break; 3151 break;
3149 } 3152 }
3150 } 3153 }
3151 3154
3152 /* Check for VF message */ 3155 /* Check for VF message */
3153 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && 3156 if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) &&
3154 (eicr & IXGBE_EICR_MAILBOX)) 3157 (eicr & IXGBE_EICR_MAILBOX))
3155 softint_schedule(adapter->mbx_si); 3158 softint_schedule(adapter->mbx_si);
3156 } 3159 }
3157 3160
3158 /* Check for fan failure */ 3161 /* Check for fan failure */
3159 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 3162 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
3160 ixgbe_check_fan_failure(adapter, eicr, TRUE); 3163 ixgbe_check_fan_failure(adapter, eicr, TRUE);
3161 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3164 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3162 } 3165 }
3163 3166
3164 /* External PHY interrupt */ 3167 /* External PHY interrupt */
3165 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 3168 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
3166 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 3169 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
3167 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); 3170 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
3168 softint_schedule(adapter->phy_si); 3171 softint_schedule(adapter->phy_si);
3169 } 3172 }
3170 3173
3171 /* Re-enable other interrupts */ 3174 /* Re-enable other interrupts */
3172 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 3175 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
3173 return 1; 3176 return 1;
3174} /* ixgbe_msix_link */ 3177} /* ixgbe_msix_link */
3175 3178
3176static void 3179static void
3177ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) 3180ixgbe_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
3178{ 3181{
3179 3182
3180 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 3183 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3181 itr |= itr << 16; 3184 itr |= itr << 16;
3182 else 3185 else
3183 itr |= IXGBE_EITR_CNT_WDIS; 3186 itr |= IXGBE_EITR_CNT_WDIS;
3184 3187
3185 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr); 3188 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(index), itr);
3186} 3189}
3187 3190
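As a worked example of the encoding above: the 82598 replicates the interval into the upper half-word of EITR, while later MACs instead set IXGBE_EITR_CNT_WDIS so the write does not reset the interval counter. A sketch, assuming IXGBE_EITR_CNT_WDIS is bit 31 (0x80000000):

	uint32_t itr = 0x1F0;			/* example interval value */

	/* 82598: interval duplicated into bits 16..31 */
	uint32_t reg_82598 = itr | (itr << 16);	/* 0x01F001F0 */

	/* 82599 and later: keep the internal counter armed;
	 * IXGBE_EITR_CNT_WDIS assumed to be 0x80000000 */
	uint32_t reg_82599 = itr | 0x80000000u;	/* 0x800001F0 */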
3188 3191
3189/************************************************************************ 3192/************************************************************************
3190 * ixgbe_sysctl_interrupt_rate_handler 3193 * ixgbe_sysctl_interrupt_rate_handler
3191 ************************************************************************/ 3194 ************************************************************************/
3192static int 3195static int
3193ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) 3196ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
3194{ 3197{
3195 struct sysctlnode node = *rnode; 3198 struct sysctlnode node = *rnode;
3196 struct ix_queue *que = (struct ix_queue *)node.sysctl_data; 3199 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
3197 struct adapter *adapter; 3200 struct adapter *adapter;
3198 uint32_t reg, usec, rate; 3201 uint32_t reg, usec, rate;
3199 int error; 3202 int error;
3200 3203
3201 if (que == NULL) 3204 if (que == NULL)
3202 return 0; 3205 return 0;
3203 3206
3204 adapter = que->adapter; 3207 adapter = que->adapter;
3205 if (ixgbe_fw_recovery_mode_swflag(adapter)) 3208 if (ixgbe_fw_recovery_mode_swflag(adapter))
3206 return (EPERM); 3209 return (EPERM);
3207 3210
3208 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix)); 3211 reg = IXGBE_READ_REG(&adapter->hw, IXGBE_EITR(que->msix));
3209 usec = ((reg & 0x0FF8) >> 3); 3212 usec = ((reg & 0x0FF8) >> 3);
3210 if (usec > 0) 3213 if (usec > 0)
3211 rate = 500000 / usec; 3214 rate = 500000 / usec;
3212 else 3215 else
3213 rate = 0; 3216 rate = 0;
3214 node.sysctl_data = &rate; 3217 node.sysctl_data = &rate;
3215 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 3218 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3216 if (error || newp == NULL) 3219 if (error || newp == NULL)
3217 return error; 3220 return error;
3218 reg &= ~0xfff; /* default, no limitation */ 3221 reg &= ~0xfff; /* default, no limitation */
3219 if (rate > 0 && rate < 500000) { 3222 if (rate > 0 && rate < 500000) {
3220 if (rate < 1000) 3223 if (rate < 1000)
3221 rate = 1000; 3224 rate = 1000;
3222 reg |= ((4000000 / rate) & 0xff8); 3225 reg |= ((4000000 / rate) & 0xff8);
3223 /* 3226 /*
 3224 * When RSC is used, the ITR interval must be larger than 3227 * When RSC is used, the ITR interval must be larger than
 3225 * RSC_DELAY; we currently use 2us for RSC_DELAY. The minimum 3228 * RSC_DELAY; we currently use 2us for RSC_DELAY. The minimum
 3226 * value is always greater than 2us at 100M (and presumably 10M, 3229 * value is always greater than 2us at 100M (and presumably 10M,
 3227 * though this is not documented), but not at 1G and above. 3230 * though this is not documented), but not at 1G and above.
3228 */ 3231 */
3229 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 3232 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
3230 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 3233 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
3231 if ((adapter->num_queues > 1) 3234 if ((adapter->num_queues > 1)
3232 && (reg < IXGBE_MIN_RSC_EITR_10G1G)) 3235 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
3233 return EINVAL; 3236 return EINVAL;
3234 } 3237 }
3235 ixgbe_max_interrupt_rate = rate; 3238 ixgbe_max_interrupt_rate = rate;
3236 } else 3239 } else
3237 ixgbe_max_interrupt_rate = 0; 3240 ixgbe_max_interrupt_rate = 0;
3238 ixgbe_eitr_write(adapter, que->msix, reg); 3241 ixgbe_eitr_write(adapter, que->msix, reg);
3239 3242
3240 return (0); 3243 return (0);
3241} /* ixgbe_sysctl_interrupt_rate_handler */ 3244} /* ixgbe_sysctl_interrupt_rate_handler */
3242 3245
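The conversions in the handler above are easiest to follow with concrete numbers; a worked example using only the arithmetic from the code:

	uint32_t reg  = 0x1F0;			/* example EITR value     */
	uint32_t usec = (reg & 0x0FF8) >> 3;	/* -> 62                  */
	uint32_t rate = 500000 / usec;		/* -> 8064 interrupts/sec */

	/* Writing back a requested rate of 8000: */
	reg = (4000000 / 8000) & 0xff8;		/* 500 -> 496 (0x1F0)     */

The low three bits are masked off, so a requested 8000 interrupts/sec is stored as 0x1F0 and reads back as roughly 8064; the sysctl does not round-trip exactly.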
3243const struct sysctlnode * 3246const struct sysctlnode *
3244ixgbe_sysctl_instance(struct adapter *adapter) 3247ixgbe_sysctl_instance(struct adapter *adapter)
3245{ 3248{
3246 const char *dvname; 3249 const char *dvname;
3247 struct sysctllog **log; 3250 struct sysctllog **log;
3248 int rc; 3251 int rc;
3249 const struct sysctlnode *rnode; 3252 const struct sysctlnode *rnode;
3250 3253
3251 if (adapter->sysctltop != NULL) 3254 if (adapter->sysctltop != NULL)
3252 return adapter->sysctltop; 3255 return adapter->sysctltop;
3253 3256
3254 log = &adapter->sysctllog; 3257 log = &adapter->sysctllog;
3255 dvname = device_xname(adapter->dev); 3258 dvname = device_xname(adapter->dev);
3256 3259
3257 if ((rc = sysctl_createv(log, 0, NULL, &rnode, 3260 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
3258 0, CTLTYPE_NODE, dvname, 3261 0, CTLTYPE_NODE, dvname,
3259 SYSCTL_DESCR("ixgbe information and settings"), 3262 SYSCTL_DESCR("ixgbe information and settings"),
3260 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 3263 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
3261 goto err; 3264 goto err;
3262 3265
3263 return rnode; 3266 return rnode;
3264err: 3267err:
3265 device_printf(adapter->dev, 3268 device_printf(adapter->dev,
3266 "%s: sysctl_createv failed, rc = %d\n", __func__, rc); 3269 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
3267 return NULL; 3270 return NULL;
3268} 3271}
3269 3272
3270/************************************************************************ 3273/************************************************************************
3271 * ixgbe_add_device_sysctls 3274 * ixgbe_add_device_sysctls
3272 ************************************************************************/ 3275 ************************************************************************/
3273static void 3276static void
3274ixgbe_add_device_sysctls(struct adapter *adapter) 3277ixgbe_add_device_sysctls(struct adapter *adapter)
3275{ 3278{
3276 device_t dev = adapter->dev; 3279 device_t dev = adapter->dev;
3277 struct ixgbe_hw *hw = &adapter->hw; 3280 struct ixgbe_hw *hw = &adapter->hw;
3278 struct sysctllog **log; 3281 struct sysctllog **log;
3279 const struct sysctlnode *rnode, *cnode; 3282 const struct sysctlnode *rnode, *cnode;
3280 3283
3281 log = &adapter->sysctllog; 3284 log = &adapter->sysctllog;
3282 3285
3283 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 3286 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
3284 aprint_error_dev(dev, "could not create sysctl root\n"); 3287 aprint_error_dev(dev, "could not create sysctl root\n");
3285 return; 3288 return;
3286 } 3289 }
3287 3290
3288 if (sysctl_createv(log, 0, &rnode, &cnode, 3291 if (sysctl_createv(log, 0, &rnode, &cnode,
3289 CTLFLAG_READWRITE, CTLTYPE_INT, 3292 CTLFLAG_READWRITE, CTLTYPE_INT,
3290 "debug", SYSCTL_DESCR("Debug Info"), 3293 "debug", SYSCTL_DESCR("Debug Info"),
3291 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) 3294 ixgbe_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
3292 aprint_error_dev(dev, "could not create sysctl\n"); 3295 aprint_error_dev(dev, "could not create sysctl\n");
3293 3296
3294 if (sysctl_createv(log, 0, &rnode, &cnode, 3297 if (sysctl_createv(log, 0, &rnode, &cnode,
3295 CTLFLAG_READONLY, CTLTYPE_INT, 3298 CTLFLAG_READONLY, CTLTYPE_INT,
3296 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"), 3299 "num_rx_desc", SYSCTL_DESCR("Number of rx descriptors"),
3297 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0) 3300 NULL, 0, &adapter->num_rx_desc, 0, CTL_CREATE, CTL_EOL) != 0)
3298 aprint_error_dev(dev, "could not create sysctl\n"); 3301 aprint_error_dev(dev, "could not create sysctl\n");
3299 3302
3300 if (sysctl_createv(log, 0, &rnode, &cnode, 3303 if (sysctl_createv(log, 0, &rnode, &cnode,
 3304 CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
 3305 SYSCTL_DESCR("Number of jumbo buffers per queue"),
 3306 NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
 3307 CTL_EOL) != 0)
 3308 aprint_error_dev(dev, "could not create sysctl\n");
 3309
 3310 if (sysctl_createv(log, 0, &rnode, &cnode,
3301 CTLFLAG_READONLY, CTLTYPE_INT, 3311 CTLFLAG_READONLY, CTLTYPE_INT,
3302 "num_queues", SYSCTL_DESCR("Number of queues"), 3312 "num_queues", SYSCTL_DESCR("Number of queues"),
3303 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0) 3313 NULL, 0, &adapter->num_queues, 0, CTL_CREATE, CTL_EOL) != 0)
3304 aprint_error_dev(dev, "could not create sysctl\n"); 3314 aprint_error_dev(dev, "could not create sysctl\n");
3305 3315
3306 /* Sysctls for all devices */ 3316 /* Sysctls for all devices */
3307 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3317 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3308 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC), 3318 CTLTYPE_INT, "fc", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_SET_FC),
3309 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, 3319 ixgbe_sysctl_flowcntl, 0, (void *)adapter, 0, CTL_CREATE,
3310 CTL_EOL) != 0) 3320 CTL_EOL) != 0)
3311 aprint_error_dev(dev, "could not create sysctl\n"); 3321 aprint_error_dev(dev, "could not create sysctl\n");
3312 3322
3313 adapter->enable_aim = ixgbe_enable_aim; 3323 adapter->enable_aim = ixgbe_enable_aim;
3314 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3324 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3315 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), 3325 CTLTYPE_BOOL, "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
3316 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) 3326 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
3317 aprint_error_dev(dev, "could not create sysctl\n"); 3327 aprint_error_dev(dev, "could not create sysctl\n");
3318 3328
3319 if (sysctl_createv(log, 0, &rnode, &cnode, 3329 if (sysctl_createv(log, 0, &rnode, &cnode,
3320 CTLFLAG_READWRITE, CTLTYPE_INT, 3330 CTLFLAG_READWRITE, CTLTYPE_INT,
3321 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED), 3331 "advertise_speed", SYSCTL_DESCR(IXGBE_SYSCTL_DESC_ADV_SPEED),
3322 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE, 3332 ixgbe_sysctl_advertise, 0, (void *)adapter, 0, CTL_CREATE,
3323 CTL_EOL) != 0) 3333 CTL_EOL) != 0)
3324 aprint_error_dev(dev, "could not create sysctl\n"); 3334 aprint_error_dev(dev, "could not create sysctl\n");
3325 3335
3326 /* 3336 /*
 3327 * If each "que->txrx_use_workqueue" is changed in the sysctl 3337 * If each "que->txrx_use_workqueue" is changed in the sysctl
 3328 * handler, it causes flip-flopping between softint and workqueue 3338 * handler, it causes flip-flopping between softint and workqueue
 3329 * mode within one deferred processing run. Therefore, 3339 * mode within one deferred processing run. Therefore,
 3330 * preempt_disable()/preempt_enable() would be required in 3340 * preempt_disable()/preempt_enable() would be required in
 3331 * ixgbe_sched_handle_que() to avoid the KASSERT in 3341 * ixgbe_sched_handle_que() to avoid the KASSERT in
 3332 * softint_schedule(). Changing "que->txrx_use_workqueue" in the 3342 * softint_schedule(). Changing "que->txrx_use_workqueue" in the
 3333 * interrupt handler instead is lighter than doing that in every 3343 * interrupt handler instead is lighter than doing that in every
 3334 * ixgbe_sched_handle_que(). 3344 * ixgbe_sched_handle_que().
3335 */ 3345 */
3336 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue; 3346 adapter->txrx_use_workqueue = ixgbe_txrx_workqueue;
3337 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3347 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3338 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), 3348 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
3339 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0) 3349 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
3340 aprint_error_dev(dev, "could not create sysctl\n"); 3350 aprint_error_dev(dev, "could not create sysctl\n");
3341 3351
3342#ifdef IXGBE_DEBUG 3352#ifdef IXGBE_DEBUG
3343 /* testing sysctls (for all devices) */ 3353 /* testing sysctls (for all devices) */
3344 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3354 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3345 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"), 3355 CTLTYPE_INT, "power_state", SYSCTL_DESCR("PCI Power State"),
3346 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE, 3356 ixgbe_sysctl_power_state, 0, (void *)adapter, 0, CTL_CREATE,
3347 CTL_EOL) != 0) 3357 CTL_EOL) != 0)
3348 aprint_error_dev(dev, "could not create sysctl\n"); 3358 aprint_error_dev(dev, "could not create sysctl\n");
3349 3359
3350 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY, 3360 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY,
3351 CTLTYPE_STRING, "print_rss_config", 3361 CTLTYPE_STRING, "print_rss_config",
3352 SYSCTL_DESCR("Prints RSS Configuration"), 3362 SYSCTL_DESCR("Prints RSS Configuration"),
3353 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE, 3363 ixgbe_sysctl_print_rss_config, 0, (void *)adapter, 0, CTL_CREATE,
3354 CTL_EOL) != 0) 3364 CTL_EOL) != 0)
3355 aprint_error_dev(dev, "could not create sysctl\n"); 3365 aprint_error_dev(dev, "could not create sysctl\n");
3356#endif 3366#endif
3357 /* for X550 series devices */ 3367 /* for X550 series devices */
3358 if (hw->mac.type >= ixgbe_mac_X550) 3368 if (hw->mac.type >= ixgbe_mac_X550)
3359 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3369 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3360 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"), 3370 CTLTYPE_INT, "dmac", SYSCTL_DESCR("DMA Coalesce"),
3361 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE, 3371 ixgbe_sysctl_dmac, 0, (void *)adapter, 0, CTL_CREATE,
3362 CTL_EOL) != 0) 3372 CTL_EOL) != 0)
3363 aprint_error_dev(dev, "could not create sysctl\n"); 3373 aprint_error_dev(dev, "could not create sysctl\n");
3364 3374
3365 /* for WoL-capable devices */ 3375 /* for WoL-capable devices */
3366 if (adapter->wol_support) { 3376 if (adapter->wol_support) {
3367 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3377 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3368 CTLTYPE_BOOL, "wol_enable", 3378 CTLTYPE_BOOL, "wol_enable",
3369 SYSCTL_DESCR("Enable/Disable Wake on LAN"), 3379 SYSCTL_DESCR("Enable/Disable Wake on LAN"),
3370 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE, 3380 ixgbe_sysctl_wol_enable, 0, (void *)adapter, 0, CTL_CREATE,
3371 CTL_EOL) != 0) 3381 CTL_EOL) != 0)
3372 aprint_error_dev(dev, "could not create sysctl\n"); 3382 aprint_error_dev(dev, "could not create sysctl\n");
3373 3383
3374 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3384 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3375 CTLTYPE_INT, "wufc", 3385 CTLTYPE_INT, "wufc",
3376 SYSCTL_DESCR("Enable/Disable Wake Up Filters"), 3386 SYSCTL_DESCR("Enable/Disable Wake Up Filters"),
3377 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE, 3387 ixgbe_sysctl_wufc, 0, (void *)adapter, 0, CTL_CREATE,
3378 CTL_EOL) != 0) 3388 CTL_EOL) != 0)
3379 aprint_error_dev(dev, "could not create sysctl\n"); 3389 aprint_error_dev(dev, "could not create sysctl\n");
3380 } 3390 }
3381 3391
3382 /* for X552/X557-AT devices */ 3392 /* for X552/X557-AT devices */
3383 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { 3393 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
3384 const struct sysctlnode *phy_node; 3394 const struct sysctlnode *phy_node;
3385 3395
3386 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE, 3396 if (sysctl_createv(log, 0, &rnode, &phy_node, 0, CTLTYPE_NODE,
3387 "phy", SYSCTL_DESCR("External PHY sysctls"), 3397 "phy", SYSCTL_DESCR("External PHY sysctls"),
3388 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) { 3398 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) {
3389 aprint_error_dev(dev, "could not create sysctl\n"); 3399 aprint_error_dev(dev, "could not create sysctl\n");
3390 return; 3400 return;
3391 } 3401 }
3392 3402
3393 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY, 3403 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3394 CTLTYPE_INT, "temp", 3404 CTLTYPE_INT, "temp",
3395 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"), 3405 SYSCTL_DESCR("Current External PHY Temperature (Celsius)"),
3396 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE, 3406 ixgbe_sysctl_phy_temp, 0, (void *)adapter, 0, CTL_CREATE,
3397 CTL_EOL) != 0) 3407 CTL_EOL) != 0)
3398 aprint_error_dev(dev, "could not create sysctl\n"); 3408 aprint_error_dev(dev, "could not create sysctl\n");
3399 3409
3400 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY, 3410 if (sysctl_createv(log, 0, &phy_node, &cnode, CTLFLAG_READONLY,
3401 CTLTYPE_INT, "overtemp_occurred", 3411 CTLTYPE_INT, "overtemp_occurred",
3402 SYSCTL_DESCR("External PHY High Temperature Event Occurred"), 3412 SYSCTL_DESCR("External PHY High Temperature Event Occurred"),
3403 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0, 3413 ixgbe_sysctl_phy_overtemp_occurred, 0, (void *)adapter, 0,
3404 CTL_CREATE, CTL_EOL) != 0) 3414 CTL_CREATE, CTL_EOL) != 0)
3405 aprint_error_dev(dev, "could not create sysctl\n"); 3415 aprint_error_dev(dev, "could not create sysctl\n");
3406 } 3416 }
3407 3417
3408 if ((hw->mac.type == ixgbe_mac_X550EM_a) 3418 if ((hw->mac.type == ixgbe_mac_X550EM_a)
3409 && (hw->phy.type == ixgbe_phy_fw)) 3419 && (hw->phy.type == ixgbe_phy_fw))
3410 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3420 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3411 CTLTYPE_BOOL, "force_10_100_autonego", 3421 CTLTYPE_BOOL, "force_10_100_autonego",
3412 SYSCTL_DESCR("Force autonego on 10M and 100M"), 3422 SYSCTL_DESCR("Force autonego on 10M and 100M"),
3413 NULL, 0, &hw->phy.force_10_100_autonego, 0, 3423 NULL, 0, &hw->phy.force_10_100_autonego, 0,
3414 CTL_CREATE, CTL_EOL) != 0) 3424 CTL_CREATE, CTL_EOL) != 0)
3415 aprint_error_dev(dev, "could not create sysctl\n"); 3425 aprint_error_dev(dev, "could not create sysctl\n");
3416 3426
3417 if (adapter->feat_cap & IXGBE_FEATURE_EEE) { 3427 if (adapter->feat_cap & IXGBE_FEATURE_EEE) {
3418 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 3428 if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
3419 CTLTYPE_INT, "eee_state", 3429 CTLTYPE_INT, "eee_state",
3420 SYSCTL_DESCR("EEE Power Save State"), 3430 SYSCTL_DESCR("EEE Power Save State"),
3421 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE, 3431 ixgbe_sysctl_eee_state, 0, (void *)adapter, 0, CTL_CREATE,
3422 CTL_EOL) != 0) 3432 CTL_EOL) != 0)
3423 aprint_error_dev(dev, "could not create sysctl\n"); 3433 aprint_error_dev(dev, "could not create sysctl\n");
3424 } 3434 }
3425} /* ixgbe_add_device_sysctls */ 3435} /* ixgbe_add_device_sysctls */
3426 3436
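From userland the nodes created above appear under hw.<device>, e.g. hw.ixg0 for the first instance (instance name assumed):

	sysctl hw.ixg0				# list the per-device nodes
	sysctl hw.ixg0.num_jcl_per_queue	# jumbo buffers per queue (added above)
	sysctl -w hw.ixg0.txrx_workqueue=1	# defer packet processing to workqueues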
3427/************************************************************************ 3437/************************************************************************
3428 * ixgbe_allocate_pci_resources 3438 * ixgbe_allocate_pci_resources
3429 ************************************************************************/ 3439 ************************************************************************/
3430static int 3440static int
3431ixgbe_allocate_pci_resources(struct adapter *adapter, 3441ixgbe_allocate_pci_resources(struct adapter *adapter,
3432 const struct pci_attach_args *pa) 3442 const struct pci_attach_args *pa)
3433{ 3443{
3434 pcireg_t memtype, csr; 3444 pcireg_t memtype, csr;
3435 device_t dev = adapter->dev; 3445 device_t dev = adapter->dev;
3436 bus_addr_t addr; 3446 bus_addr_t addr;
3437 int flags; 3447 int flags;
3438 3448
3439 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); 3449 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
3440 switch (memtype) { 3450 switch (memtype) {
3441 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3451 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
3442 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3452 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
3443 adapter->osdep.mem_bus_space_tag = pa->pa_memt; 3453 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
3444 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0), 3454 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
3445 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0) 3455 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
3446 goto map_err; 3456 goto map_err;
3447 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) { 3457 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
3448 aprint_normal_dev(dev, "clearing prefetchable bit\n"); 3458 aprint_normal_dev(dev, "clearing prefetchable bit\n");
3449 flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3459 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
3450 } 3460 }
3451 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr, 3461 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
3452 adapter->osdep.mem_size, flags, 3462 adapter->osdep.mem_size, flags,
3453 &adapter->osdep.mem_bus_space_handle) != 0) { 3463 &adapter->osdep.mem_bus_space_handle) != 0) {
3454map_err: 3464map_err:
3455 adapter->osdep.mem_size = 0; 3465 adapter->osdep.mem_size = 0;
3456 aprint_error_dev(dev, "unable to map BAR0\n"); 3466 aprint_error_dev(dev, "unable to map BAR0\n");
3457 return ENXIO; 3467 return ENXIO;
3458 } 3468 }
3459 /* 3469 /*
 3460 * Enable address decoding for the memory range in case the BIOS 3470 * Enable address decoding for the memory range in case the BIOS
 3461 * or UEFI didn't set it. 3471 * or UEFI didn't set it.
3462 */ 3472 */
3463 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, 3473 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
3464 PCI_COMMAND_STATUS_REG); 3474 PCI_COMMAND_STATUS_REG);
3465 csr |= PCI_COMMAND_MEM_ENABLE; 3475 csr |= PCI_COMMAND_MEM_ENABLE;
3466 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 3476 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3467 csr); 3477 csr);
3468 break; 3478 break;
3469 default: 3479 default:
3470 aprint_error_dev(dev, "unexpected type on BAR0\n"); 3480 aprint_error_dev(dev, "unexpected type on BAR0\n");
3471 return ENXIO; 3481 return ENXIO;
3472 } 3482 }
3473 3483
3474 return (0); 3484 return (0);
3475} /* ixgbe_allocate_pci_resources */ 3485} /* ixgbe_allocate_pci_resources */
3476 3486
3477static void 3487static void
3478ixgbe_free_softint(struct adapter *adapter) 3488ixgbe_free_softint(struct adapter *adapter)
3479{ 3489{
3480 struct ix_queue *que = adapter->queues; 3490 struct ix_queue *que = adapter->queues;
3481 struct tx_ring *txr = adapter->tx_rings; 3491 struct tx_ring *txr = adapter->tx_rings;
3482 int i; 3492 int i;
3483 3493
3484 for (i = 0; i < adapter->num_queues; i++, que++, txr++) { 3494 for (i = 0; i < adapter->num_queues; i++, que++, txr++) {
3485 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) { 3495 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) {
3486 if (txr->txr_si != NULL) 3496 if (txr->txr_si != NULL)
3487 softint_disestablish(txr->txr_si); 3497 softint_disestablish(txr->txr_si);
3488 } 3498 }
3489 if (que->que_si != NULL) 3499 if (que->que_si != NULL)
3490 softint_disestablish(que->que_si); 3500 softint_disestablish(que->que_si);
3491 } 3501 }
3492 if (adapter->txr_wq != NULL) 3502 if (adapter->txr_wq != NULL)
3493 workqueue_destroy(adapter->txr_wq); 3503 workqueue_destroy(adapter->txr_wq);
3494 if (adapter->txr_wq_enqueued != NULL) 3504 if (adapter->txr_wq_enqueued != NULL)
3495 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); 3505 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
3496 if (adapter->que_wq != NULL) 3506 if (adapter->que_wq != NULL)
3497 workqueue_destroy(adapter->que_wq); 3507 workqueue_destroy(adapter->que_wq);
3498 3508
3499 /* Drain the Link queue */ 3509 /* Drain the Link queue */
3500 if (adapter->link_si != NULL) { 3510 if (adapter->link_si != NULL) {
3501 softint_disestablish(adapter->link_si); 3511 softint_disestablish(adapter->link_si);
3502 adapter->link_si = NULL; 3512 adapter->link_si = NULL;
3503 } 3513 }
3504 if (adapter->mod_si != NULL) { 3514 if (adapter->mod_si != NULL) {
3505 softint_disestablish(adapter->mod_si); 3515 softint_disestablish(adapter->mod_si);
3506 adapter->mod_si = NULL; 3516 adapter->mod_si = NULL;
3507 } 3517 }
3508 if (adapter->msf_si != NULL) { 3518 if (adapter->msf_si != NULL) {
3509 softint_disestablish(adapter->msf_si); 3519 softint_disestablish(adapter->msf_si);
3510 adapter->msf_si = NULL; 3520 adapter->msf_si = NULL;
3511 } 3521 }
3512 if (adapter->phy_si != NULL) { 3522 if (adapter->phy_si != NULL) {
3513 softint_disestablish(adapter->phy_si); 3523 softint_disestablish(adapter->phy_si);
3514 adapter->phy_si = NULL; 3524 adapter->phy_si = NULL;
3515 } 3525 }
3516 if (adapter->feat_en & IXGBE_FEATURE_FDIR) { 3526 if (adapter->feat_en & IXGBE_FEATURE_FDIR) {
3517 if (adapter->fdir_si != NULL) { 3527 if (adapter->fdir_si != NULL) {
3518 softint_disestablish(adapter->fdir_si); 3528 softint_disestablish(adapter->fdir_si);
3519 adapter->fdir_si = NULL; 3529 adapter->fdir_si = NULL;
3520 } 3530 }
3521 } 3531 }
3522 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) { 3532 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) {
3523 if (adapter->mbx_si != NULL) { 3533 if (adapter->mbx_si != NULL) {
3524 softint_disestablish(adapter->mbx_si); 3534 softint_disestablish(adapter->mbx_si);
3525 adapter->mbx_si = NULL; 3535 adapter->mbx_si = NULL;
3526 } 3536 }
3527 } 3537 }
3528} /* ixgbe_free_softint */ 3538} /* ixgbe_free_softint */
3529 3539
3530/************************************************************************ 3540/************************************************************************
3531 * ixgbe_detach - Device removal routine 3541 * ixgbe_detach - Device removal routine
3532 * 3542 *
3533 * Called when the driver is being removed. 3543 * Called when the driver is being removed.
3534 * Stops the adapter and deallocates all the resources 3544 * Stops the adapter and deallocates all the resources
3535 * that were allocated for driver operation. 3545 * that were allocated for driver operation.
3536 * 3546 *
3537 * return 0 on success, positive on failure 3547 * return 0 on success, positive on failure
3538 ************************************************************************/ 3548 ************************************************************************/
3539static int 3549static int
3540ixgbe_detach(device_t dev, int flags) 3550ixgbe_detach(device_t dev, int flags)
3541{ 3551{
3542 struct adapter *adapter = device_private(dev); 3552 struct adapter *adapter = device_private(dev);
3543 struct rx_ring *rxr = adapter->rx_rings; 3553 struct rx_ring *rxr = adapter->rx_rings;
3544 struct tx_ring *txr = adapter->tx_rings; 3554 struct tx_ring *txr = adapter->tx_rings;
3545 struct ixgbe_hw *hw = &adapter->hw; 3555 struct ixgbe_hw *hw = &adapter->hw;
3546 struct ixgbe_hw_stats *stats = &adapter->stats.pf; 3556 struct ixgbe_hw_stats *stats = &adapter->stats.pf;
3547 u32 ctrl_ext; 3557 u32 ctrl_ext;
3548 int i; 3558 int i;
3549 3559
3550 INIT_DEBUGOUT("ixgbe_detach: begin"); 3560 INIT_DEBUGOUT("ixgbe_detach: begin");
3551 if (adapter->osdep.attached == false) 3561 if (adapter->osdep.attached == false)
3552 return 0; 3562 return 0;
3553 3563
3554 if (ixgbe_pci_iov_detach(dev) != 0) { 3564 if (ixgbe_pci_iov_detach(dev) != 0) {
3555 device_printf(dev, "SR-IOV in use; detach first.\n"); 3565 device_printf(dev, "SR-IOV in use; detach first.\n");
3556 return (EBUSY); 3566 return (EBUSY);
3557 } 3567 }
3558 3568
3559 /* 3569 /*
3560 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(), 3570 * Stop the interface. ixgbe_setup_low_power_mode() calls ixgbe_stop(),
3561 * so it's not required to call ixgbe_stop() directly. 3571 * so it's not required to call ixgbe_stop() directly.
3562 */ 3572 */
3563 IXGBE_CORE_LOCK(adapter); 3573 IXGBE_CORE_LOCK(adapter);
3564 ixgbe_setup_low_power_mode(adapter); 3574 ixgbe_setup_low_power_mode(adapter);
3565 IXGBE_CORE_UNLOCK(adapter); 3575 IXGBE_CORE_UNLOCK(adapter);
3566#if NVLAN > 0 3576#if NVLAN > 0
3567 /* Make sure VLANs are not using driver */ 3577 /* Make sure VLANs are not using driver */
3568 if (!VLAN_ATTACHED(&adapter->osdep.ec)) 3578 if (!VLAN_ATTACHED(&adapter->osdep.ec))
3569 ; /* nothing to do: no VLANs */ 3579 ; /* nothing to do: no VLANs */
3570 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) 3580 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
3571 vlan_ifdetach(adapter->ifp); 3581 vlan_ifdetach(adapter->ifp);
3572 else { 3582 else {
3573 aprint_error_dev(dev, "VLANs in use, detach first\n"); 3583 aprint_error_dev(dev, "VLANs in use, detach first\n");
3574 return (EBUSY); 3584 return (EBUSY);
3575 } 3585 }
3576#endif 3586#endif
3577 3587
3578 pmf_device_deregister(dev); 3588 pmf_device_deregister(dev);
3579 3589
3580 ether_ifdetach(adapter->ifp); 3590 ether_ifdetach(adapter->ifp);
3581 3591
3582 ixgbe_free_softint(adapter); 3592 ixgbe_free_softint(adapter);
3583 3593
3584 /* let hardware know driver is unloading */ 3594 /* let hardware know driver is unloading */
3585 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 3595 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
3586 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3596 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3587 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); 3597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
3588 3598
3589 callout_halt(&adapter->timer, NULL); 3599 callout_halt(&adapter->timer, NULL);
3590 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) 3600 if (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE)
3591 callout_halt(&adapter->recovery_mode_timer, NULL); 3601 callout_halt(&adapter->recovery_mode_timer, NULL);
3592 3602
3593 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 3603 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
3594 netmap_detach(adapter->ifp); 3604 netmap_detach(adapter->ifp);
3595 3605
3596 ixgbe_free_pci_resources(adapter); 3606 ixgbe_free_pci_resources(adapter);
3597#if 0 /* XXX the NetBSD port is probably missing something here */ 3607#if 0 /* XXX the NetBSD port is probably missing something here */
3598 bus_generic_detach(dev); 3608 bus_generic_detach(dev);
3599#endif 3609#endif
3600 if_detach(adapter->ifp); 3610 if_detach(adapter->ifp);
3601 if_percpuq_destroy(adapter->ipq); 3611 if_percpuq_destroy(adapter->ipq);
3602 3612
3603 sysctl_teardown(&adapter->sysctllog); 3613 sysctl_teardown(&adapter->sysctllog);
3604 evcnt_detach(&adapter->efbig_tx_dma_setup); 3614 evcnt_detach(&adapter->efbig_tx_dma_setup);
3605 evcnt_detach(&adapter->mbuf_defrag_failed); 3615 evcnt_detach(&adapter->mbuf_defrag_failed);
3606 evcnt_detach(&adapter->efbig2_tx_dma_setup); 3616 evcnt_detach(&adapter->efbig2_tx_dma_setup);
3607 evcnt_detach(&adapter->einval_tx_dma_setup); 3617 evcnt_detach(&adapter->einval_tx_dma_setup);
3608 evcnt_detach(&adapter->other_tx_dma_setup); 3618 evcnt_detach(&adapter->other_tx_dma_setup);
3609 evcnt_detach(&adapter->eagain_tx_dma_setup); 3619 evcnt_detach(&adapter->eagain_tx_dma_setup);
3610 evcnt_detach(&adapter->enomem_tx_dma_setup); 3620 evcnt_detach(&adapter->enomem_tx_dma_setup);
3611 evcnt_detach(&adapter->watchdog_events); 3621 evcnt_detach(&adapter->watchdog_events);
3612 evcnt_detach(&adapter->tso_err); 3622 evcnt_detach(&adapter->tso_err);
3613 evcnt_detach(&adapter->link_irq); 3623 evcnt_detach(&adapter->link_irq);
3614 evcnt_detach(&adapter->link_sicount); 3624 evcnt_detach(&adapter->link_sicount);
3615 evcnt_detach(&adapter->mod_sicount); 3625 evcnt_detach(&adapter->mod_sicount);
3616 evcnt_detach(&adapter->msf_sicount); 3626 evcnt_detach(&adapter->msf_sicount);
3617 evcnt_detach(&adapter->phy_sicount); 3627 evcnt_detach(&adapter->phy_sicount);
3618 3628
3619 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) { 3629 for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
3620 if (i < __arraycount(stats->mpc)) { 3630 if (i < __arraycount(stats->mpc)) {
3621 evcnt_detach(&stats->mpc[i]); 3631 evcnt_detach(&stats->mpc[i]);
3622 if (hw->mac.type == ixgbe_mac_82598EB) 3632 if (hw->mac.type == ixgbe_mac_82598EB)
3623 evcnt_detach(&stats->rnbc[i]); 3633 evcnt_detach(&stats->rnbc[i]);
3624 } 3634 }
3625 if (i < __arraycount(stats->pxontxc)) { 3635 if (i < __arraycount(stats->pxontxc)) {
3626 evcnt_detach(&stats->pxontxc[i]); 3636 evcnt_detach(&stats->pxontxc[i]);
3627 evcnt_detach(&stats->pxonrxc[i]); 3637 evcnt_detach(&stats->pxonrxc[i]);
3628 evcnt_detach(&stats->pxofftxc[i]); 3638 evcnt_detach(&stats->pxofftxc[i]);
3629 evcnt_detach(&stats->pxoffrxc[i]); 3639 evcnt_detach(&stats->pxoffrxc[i]);
3630 if (hw->mac.type >= ixgbe_mac_82599EB) 3640 if (hw->mac.type >= ixgbe_mac_82599EB)
3631 evcnt_detach(&stats->pxon2offc[i]); 3641 evcnt_detach(&stats->pxon2offc[i]);
3632 } 3642 }
3633 } 3643 }
3634 3644
3635 txr = adapter->tx_rings; 3645 txr = adapter->tx_rings;
3636 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 3646 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
3637 evcnt_detach(&adapter->queues[i].irqs); 3647 evcnt_detach(&adapter->queues[i].irqs);
3638 evcnt_detach(&adapter->queues[i].handleq); 3648 evcnt_detach(&adapter->queues[i].handleq);
3639 evcnt_detach(&adapter->queues[i].req); 3649 evcnt_detach(&adapter->queues[i].req);
3640 evcnt_detach(&txr->no_desc_avail); 3650 evcnt_detach(&txr->no_desc_avail);
3641 evcnt_detach(&txr->total_packets); 3651 evcnt_detach(&txr->total_packets);
3642 evcnt_detach(&txr->tso_tx); 3652 evcnt_detach(&txr->tso_tx);
3643#ifndef IXGBE_LEGACY_TX 3653#ifndef IXGBE_LEGACY_TX
3644 evcnt_detach(&txr->pcq_drops); 3654 evcnt_detach(&txr->pcq_drops);
3645#endif 3655#endif
3646 3656
3647 if (i < __arraycount(stats->qprc)) { 3657 if (i < __arraycount(stats->qprc)) {
3648 evcnt_detach(&stats->qprc[i]); 3658 evcnt_detach(&stats->qprc[i]);
3649 evcnt_detach(&stats->qptc[i]); 3659 evcnt_detach(&stats->qptc[i]);
3650 evcnt_detach(&stats->qbrc[i]); 3660 evcnt_detach(&stats->qbrc[i]);
3651 evcnt_detach(&stats->qbtc[i]); 3661 evcnt_detach(&stats->qbtc[i]);
3652 if (hw->mac.type >= ixgbe_mac_82599EB) 3662 if (hw->mac.type >= ixgbe_mac_82599EB)
3653 evcnt_detach(&stats->qprdc[i]); 3663 evcnt_detach(&stats->qprdc[i]);
3654 } 3664 }
3655 3665
3656 evcnt_detach(&rxr->rx_packets); 3666 evcnt_detach(&rxr->rx_packets);
3657 evcnt_detach(&rxr->rx_bytes); 3667 evcnt_detach(&rxr->rx_bytes);
3658 evcnt_detach(&rxr->rx_copies); 3668 evcnt_detach(&rxr->rx_copies);
3659 evcnt_detach(&rxr->no_jmbuf); 3669 evcnt_detach(&rxr->no_jmbuf);
3660 evcnt_detach(&rxr->rx_discarded); 3670 evcnt_detach(&rxr->rx_discarded);
3661 } 3671 }
3662 evcnt_detach(&stats->ipcs); 3672 evcnt_detach(&stats->ipcs);
3663 evcnt_detach(&stats->l4cs); 3673 evcnt_detach(&stats->l4cs);
3664 evcnt_detach(&stats->ipcs_bad); 3674 evcnt_detach(&stats->ipcs_bad);
3665 evcnt_detach(&stats->l4cs_bad); 3675 evcnt_detach(&stats->l4cs_bad);
3666 evcnt_detach(&stats->intzero); 3676 evcnt_detach(&stats->intzero);
3667 evcnt_detach(&stats->legint); 3677 evcnt_detach(&stats->legint);
3668 evcnt_detach(&stats->crcerrs); 3678 evcnt_detach(&stats->crcerrs);
3669 evcnt_detach(&stats->illerrc); 3679 evcnt_detach(&stats->illerrc);
3670 evcnt_detach(&stats->errbc); 3680 evcnt_detach(&stats->errbc);
3671 evcnt_detach(&stats->mspdc); 3681 evcnt_detach(&stats->mspdc);
3672 if (hw->mac.type >= ixgbe_mac_X550) 3682 if (hw->mac.type >= ixgbe_mac_X550)
3673 evcnt_detach(&stats->mbsdc); 3683 evcnt_detach(&stats->mbsdc);
3674 evcnt_detach(&stats->mpctotal); 3684 evcnt_detach(&stats->mpctotal);
3675 evcnt_detach(&stats->mlfc); 3685 evcnt_detach(&stats->mlfc);
3676 evcnt_detach(&stats->mrfc); 3686 evcnt_detach(&stats->mrfc);
3677 evcnt_detach(&stats->rlec); 3687 evcnt_detach(&stats->rlec);
3678 evcnt_detach(&stats->lxontxc); 3688 evcnt_detach(&stats->lxontxc);
3679 evcnt_detach(&stats->lxonrxc); 3689 evcnt_detach(&stats->lxonrxc);
3680 evcnt_detach(&stats->lxofftxc); 3690 evcnt_detach(&stats->lxofftxc);
3681 evcnt_detach(&stats->lxoffrxc); 3691 evcnt_detach(&stats->lxoffrxc);
3682 3692
3683 /* Packet Reception Stats */ 3693 /* Packet Reception Stats */
3684 evcnt_detach(&stats->tor); 3694 evcnt_detach(&stats->tor);
3685 evcnt_detach(&stats->gorc); 3695 evcnt_detach(&stats->gorc);
3686 evcnt_detach(&stats->tpr); 3696 evcnt_detach(&stats->tpr);
3687 evcnt_detach(&stats->gprc); 3697 evcnt_detach(&stats->gprc);
3688 evcnt_detach(&stats->mprc); 3698 evcnt_detach(&stats->mprc);
3689 evcnt_detach(&stats->bprc); 3699 evcnt_detach(&stats->bprc);
3690 evcnt_detach(&stats->prc64); 3700 evcnt_detach(&stats->prc64);
3691 evcnt_detach(&stats->prc127); 3701 evcnt_detach(&stats->prc127);
3692 evcnt_detach(&stats->prc255); 3702 evcnt_detach(&stats->prc255);
3693 evcnt_detach(&stats->prc511); 3703 evcnt_detach(&stats->prc511);
3694 evcnt_detach(&stats->prc1023); 3704 evcnt_detach(&stats->prc1023);
3695 evcnt_detach(&stats->prc1522); 3705 evcnt_detach(&stats->prc1522);
3696 evcnt_detach(&stats->ruc); 3706 evcnt_detach(&stats->ruc);
3697 evcnt_detach(&stats->rfc); 3707 evcnt_detach(&stats->rfc);
3698 evcnt_detach(&stats->roc); 3708 evcnt_detach(&stats->roc);
3699 evcnt_detach(&stats->rjc); 3709 evcnt_detach(&stats->rjc);
3700 evcnt_detach(&stats->mngprc); 3710 evcnt_detach(&stats->mngprc);
3701 evcnt_detach(&stats->mngpdc); 3711 evcnt_detach(&stats->mngpdc);
3702 evcnt_detach(&stats->xec); 3712 evcnt_detach(&stats->xec);
3703 3713
3704 /* Packet Transmission Stats */ 3714 /* Packet Transmission Stats */
3705 evcnt_detach(&stats->gotc); 3715 evcnt_detach(&stats->gotc);
3706 evcnt_detach(&stats->tpt); 3716 evcnt_detach(&stats->tpt);
3707 evcnt_detach(&stats->gptc); 3717 evcnt_detach(&stats->gptc);
3708 evcnt_detach(&stats->bptc); 3718 evcnt_detach(&stats->bptc);
3709 evcnt_detach(&stats->mptc); 3719 evcnt_detach(&stats->mptc);
3710 evcnt_detach(&stats->mngptc); 3720 evcnt_detach(&stats->mngptc);
3711 evcnt_detach(&stats->ptc64); 3721 evcnt_detach(&stats->ptc64);
3712 evcnt_detach(&stats->ptc127); 3722 evcnt_detach(&stats->ptc127);
3713 evcnt_detach(&stats->ptc255); 3723 evcnt_detach(&stats->ptc255);
3714 evcnt_detach(&stats->ptc511); 3724 evcnt_detach(&stats->ptc511);
3715 evcnt_detach(&stats->ptc1023); 3725 evcnt_detach(&stats->ptc1023);
3716 evcnt_detach(&stats->ptc1522); 3726 evcnt_detach(&stats->ptc1522);
3717 3727
3718 ixgbe_free_queues(adapter); 3728 ixgbe_free_queues(adapter);
3719 free(adapter->mta, M_DEVBUF); 3729 free(adapter->mta, M_DEVBUF);
3720 3730
3721 IXGBE_CORE_LOCK_DESTROY(adapter); 3731 IXGBE_CORE_LOCK_DESTROY(adapter);
3722 3732
3723 return (0); 3733 return (0);
3724} /* ixgbe_detach */ 3734} /* ixgbe_detach */
3725 3735
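The detach path above can be exercised from userland with drvctl(8); a sketch, with the instance name assumed:

	drvctl -d ixg0		# fails with EBUSY while SR-IOV or VLANs are in use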
3726/************************************************************************ 3736/************************************************************************
3727 * ixgbe_setup_low_power_mode - LPLU/WoL preparation 3737 * ixgbe_setup_low_power_mode - LPLU/WoL preparation
3728 * 3738 *
3729 * Prepare the adapter/port for LPLU and/or WoL 3739 * Prepare the adapter/port for LPLU and/or WoL
3730 ************************************************************************/ 3740 ************************************************************************/
3731static int 3741static int
3732ixgbe_setup_low_power_mode(struct adapter *adapter) 3742ixgbe_setup_low_power_mode(struct adapter *adapter)
3733{ 3743{
3734 struct ixgbe_hw *hw = &adapter->hw; 3744 struct ixgbe_hw *hw = &adapter->hw;
3735 device_t dev = adapter->dev; 3745 device_t dev = adapter->dev;
3736 s32 error = 0; 3746 s32 error = 0;
3737 3747
3738 KASSERT(mutex_owned(&adapter->core_mtx)); 3748 KASSERT(mutex_owned(&adapter->core_mtx));
3739 3749
3740 /* Limit power management flow to X550EM baseT */ 3750 /* Limit power management flow to X550EM baseT */
3741 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 3751 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
3742 hw->phy.ops.enter_lplu) { 3752 hw->phy.ops.enter_lplu) {
3743 /* X550EM baseT adapters need a special LPLU flow */ 3753 /* X550EM baseT adapters need a special LPLU flow */
3744 hw->phy.reset_disable = true; 3754 hw->phy.reset_disable = true;
3745 ixgbe_stop(adapter); 3755 ixgbe_stop(adapter);
3746 error = hw->phy.ops.enter_lplu(hw); 3756 error = hw->phy.ops.enter_lplu(hw);
3747 if (error) 3757 if (error)
3748 device_printf(dev, 3758 device_printf(dev,
3749 "Error entering LPLU: %d\n", error); 3759 "Error entering LPLU: %d\n", error);
3750 hw->phy.reset_disable = false; 3760 hw->phy.reset_disable = false;
3751 } else { 3761 } else {
3752 /* Just stop for other adapters */ 3762 /* Just stop for other adapters */
3753 ixgbe_stop(adapter); 3763 ixgbe_stop(adapter);
3754 } 3764 }
3755 3765
3756 if (!hw->wol_enabled) { 3766 if (!hw->wol_enabled) {
3757 ixgbe_set_phy_power(hw, FALSE); 3767 ixgbe_set_phy_power(hw, FALSE);
3758 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3768 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3759 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); 3769 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3760 } else { 3770 } else {
3761 /* Turn off support for APM wakeup. (Using ACPI instead) */ 3771 /* Turn off support for APM wakeup. (Using ACPI instead) */
3762 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw), 3772 IXGBE_WRITE_REG(hw, IXGBE_GRC_BY_MAC(hw),
3763 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2); 3773 IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
3764 3774
3765 /* 3775 /*
3766 * Clear Wake Up Status register to prevent any previous wakeup 3776 * Clear Wake Up Status register to prevent any previous wakeup
3767 * events from waking us up immediately after we suspend. 3777 * events from waking us up immediately after we suspend.
3768 */ 3778 */
3769 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3779 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3770 3780
3771 /* 3781 /*
3772 * Program the Wakeup Filter Control register with user filter 3782 * Program the Wakeup Filter Control register with user filter
3773 * settings 3783 * settings
3774 */ 3784 */
3775 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); 3785 IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc);
3776 3786
3777 /* Enable wakeups and power management in Wakeup Control */ 3787 /* Enable wakeups and power management in Wakeup Control */
3778 IXGBE_WRITE_REG(hw, IXGBE_WUC, 3788 IXGBE_WRITE_REG(hw, IXGBE_WUC,
3779 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); 3789 IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN);
3780 3790
3781 } 3791 }
3782 3792
3783 return error; 3793 return error;
3784} /* ixgbe_setup_low_power_mode */ 3794} /* ixgbe_setup_low_power_mode */
3785 3795
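For WoL-capable adapters, the WUFC value programmed above comes from adapter->wufc, which is exposed through the wufc sysctl created earlier. A configuration sketch (the instance name and the magic-packet filter bit, assumed to be 0x2, are illustrative):

	sysctl -w hw.ixg0.wol_enable=1
	sysctl -w hw.ixg0.wufc=0x2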
3786/************************************************************************ 3796/************************************************************************
3787 * ixgbe_shutdown - Shutdown entry point 3797 * ixgbe_shutdown - Shutdown entry point
3788 ************************************************************************/ 3798 ************************************************************************/
3789#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ 3799#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
3790static int 3800static int
3791ixgbe_shutdown(device_t dev) 3801ixgbe_shutdown(device_t dev)
3792{ 3802{
3793 struct adapter *adapter = device_private(dev); 3803 struct adapter *adapter = device_private(dev);
3794 int error = 0; 3804 int error = 0;
3795 3805
3796 INIT_DEBUGOUT("ixgbe_shutdown: begin"); 3806 INIT_DEBUGOUT("ixgbe_shutdown: begin");
3797 3807
3798 IXGBE_CORE_LOCK(adapter); 3808 IXGBE_CORE_LOCK(adapter);
3799 error = ixgbe_setup_low_power_mode(adapter); 3809 error = ixgbe_setup_low_power_mode(adapter);
3800 IXGBE_CORE_UNLOCK(adapter); 3810 IXGBE_CORE_UNLOCK(adapter);
3801 3811
3802 return (error); 3812 return (error);
3803} /* ixgbe_shutdown */ 3813} /* ixgbe_shutdown */
3804#endif 3814#endif
3805 3815
3806/************************************************************************ 3816/************************************************************************
3807 * ixgbe_suspend 3817 * ixgbe_suspend
3808 * 3818 *
3809 * From D0 to D3 3819 * From D0 to D3
3810 ************************************************************************/ 3820 ************************************************************************/
3811static bool 3821static bool
3812ixgbe_suspend(device_t dev, const pmf_qual_t *qual) 3822ixgbe_suspend(device_t dev, const pmf_qual_t *qual)
3813{ 3823{
3814 struct adapter *adapter = device_private(dev); 3824 struct adapter *adapter = device_private(dev);
3815 int error = 0; 3825 int error = 0;
3816 3826
3817 INIT_DEBUGOUT("ixgbe_suspend: begin"); 3827 INIT_DEBUGOUT("ixgbe_suspend: begin");
3818 3828
3819 IXGBE_CORE_LOCK(adapter); 3829 IXGBE_CORE_LOCK(adapter);
3820 3830
3821 error = ixgbe_setup_low_power_mode(adapter); 3831 error = ixgbe_setup_low_power_mode(adapter);
3822 3832
3823 IXGBE_CORE_UNLOCK(adapter); 3833 IXGBE_CORE_UNLOCK(adapter);
3824 3834
3825 return (error); 3835 return (error);
3826} /* ixgbe_suspend */ 3836} /* ixgbe_suspend */
3827 3837
3828/************************************************************************ 3838/************************************************************************
3829 * ixgbe_resume 3839 * ixgbe_resume
3830 * 3840 *
3831 * From D3 to D0 3841 * From D3 to D0
3832 ************************************************************************/ 3842 ************************************************************************/
3833static bool 3843static bool
3834ixgbe_resume(device_t dev, const pmf_qual_t *qual) 3844ixgbe_resume(device_t dev, const pmf_qual_t *qual)
3835{ 3845{
3836 struct adapter *adapter = device_private(dev); 3846 struct adapter *adapter = device_private(dev);
3837 struct ifnet *ifp = adapter->ifp; 3847 struct ifnet *ifp = adapter->ifp;
3838 struct ixgbe_hw *hw = &adapter->hw; 3848 struct ixgbe_hw *hw = &adapter->hw;
3839 u32 wus; 3849 u32 wus;
3840 3850
3841 INIT_DEBUGOUT("ixgbe_resume: begin"); 3851 INIT_DEBUGOUT("ixgbe_resume: begin");
3842 3852
3843 IXGBE_CORE_LOCK(adapter); 3853 IXGBE_CORE_LOCK(adapter);
3844 3854
3845 /* Read & clear WUS register */ 3855 /* Read & clear WUS register */
3846 wus = IXGBE_READ_REG(hw, IXGBE_WUS); 3856 wus = IXGBE_READ_REG(hw, IXGBE_WUS);
3847 if (wus) 3857 if (wus)
3848 device_printf(dev, "Woken up by (WUS): %#010x\n", 3858 device_printf(dev, "Woken up by (WUS): %#010x\n",
3849 IXGBE_READ_REG(hw, IXGBE_WUS)); 3859 IXGBE_READ_REG(hw, IXGBE_WUS));
3850 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); 3860 IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
3851 /* And clear WUFC until next low-power transition */ 3861 /* And clear WUFC until next low-power transition */
3852 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 3862 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3853 3863
3854 /* 3864 /*
3855 * Required after D3->D0 transition; 3865 * Required after D3->D0 transition;
3856 * will re-advertise all previous advertised speeds 3866 * will re-advertise all previous advertised speeds
3857 */ 3867 */
3858 if (ifp->if_flags & IFF_UP) 3868 if (ifp->if_flags & IFF_UP)
3859 ixgbe_init_locked(adapter); 3869 ixgbe_init_locked(adapter);
3860 3870
3861 IXGBE_CORE_UNLOCK(adapter); 3871 IXGBE_CORE_UNLOCK(adapter);
3862 3872
3863 return true; 3873 return true;
3864} /* ixgbe_resume */ 3874} /* ixgbe_resume */
3865 3875
3866/* 3876/*
3867 * Set the various hardware offload abilities. 3877 * Set the various hardware offload abilities.
3868 * 3878 *
3869 * This takes the ifnet's if_capenable flags (e.g. set by the user using 3879 * This takes the ifnet's if_capenable flags (e.g. set by the user using
3870 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what 3880 * ifconfig) and indicates to the OS via the ifnet's if_hwassist field what
3871 * mbuf offload flags the driver will understand. 3881 * mbuf offload flags the driver will understand.
3872 */ 3882 */
3873static void 3883static void
3874ixgbe_set_if_hwassist(struct adapter *adapter) 3884ixgbe_set_if_hwassist(struct adapter *adapter)
3875{ 3885{
3876 /* XXX */ 3886 /* XXX */
3877} 3887}
3878 3888
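The NetBSD port leaves this routine empty (if_hwassist in the comment above is FreeBSD terminology). A minimal sketch of what the NetBSD equivalent would typically do, assuming the usual if_csum_flags_tx/M_CSUM_* mechanism; this is illustrative, not what the driver currently does:

	static void
	ixgbe_set_if_hwassist_sketch(struct adapter *adapter)
	{
		struct ifnet *ifp = adapter->ifp;

		/* Mirror the enabled TX capabilities into the mbuf
		 * checksum flags the stack may request from the driver. */
		ifp->if_csum_flags_tx = 0;
		if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx)
			ifp->if_csum_flags_tx |= M_CSUM_IPv4;
		if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx)
			ifp->if_csum_flags_tx |= M_CSUM_TCPv4;
		if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx)
			ifp->if_csum_flags_tx |= M_CSUM_UDPv4;
	}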
3879/************************************************************************ 3889/************************************************************************
3880 * ixgbe_init_locked - Init entry point 3890 * ixgbe_init_locked - Init entry point
3881 * 3891 *
 3882 * Used in two ways: it is used by the stack as the init entry 3892 * Used in two ways: it is used by the stack as the init entry
 3883 * point in the network interface structure, and it is also used 3893 * point in the network interface structure, and it is also used
 3884 * by the driver as a hw/sw initialization routine to get to a 3894 * by the driver as a hw/sw initialization routine to get to a
 3885 * consistent state. 3895 * consistent state.
3886 * 3896 *
3887 * return 0 on success, positive on failure 3897 * return 0 on success, positive on failure
3888 ************************************************************************/ 3898 ************************************************************************/
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que;
	struct tx_ring *txr;
	struct rx_ring *rxr;
	u32 txdctl, mhadd;
	u32 rxdctl, rxctrl;
	u32 ctrl_ext;
	bool unsupported_sfp = false;
	int i, j, err;

	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */

	KASSERT(mutex_owned(&adapter->core_mtx));
	INIT_DEBUGOUT("ixgbe_init_locked: begin");

	hw->need_unsupported_sfp_recovery = false;
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);
	for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
		que->disabled_count = 0;

	/* XXX I moved this here from the SIOCSIFMTU case in ixgbe_ioctl(). */
	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Queue indices may change with IOV mode */
	ixgbe_align_all_queue_indices(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);

	/* Get the latest MAC address; the user may have set an LAA */
	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set hardware offload abilities from ifnet flags */
	ixgbe_set_if_hwassist(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_init_hw(hw);

	ixgbe_initialize_iov(adapter);

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_rxfilter(adapter);

	/* Determine the correct mbuf pool, based on frame size */
	if (adapter->max_frame_size <= MCLBYTES)
		adapter->rx_mbuf_sz = MCLBYTES;
	else
		adapter->rx_mbuf_sz = MJUMPAGESIZE;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	/* Enable SDP & MSI-X interrupts based on adapter */
	ixgbe_config_gpie(adapter);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		/* aka IXGBE_MAXFRS on 82599 and newer */
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_rings[i];
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
	}
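
	/*
	 * For illustration: the shifts above pack the three TXDCTL
	 * thresholds at bit offsets 0, 8 and 16, so
	 *
	 *	(32 << 0) | (1 << 8) | (8 << 16) = 0x080120
	 *
	 * programs PTHRESH = 32, HTHRESH = 1 and WTHRESH = 8 in one
	 * register value, matching the comment in the loop.
	 */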

	for (i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			 * PTHRESH = 21
			 * HTHRESH = 4
			 * WTHRESH = 8
			 */
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
		for (j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		IXGBE_WRITE_BARRIER(hw);

		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
#ifdef DEV_NETMAP
		if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
		    (ifp->if_capenable & IFCAP_NETMAP)) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}
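
	/*
	 * For illustration (ring size hypothetical): with num_rx_desc =
	 * 1024 and nm_kr_rxspace(kring) = 0 slots held back by netmap,
	 * the netmap branch writes RDT = 1024 - 1 - 0 = 1023, the same
	 * "whole ring available" tail that the non-netmap branch
	 * programs as num_rx_desc - 1.
	 */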

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/MSI-X routing */
	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {	/* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	ixgbe_init_fdir(adapter);

	/*
	 * Check on any SFP devices that
	 * need to be kick-started
	 */
	if (hw->phy.type == ixgbe_phy_none) {
		err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
			unsupported_sfp = true;
	} else if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		unsupported_sfp = true;

	if (unsupported_sfp)
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");

	/* Set moderation on the Link interrupt */
	ixgbe_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);

	/* Enable EEE power saving */
	if (adapter->feat_cap & IXGBE_FEATURE_EEE)
		hw->mac.ops.setup_eee(hw,
		    adapter->feat_en & IXGBE_FEATURE_EEE);

	/* Enable power to the phy. */
	if (!unsupported_sfp) {
		ixgbe_set_phy_power(hw, TRUE);

		/* Config/Enable Link */
		ixgbe_config_link(adapter);
	}

	/* Hardware Packet Buffer & Flow Control setup */
	ixgbe_config_delay_values(adapter);

	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Setup DMA Coalescing */
	ixgbe_config_dmac(adapter);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Enable the use of the MBX by the VFs */
	if (adapter->feat_en & IXGBE_FEATURE_SRIOV) {
		ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
		ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	}

	/* Update saved flags. See ixgbe_ifflags_cb() */
	adapter->if_flags = ifp->if_flags;
	adapter->ec_capenable = adapter->osdep.ec.ec_capenable;

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;

	return;
} /* ixgbe_init_locked */

/************************************************************************
 * ixgbe_init
 ************************************************************************/
static int
ixgbe_init(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	return 0; /* XXX ixgbe_init_locked cannot fail? really? */
} /* ixgbe_init */

/************************************************************************
 * ixgbe_set_ivar
 *
 * Setup the correct IVAR register for a particular MSI-X interrupt
 * (yes this is all very magic and confusing :)
 *  - entry is the register array entry
 *  - vector is the MSI-X vector for this queue
 *  - type is RX/TX/MISC
 ************************************************************************/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xffUL << (8 * (entry & 0x3)));
		ivar |= ((u32)vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else { /* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xffUL << index);
			ivar |= ((u32)vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;
	default:
		break;
	}
} /* ixgbe_set_ivar */
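
/*
 * Worked example of the IVAR math above (queue/vector numbers are
 * hypothetical): on an 82599-class MAC, mapping RX entry 3 (type 0) to
 * MSI-X vector 5 gives index = (16 * (3 & 1)) + (8 * 0) = 16, so bits
 * 23:16 of IVAR(3 >> 1) = IVAR(1) receive 5 | IXGBE_IVAR_ALLOC_VAL;
 * the matching TX entry (type 1) lands in bits 31:24 of the same
 * register.
 */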

/************************************************************************
 * ixgbe_configure_ivars
 ************************************************************************/
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;
	u32 newitr;

	if (ixgbe_max_interrupt_rate > 0)
		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
	else {
		/*
		 * Disable DMA coalescing if interrupt moderation is
		 * disabled.
		 */
		adapter->dmac = 0;
		newitr = 0;
	}

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		struct rx_ring *rxr = &adapter->rx_rings[i];
		struct tx_ring *txr = &adapter->tx_rings[i];
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_eitr_write(adapter, que->msix, newitr);
		/*
		 * To eliminate the influence of the previous state.
		 * At this point the Tx/Rx interrupt handler
		 * (ixgbe_msix_que()) cannot be called, so neither
		 * IXGBE_TX_LOCK nor IXGBE_RX_LOCK is required.
		 */
		que->eitr_setting = 0;
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixgbe_configure_ivars */
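
/*
 * For illustration (rate value hypothetical): with
 * ixgbe_max_interrupt_rate = 31250 interrupts/s, the computation above
 * yields newitr = (4000000 / 31250) & 0x0FF8 = 128 & 0x0FF8 = 0x80.
 * The mask keeps only bits 3-11 of the EITR interval field, so the
 * value programmed is always a multiple of 8.
 */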

/************************************************************************
 * ixgbe_config_gpie
 ************************************************************************/
static void
ixgbe_config_gpie(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gpie;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	if (adapter->feat_en & IXGBE_FEATURE_MSIX) {
		/* Enable Enhanced MSI-X mode */
		gpie |= IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_EIAME
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD;
	}

	/* Fan Failure Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
		gpie |= IXGBE_SDP1_GPIEN;

	/* Thermal Sensor Interrupt */
	if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR)
		gpie |= IXGBE_SDP0_GPIEN_X540;

	/* Link detection */
	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN;
		break;
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		gpie |= IXGBE_SDP0_GPIEN_X540;
		break;
	default:
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

} /* ixgbe_config_gpie */

/************************************************************************
 * ixgbe_config_delay_values
 *
 * Requires adapter->max_frame_size to be set.
 ************************************************************************/
static void
@@ -4386,2001 +4396,2001 @@ ixgbe_set_rxfilter(struct adapter *adapt
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	/* Update multicast filter entries only when it's not ALLMULTI */
	if ((ec->ec_flags & ETHER_F_ALLMULTI) == 0) {
		ETHER_UNLOCK(ec);
		update_ptr = (u8 *)mta;
		ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
		    ixgbe_mc_array_itr, TRUE);
	} else
		ETHER_UNLOCK(ec);
} /* ixgbe_set_rxfilter */

/************************************************************************
 * ixgbe_mc_array_itr
 *
 * An iterator function needed by the multicast shared code.
 * It feeds the shared code routine the addresses in the
 * array of ixgbe_set_rxfilter() one by one.
 ************************************************************************/
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	struct ixgbe_mc_addr *mta;

	mta = (struct ixgbe_mc_addr *)*update_ptr;
	*vmdq = mta->vmdq;

	*update_ptr = (u8*)(mta + 1);

	return (mta->addr);
} /* ixgbe_mc_array_itr */
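
/*
 * For illustration: each ixgbe_mc_array_itr() call returns the current
 * entry's address and advances *update_ptr by sizeof(struct
 * ixgbe_mc_addr), so the shared code can walk the mta[] array built by
 * ixgbe_set_rxfilter() without knowing its element layout.
 */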

/************************************************************************
 * ixgbe_local_timer - Timer routine
 *
 * Checks for link status, updates statistics,
 * and runs the watchdog check.
 ************************************************************************/
static void
ixgbe_local_timer(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_local_timer1(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

static void
ixgbe_local_timer1(void *arg)
{
	struct adapter *adapter = arg;
	device_t dev = adapter->dev;
	struct ix_queue *que = adapter->queues;
	u64 queues = 0;
	u64 v0, v1, v2, v3, v4, v5, v6, v7;
	int hung = 0;
	int i;

	KASSERT(mutex_owned(&adapter->core_mtx));

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/* Update some event counters */
	v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		struct tx_ring *txr = que->txr;

		v0 += txr->q_efbig_tx_dma_setup;
		v1 += txr->q_mbuf_defrag_failed;
		v2 += txr->q_efbig2_tx_dma_setup;
		v3 += txr->q_einval_tx_dma_setup;
		v4 += txr->q_other_tx_dma_setup;
		v5 += txr->q_eagain_tx_dma_setup;
		v6 += txr->q_enomem_tx_dma_setup;
		v7 += txr->q_tso_err;
	}
	adapter->efbig_tx_dma_setup.ev_count = v0;
	adapter->mbuf_defrag_failed.ev_count = v1;
	adapter->efbig2_tx_dma_setup.ev_count = v2;
	adapter->einval_tx_dma_setup.ev_count = v3;
	adapter->other_tx_dma_setup.ev_count = v4;
	adapter->eagain_tx_dma_setup.ev_count = v5;
	adapter->enomem_tx_dma_setup.ev_count = v6;
	adapter->tso_err.ev_count = v7;

	/*
	 * Check the TX queues status
	 *   - mark hung queues so we don't schedule on them
	 *   - watchdog only if all queues show hung
	 */
	que = adapter->queues;
	for (i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= 1ULL << que->me;
		/*
		 * Each time txeof runs without cleaning while there
		 * are uncleaned descriptors, it increments busy. If
		 * we reach the MAX we declare the queue hung.
		 */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~(1ULL << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & (1ULL << que->me)) == 0)
				adapter->active_queues |= 1ULL << que->me;
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,
			    "Warning queue %d appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
#if 0 /* XXX Avoid unexpectedly disabling interrupt forever (PR#53294) */
	else if (queues != 0) { /* Force an IRQ on queues with work */
		que = adapter->queues;
		for (i = 0; i < adapter->num_queues; i++, que++) {
			mutex_enter(&que->dc_mtx);
			if (que->disabled_count == 0)
				ixgbe_rearm_queues(adapter,
				    queues & ((u64)1 << i));
			mutex_exit(&que->dc_mtx);
		}
	}
#endif

out:
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events.ev_count++;
	ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
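
/*
 * For illustration (queue numbers hypothetical): the `queues' bitmask
 * above carries one bit per queue, so with queues 0 and 2 busy it holds
 * (1ULL << 0) | (1ULL << 2) = 0x5; the disabled #if 0 block would then
 * rearm exactly those two queues.
 */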

/************************************************************************
 * ixgbe_recovery_mode_timer - Recovery mode timer routine
 ************************************************************************/
static void
ixgbe_recovery_mode_timer(void *arg)
{
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	if (ixgbe_fw_recovery_mode(hw)) {
		if (atomic_cas_uint(&adapter->recovery_mode, 0, 1)) {
			/* Firmware error detected, entering recovery mode */
			device_printf(adapter->dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

			if (hw->adapter_stopped == FALSE)
				ixgbe_stop(adapter);
		}
	} else
		atomic_cas_uint(&adapter->recovery_mode, 1, 0);

	callout_reset(&adapter->recovery_mode_timer, hz,
	    ixgbe_recovery_mode_timer, adapter);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_recovery_mode_timer */

/************************************************************************
 * ixgbe_sfp_probe
 *
 * Determine if a port had optics inserted.
 ************************************************************************/
static bool
ixgbe_sfp_probe(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	bool result = FALSE;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		adapter->sfp_probe = FALSE;
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module detected!\n");
			device_printf(dev,
			    "Reload driver with supported module.\n");
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		result = TRUE;
	}
out:

	return (result);
} /* ixgbe_sfp_probe */

/************************************************************************
 * ixgbe_handle_mod - Tasklet for SFP module interrupts
 ************************************************************************/
static void
ixgbe_handle_mod(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t dev = adapter->dev;
	u32 err, cage_full = 0;

	IXGBE_CORE_LOCK(adapter);
	++adapter->mod_sicount.ev_count;
	if (adapter->hw.need_crosstalk_fix) {
		switch (hw->mac.type) {
		case ixgbe_mac_82599EB:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP2;
			break;
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
			    IXGBE_ESDP_SDP0;
			break;
		default:
			break;
		}

		if (!cage_full)
			goto out;
	}

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		goto out;
	}

	if (hw->need_unsupported_sfp_recovery) {
		device_printf(dev, "Recovering from unsupported SFP\n");
		/*
		 * We could recover the status by calling setup_sfp(),
		 * setup_link() and some others, but it's complex and
		 * might not work correctly in some unknown cases. To
		 * avoid that kind of problem, call ixgbe_init_locked().
		 * It's a simple and safe approach.
		 */
		ixgbe_init_locked(adapter);
	} else {
		if (hw->mac.type == ixgbe_mac_82598EB)
			err = hw->phy.ops.reset(hw);
		else {
			err = hw->mac.ops.setup_sfp(hw);
			hw->phy.sfp_setup_needed = FALSE;
		}
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Setup failure - unsupported SFP+ module type.\n");
			goto out;
		}
	}
	softint_schedule(adapter->msf_si);
out:
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mod */


/************************************************************************
 * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts
 ************************************************************************/
static void
ixgbe_handle_msf(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiate;

	IXGBE_CORE_LOCK(adapter);
	++adapter->msf_sicount.ev_count;
	/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
	adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	else
		negotiate = 0;
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, TRUE);

	/* Adjust media types shown in ifconfig */
	ifmedia_removeall(&adapter->media);
	ixgbe_add_media_types(adapter);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_msf */

/************************************************************************
 * ixgbe_handle_phy - Tasklet for external PHY interrupts
 ************************************************************************/
static void
ixgbe_handle_phy(void *context)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	int error;

	++adapter->phy_sicount.ev_count;
	error = hw->phy.ops.handle_lasi(hw);
	if (error == IXGBE_ERR_OVERTEMP)
		device_printf(adapter->dev,
4716 "CRITICAL: EXTERNAL PHY OVER TEMP!! " 4726 "CRITICAL: EXTERNAL PHY OVER TEMP!! "
4717 " PHY will downshift to lower power state!\n"); 4727 " PHY will downshift to lower power state!\n");
	else if (error)
		device_printf(adapter->dev,
		    "Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */

static void
ixgbe_ifstop(struct ifnet *ifp, int disable)
{
	struct adapter *adapter = ifp->if_softc;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

/************************************************************************
 * ixgbe_stop - Stop the hardware
 *
 * Disables all traffic on the adapter by issuing a
 * global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixgbe_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KASSERT(mutex_owned(&adapter->core_mtx));

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know... */
	ifp->if_flags &= ~IFF_RUNNING;

	ixgbe_reset_hw(hw);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_stop_mac_link_on_d3_82599(hw);
	/* Turn off the laser - noop with no optics */
	ixgbe_disable_tx_laser(hw);

	/* Update the stack */
	adapter->link_up = FALSE;
	ixgbe_update_link_status(adapter);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
} /* ixgbe_stop */

/************************************************************************
 * ixgbe_update_link_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *	 The real check of the hardware only happens with
 *	 a link interrupt.
 ************************************************************************/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	KASSERT(mutex_owned(&adapter->core_mtx));

	if (adapter->link_up) {
		if (adapter->link_active != LINK_STATE_UP) {
			/*
			 * To eliminate the influence of the previous
			 * state, in the same way as ixgbe_init_locked().
			 */
			struct ix_queue *que = adapter->queues;
			for (int i = 0; i < adapter->num_queues; i++, que++)
				que->eitr_setting = 0;

			if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) {
				/*
				 * Discard the counts of both MAC Local
				 * Fault and Remote Fault because those
				 * registers are valid only when the link
				 * is up and running at 10Gbps.
				 */
				IXGBE_READ_REG(hw, IXGBE_MLFC);
				IXGBE_READ_REG(hw, IXGBE_MRFC);
			}

			if (bootverbose) {
				const char *bpsmsg;

				switch (adapter->link_speed) {
				case IXGBE_LINK_SPEED_10GB_FULL:
					bpsmsg = "10 Gbps";
					break;
				case IXGBE_LINK_SPEED_5GB_FULL:
					bpsmsg = "5 Gbps";
					break;
				case IXGBE_LINK_SPEED_2_5GB_FULL:
					bpsmsg = "2.5 Gbps";
					break;
				case IXGBE_LINK_SPEED_1GB_FULL:
					bpsmsg = "1 Gbps";
					break;
				case IXGBE_LINK_SPEED_100_FULL:
					bpsmsg = "100 Mbps";
					break;
				case IXGBE_LINK_SPEED_10_FULL:
					bpsmsg = "10 Mbps";
					break;
				default:
					bpsmsg = "unknown speed";
					break;
				}
				device_printf(dev, "Link is up %s %s\n",
				    bpsmsg, "Full Duplex");
4840 } 4850 }
4841 adapter->link_active = LINK_STATE_UP; 4851 adapter->link_active = LINK_STATE_UP;
4842 /* Update any Flow Control changes */ 4852 /* Update any Flow Control changes */
4843 ixgbe_fc_enable(&adapter->hw); 4853 ixgbe_fc_enable(&adapter->hw);
4844 /* Update DMA coalescing config */ 4854 /* Update DMA coalescing config */
4845 ixgbe_config_dmac(adapter); 4855 ixgbe_config_dmac(adapter);
4846 if_link_state_change(ifp, LINK_STATE_UP); 4856 if_link_state_change(ifp, LINK_STATE_UP);
4847 4857
4848 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 4858 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4849 ixgbe_ping_all_vfs(adapter); 4859 ixgbe_ping_all_vfs(adapter);
4850 } 4860 }
4851 } else { 4861 } else {
4852 /* 4862 /*
4853 * Do it when link active changes to DOWN. i.e. 4863 * Do it when link active changes to DOWN. i.e.
4854 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN 4864 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
4855 * b) LINK_STATE_UP -> LINK_STATE_DOWN 4865 * b) LINK_STATE_UP -> LINK_STATE_DOWN
4856 */ 4866 */
4857 if (adapter->link_active != LINK_STATE_DOWN) { 4867 if (adapter->link_active != LINK_STATE_DOWN) {
4858 if (bootverbose) 4868 if (bootverbose)
4859 device_printf(dev, "Link is Down\n"); 4869 device_printf(dev, "Link is Down\n");
4860 if_link_state_change(ifp, LINK_STATE_DOWN); 4870 if_link_state_change(ifp, LINK_STATE_DOWN);
4861 adapter->link_active = LINK_STATE_DOWN; 4871 adapter->link_active = LINK_STATE_DOWN;
4862 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 4872 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4863 ixgbe_ping_all_vfs(adapter); 4873 ixgbe_ping_all_vfs(adapter);
4864 ixgbe_drain_all(adapter); 4874 ixgbe_drain_all(adapter);
4865 } 4875 }
4866 } 4876 }
4867} /* ixgbe_update_link_status */ 4877} /* ixgbe_update_link_status */
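The handler above acts only on state transitions, with LINK_STATE_UNKNOWN as the initial state so that the first observed link state is always treated as a change. A minimal sketch of the same tri-state pattern (hypothetical helper, using the LINK_STATE_* constants from net/if.h):

	static void
	link_transition_sketch(int *active, bool link_up)
	{
		int next = link_up ? LINK_STATE_UP : LINK_STATE_DOWN;

		if (*active == next)
			return;		/* no transition: nothing to do */
		*active = next;
		/* notify the stack, cf. if_link_state_change() above */
	}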
4868 4878
4869/************************************************************************ 4879/************************************************************************
4870 * ixgbe_config_dmac - Configure DMA Coalescing 4880 * ixgbe_config_dmac - Configure DMA Coalescing
4871 ************************************************************************/ 4881 ************************************************************************/
4872static void 4882static void
4873ixgbe_config_dmac(struct adapter *adapter) 4883ixgbe_config_dmac(struct adapter *adapter)
4874{ 4884{
4875 struct ixgbe_hw *hw = &adapter->hw; 4885 struct ixgbe_hw *hw = &adapter->hw;
4876 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; 4886 struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
4877 4887
4878 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) 4888 if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
4879 return; 4889 return;
4880 4890
4881 if (dcfg->watchdog_timer ^ adapter->dmac || 4891 if (dcfg->watchdog_timer ^ adapter->dmac ||
4882 dcfg->link_speed ^ adapter->link_speed) { 4892 dcfg->link_speed ^ adapter->link_speed) {
4883 dcfg->watchdog_timer = adapter->dmac; 4893 dcfg->watchdog_timer = adapter->dmac;
4884 dcfg->fcoe_en = false; 4894 dcfg->fcoe_en = false;
4885 dcfg->link_speed = adapter->link_speed; 4895 dcfg->link_speed = adapter->link_speed;
4886 dcfg->num_tcs = 1; 4896 dcfg->num_tcs = 1;
4887 4897
4888 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", 4898 INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n",
4889 dcfg->watchdog_timer, dcfg->link_speed); 4899 dcfg->watchdog_timer, dcfg->link_speed);
4890 4900
4891 hw->mac.ops.dmac_config(hw); 4901 hw->mac.ops.dmac_config(hw);
4892 } 4902 }
4893} /* ixgbe_config_dmac */ 4903} /* ixgbe_config_dmac */
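The guard in ixgbe_config_dmac() uses integer XOR as an inequality test: a ^ b is nonzero exactly when a != b, so the hardware is only reprogrammed when the watchdog timer or the link speed actually changed. An equivalent, more conventional spelling of the same condition:

	if (dcfg->watchdog_timer != adapter->dmac ||
	    dcfg->link_speed != adapter->link_speed) {
		/* ... update dcfg and call hw->mac.ops.dmac_config(hw) ... */
	}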
4894 4904
4895/************************************************************************ 4905/************************************************************************
4896 * ixgbe_enable_intr 4906 * ixgbe_enable_intr
4897 ************************************************************************/ 4907 ************************************************************************/
4898static void 4908static void
4899ixgbe_enable_intr(struct adapter *adapter) 4909ixgbe_enable_intr(struct adapter *adapter)
4900{ 4910{
4901 struct ixgbe_hw *hw = &adapter->hw; 4911 struct ixgbe_hw *hw = &adapter->hw;
4902 struct ix_queue *que = adapter->queues; 4912 struct ix_queue *que = adapter->queues;
4903 u32 mask, fwsm; 4913 u32 mask, fwsm;
4904 4914
4905 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 4915 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
4906 4916
4907 switch (adapter->hw.mac.type) { 4917 switch (adapter->hw.mac.type) {
4908 case ixgbe_mac_82599EB: 4918 case ixgbe_mac_82599EB:
4909 mask |= IXGBE_EIMS_ECC; 4919 mask |= IXGBE_EIMS_ECC;
4910 /* Temperature sensor on some adapters */ 4920 /* Temperature sensor on some adapters */
4911 mask |= IXGBE_EIMS_GPI_SDP0; 4921 mask |= IXGBE_EIMS_GPI_SDP0;
4912 /* SFP+ (RX_LOS_N & MOD_ABS_N) */ 4922 /* SFP+ (RX_LOS_N & MOD_ABS_N) */
4913 mask |= IXGBE_EIMS_GPI_SDP1; 4923 mask |= IXGBE_EIMS_GPI_SDP1;
4914 mask |= IXGBE_EIMS_GPI_SDP2; 4924 mask |= IXGBE_EIMS_GPI_SDP2;
4915 break; 4925 break;
4916 case ixgbe_mac_X540: 4926 case ixgbe_mac_X540:
4917 /* Detect if Thermal Sensor is enabled */ 4927 /* Detect if Thermal Sensor is enabled */
4918 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); 4928 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4919 if (fwsm & IXGBE_FWSM_TS_ENABLED) 4929 if (fwsm & IXGBE_FWSM_TS_ENABLED)
4920 mask |= IXGBE_EIMS_TS; 4930 mask |= IXGBE_EIMS_TS;
4921 mask |= IXGBE_EIMS_ECC; 4931 mask |= IXGBE_EIMS_ECC;
4922 break; 4932 break;
4923 case ixgbe_mac_X550: 4933 case ixgbe_mac_X550:
4924 /* MAC thermal sensor is automatically enabled */ 4934 /* MAC thermal sensor is automatically enabled */
4925 mask |= IXGBE_EIMS_TS; 4935 mask |= IXGBE_EIMS_TS;
4926 mask |= IXGBE_EIMS_ECC; 4936 mask |= IXGBE_EIMS_ECC;
4927 break; 4937 break;
4928 case ixgbe_mac_X550EM_x: 4938 case ixgbe_mac_X550EM_x:
4929 case ixgbe_mac_X550EM_a: 4939 case ixgbe_mac_X550EM_a:
4930 /* Some devices use SDP0 for important information */ 4940 /* Some devices use SDP0 for important information */
4931 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || 4941 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
4932 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || 4942 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
4933 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || 4943 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
4934 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) 4944 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
4935 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); 4945 mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
4936 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 4946 if (hw->phy.type == ixgbe_phy_x550em_ext_t)
4937 mask |= IXGBE_EICR_GPI_SDP0_X540; 4947 mask |= IXGBE_EICR_GPI_SDP0_X540;
4938 mask |= IXGBE_EIMS_ECC; 4948 mask |= IXGBE_EIMS_ECC;
4939 break; 4949 break;
4940 default: 4950 default:
4941 break; 4951 break;
4942 } 4952 }
4943 4953
4944 /* Enable Fan Failure detection */ 4954 /* Enable Fan Failure detection */
4945 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) 4955 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL)
4946 mask |= IXGBE_EIMS_GPI_SDP1; 4956 mask |= IXGBE_EIMS_GPI_SDP1;
4947 /* Enable SR-IOV */ 4957 /* Enable SR-IOV */
4948 if (adapter->feat_en & IXGBE_FEATURE_SRIOV) 4958 if (adapter->feat_en & IXGBE_FEATURE_SRIOV)
4949 mask |= IXGBE_EIMS_MAILBOX; 4959 mask |= IXGBE_EIMS_MAILBOX;
4950 /* Enable Flow Director */ 4960 /* Enable Flow Director */
4951 if (adapter->feat_en & IXGBE_FEATURE_FDIR) 4961 if (adapter->feat_en & IXGBE_FEATURE_FDIR)
4952 mask |= IXGBE_EIMS_FLOW_DIR; 4962 mask |= IXGBE_EIMS_FLOW_DIR;
4953 4963
4954 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); 4964 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
4955 4965
4956 /* With MSI-X we use auto clear */ 4966 /* With MSI-X we use auto clear */
4957 if (adapter->msix_mem) { 4967 if (adapter->msix_mem) {
4958 mask = IXGBE_EIMS_ENABLE_MASK; 4968 mask = IXGBE_EIMS_ENABLE_MASK;
4959 /* Don't autoclear Link */ 4969 /* Don't autoclear Link */
4960 mask &= ~IXGBE_EIMS_OTHER; 4970 mask &= ~IXGBE_EIMS_OTHER;
4961 mask &= ~IXGBE_EIMS_LSC; 4971 mask &= ~IXGBE_EIMS_LSC;
4962 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) 4972 if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
4963 mask &= ~IXGBE_EIMS_MAILBOX; 4973 mask &= ~IXGBE_EIMS_MAILBOX;
4964 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 4974 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
4965 } 4975 }
4966 4976
4967 /* 4977 /*
4968 * Now enable all queues; this is done separately to 4978 * Now enable all queues; this is done separately to
4969 * allow for handling the extended (beyond 32) MSI-X 4979 * allow for handling the extended (beyond 32) MSI-X
4970 * vectors that can be used by the 82599. 4980 * vectors that can be used by the 82599.
4971 */ 4981 */
4972 for (int i = 0; i < adapter->num_queues; i++, que++) 4982 for (int i = 0; i < adapter->num_queues; i++, que++)
4973 ixgbe_enable_queue(adapter, que->msix); 4983 ixgbe_enable_queue(adapter, que->msix);
4974 4984
4975 IXGBE_WRITE_FLUSH(hw); 4985 IXGBE_WRITE_FLUSH(hw);
4976 4986
4977} /* ixgbe_enable_intr */ 4987} /* ixgbe_enable_intr */
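With MSI-X, every cause written into EIAC is cleared by hardware as soon as EICR is read, so the handler never has to clear those bits itself. The link, "other" and (with SR-IOV) mailbox causes are kept out of the auto-clear mask, presumably because they are serviced later from softints and must stay latched until then. Condensed, the derivation above is:

	u32 eiac = IXGBE_EIMS_ENABLE_MASK;		/* start from all causes */
	eiac &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);	/* keep link manual */
	if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
		eiac &= ~IXGBE_EIMS_MAILBOX;		/* and mailbox too */
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);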
4978 4988
4979/************************************************************************ 4989/************************************************************************
4980 * ixgbe_disable_intr_internal 4990 * ixgbe_disable_intr_internal
4981 ************************************************************************/ 4991 ************************************************************************/
4982static void 4992static void
4983ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok) 4993ixgbe_disable_intr_internal(struct adapter *adapter, bool nestok)
4984{ 4994{
4985 struct ix_queue *que = adapter->queues; 4995 struct ix_queue *que = adapter->queues;
4986 4996
4987 /* disable interrupts other than queues */ 4997 /* disable interrupts other than queues */
4988 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE); 4998 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~IXGBE_EIMC_RTX_QUEUE);
4989 4999
4990 if (adapter->msix_mem) 5000 if (adapter->msix_mem)
4991 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); 5001 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
4992 5002
4993 for (int i = 0; i < adapter->num_queues; i++, que++) 5003 for (int i = 0; i < adapter->num_queues; i++, que++)
4994 ixgbe_disable_queue_internal(adapter, que->msix, nestok); 5004 ixgbe_disable_queue_internal(adapter, que->msix, nestok);
4995 5005
4996 IXGBE_WRITE_FLUSH(&adapter->hw); 5006 IXGBE_WRITE_FLUSH(&adapter->hw);
4997 5007
4998} /* ixgbe_disable_intr_internal */ 5008} /* ixgbe_disable_intr_internal */
4999 5009
5000/************************************************************************ 5010/************************************************************************
5001 * ixgbe_disable_intr 5011 * ixgbe_disable_intr
5002 ************************************************************************/ 5012 ************************************************************************/
5003static void 5013static void
5004ixgbe_disable_intr(struct adapter *adapter) 5014ixgbe_disable_intr(struct adapter *adapter)
5005{ 5015{
5006 5016
5007 ixgbe_disable_intr_internal(adapter, true); 5017 ixgbe_disable_intr_internal(adapter, true);
5008} /* ixgbe_disable_intr */ 5018} /* ixgbe_disable_intr */
5009 5019
5010/************************************************************************ 5020/************************************************************************
5011 * ixgbe_ensure_disabled_intr 5021 * ixgbe_ensure_disabled_intr
5012 ************************************************************************/ 5022 ************************************************************************/
5013void 5023void
5014ixgbe_ensure_disabled_intr(struct adapter *adapter) 5024ixgbe_ensure_disabled_intr(struct adapter *adapter)
5015{ 5025{
5016 5026
5017 ixgbe_disable_intr_internal(adapter, false); 5027 ixgbe_disable_intr_internal(adapter, false);
5018} /* ixgbe_ensure_disabled_intr */ 5028} /* ixgbe_ensure_disabled_intr */
5019 5029
5020/************************************************************************ 5030/************************************************************************
5021 * ixgbe_legacy_irq - Legacy Interrupt Service routine 5031 * ixgbe_legacy_irq - Legacy Interrupt Service routine
5022 ************************************************************************/ 5032 ************************************************************************/
5023static int 5033static int
5024ixgbe_legacy_irq(void *arg) 5034ixgbe_legacy_irq(void *arg)
5025{ 5035{
5026 struct ix_queue *que = arg; 5036 struct ix_queue *que = arg;
5027 struct adapter *adapter = que->adapter; 5037 struct adapter *adapter = que->adapter;
5028 struct ixgbe_hw *hw = &adapter->hw; 5038 struct ixgbe_hw *hw = &adapter->hw;
5029 struct ifnet *ifp = adapter->ifp; 5039 struct ifnet *ifp = adapter->ifp;
5030 struct tx_ring *txr = adapter->tx_rings; 5040 struct tx_ring *txr = adapter->tx_rings;
5031 bool more = false; 5041 bool more = false;
5032 u32 eicr, eicr_mask; 5042 u32 eicr, eicr_mask;
5033 5043
5034 /* Silicon errata #26 on 82598 */ 5044 /* Silicon errata #26 on 82598 */
5035 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 5045 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
5036 5046
5037 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 5047 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
5038 5048
5039 adapter->stats.pf.legint.ev_count++; 5049 adapter->stats.pf.legint.ev_count++;
5040 ++que->irqs.ev_count; 5050 ++que->irqs.ev_count;
5041 if (eicr == 0) { 5051 if (eicr == 0) {
5042 adapter->stats.pf.intzero.ev_count++; 5052 adapter->stats.pf.intzero.ev_count++;
5043 if ((ifp->if_flags & IFF_UP) != 0) 5053 if ((ifp->if_flags & IFF_UP) != 0)
5044 ixgbe_enable_intr(adapter); 5054 ixgbe_enable_intr(adapter);
5045 return 0; 5055 return 0;
5046 } 5056 }
5047 5057
5048 if ((ifp->if_flags & IFF_RUNNING) != 0) { 5058 if ((ifp->if_flags & IFF_RUNNING) != 0) {
5049 /* 5059 /*
5050 * See ixgbe_msix_que() regarding "que->txrx_use_workqueue". 5060 * See ixgbe_msix_que() regarding "que->txrx_use_workqueue".
5051 */ 5061 */
5052 que->txrx_use_workqueue = adapter->txrx_use_workqueue; 5062 que->txrx_use_workqueue = adapter->txrx_use_workqueue;
5053 5063
5054#ifdef __NetBSD__ 5064#ifdef __NetBSD__
5055 /* Don't run ixgbe_rxeof in interrupt context */ 5065 /* Don't run ixgbe_rxeof in interrupt context */
5056 more = true; 5066 more = true;
5057#else 5067#else
5058 more = ixgbe_rxeof(que); 5068 more = ixgbe_rxeof(que);
5059#endif 5069#endif
5060 5070
5061 IXGBE_TX_LOCK(txr); 5071 IXGBE_TX_LOCK(txr);
5062 ixgbe_txeof(txr); 5072 ixgbe_txeof(txr);
5063#ifdef notyet 5073#ifdef notyet
5064 if (!ixgbe_ring_empty(ifp, txr->br)) 5074 if (!ixgbe_ring_empty(ifp, txr->br))
5065 ixgbe_start_locked(ifp, txr); 5075 ixgbe_start_locked(ifp, txr);
5066#endif 5076#endif
5067 IXGBE_TX_UNLOCK(txr); 5077 IXGBE_TX_UNLOCK(txr);
5068 } 5078 }
5069 5079
5070 /* Check for fan failure */ 5080 /* Check for fan failure */
5071 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { 5081 if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
5072 ixgbe_check_fan_failure(adapter, eicr, true); 5082 ixgbe_check_fan_failure(adapter, eicr, true);
5073 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 5083 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5074 } 5084 }
5075 5085
5076 /* Link status change */ 5086 /* Link status change */
5077 if (eicr & IXGBE_EICR_LSC) 5087 if (eicr & IXGBE_EICR_LSC)
5078 softint_schedule(adapter->link_si); 5088 softint_schedule(adapter->link_si);
5079 5089
5080 if (ixgbe_is_sfp(hw)) { 5090 if (ixgbe_is_sfp(hw)) {
5081 /* Pluggable optics-related interrupt */ 5091 /* Pluggable optics-related interrupt */
5082 if (hw->mac.type >= ixgbe_mac_X540) 5092 if (hw->mac.type >= ixgbe_mac_X540)
5083 eicr_mask = IXGBE_EICR_GPI_SDP0_X540; 5093 eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
5084 else 5094 else
5085 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); 5095 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw);
5086 5096
5087 if (eicr & eicr_mask) { 5097 if (eicr & eicr_mask) {
5088 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); 5098 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
5089 softint_schedule(adapter->mod_si); 5099 softint_schedule(adapter->mod_si);
5090 } 5100 }
5091 5101
5092 if ((hw->mac.type == ixgbe_mac_82599EB) && 5102 if ((hw->mac.type == ixgbe_mac_82599EB) &&
5093 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { 5103 (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
5094 IXGBE_WRITE_REG(hw, IXGBE_EICR, 5104 IXGBE_WRITE_REG(hw, IXGBE_EICR,
5095 IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 5105 IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
5096 softint_schedule(adapter->msf_si); 5106 softint_schedule(adapter->msf_si);
5097 } 5107 }
5098 } 5108 }
5099 5109
5100 /* External PHY interrupt */ 5110 /* External PHY interrupt */
5101 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && 5111 if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
5102 (eicr & IXGBE_EICR_GPI_SDP0_X540)) 5112 (eicr & IXGBE_EICR_GPI_SDP0_X540))
5103 softint_schedule(adapter->phy_si); 5113 softint_schedule(adapter->phy_si);
5104 5114
5105 if (more) { 5115 if (more) {
5106 que->req.ev_count++; 5116 que->req.ev_count++;
5107 ixgbe_sched_handle_que(adapter, que); 5117 ixgbe_sched_handle_que(adapter, que);
5108 } else 5118 } else
5109 ixgbe_enable_intr(adapter); 5119 ixgbe_enable_intr(adapter);
5110 5120
5111 return 1; 5121 return 1;
5112} /* ixgbe_legacy_irq */ 5122} /* ixgbe_legacy_irq */
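On NetBSD the legacy handler never calls ixgbe_rxeof() directly; forcing "more" defers all RX work to ixgbe_sched_handle_que(), which runs the queue handler from a softint or workqueue. The underlying pattern, as a sketch (names hypothetical):

	static int
	hard_intr_sketch(void *arg)
	{
		struct ix_queue *que = arg;

		/* ack/mask the cause in hardware here ... */
		softint_schedule(que->que_si);	/* heavy lifting deferred */
		return 1;			/* interrupt claimed */
	}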
5113 5123
5114/************************************************************************ 5124/************************************************************************
5115 * ixgbe_free_pciintr_resources 5125 * ixgbe_free_pciintr_resources
5116 ************************************************************************/ 5126 ************************************************************************/
5117static void 5127static void
5118ixgbe_free_pciintr_resources(struct adapter *adapter) 5128ixgbe_free_pciintr_resources(struct adapter *adapter)
5119{ 5129{
5120 struct ix_queue *que = adapter->queues; 5130 struct ix_queue *que = adapter->queues;
5121 int rid; 5131 int rid;
5122 5132
5123 /* 5133 /*
5124 * Release all MSI-X queue resources. 5134 * Release all MSI-X queue resources.
5125 */ 5135 */
5126 for (int i = 0; i < adapter->num_queues; i++, que++) { 5136 for (int i = 0; i < adapter->num_queues; i++, que++) {
5127 if (que->res != NULL) { 5137 if (que->res != NULL) {
5128 pci_intr_disestablish(adapter->osdep.pc, 5138 pci_intr_disestablish(adapter->osdep.pc,
5129 adapter->osdep.ihs[i]); 5139 adapter->osdep.ihs[i]);
5130 adapter->osdep.ihs[i] = NULL; 5140 adapter->osdep.ihs[i] = NULL;
5131 } 5141 }
5132 } 5142 }
5133 5143
5134 /* Clean the Legacy or Link interrupt last */ 5144 /* Clean the Legacy or Link interrupt last */
5135 if (adapter->vector) /* we are doing MSI-X */ 5145 if (adapter->vector) /* we are doing MSI-X */
5136 rid = adapter->vector; 5146 rid = adapter->vector;
5137 else 5147 else
5138 rid = 0; 5148 rid = 0;
5139 5149
5140 if (adapter->osdep.ihs[rid] != NULL) { 5150 if (adapter->osdep.ihs[rid] != NULL) {
5141 pci_intr_disestablish(adapter->osdep.pc, 5151 pci_intr_disestablish(adapter->osdep.pc,
5142 adapter->osdep.ihs[rid]); 5152 adapter->osdep.ihs[rid]);
5143 adapter->osdep.ihs[rid] = NULL; 5153 adapter->osdep.ihs[rid] = NULL;
5144 } 5154 }
5145 5155
5146 if (adapter->osdep.intrs != NULL) { 5156 if (adapter->osdep.intrs != NULL) {
5147 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 5157 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
5148 adapter->osdep.nintrs); 5158 adapter->osdep.nintrs);
5149 adapter->osdep.intrs = NULL; 5159 adapter->osdep.intrs = NULL;
5150 } 5160 }
5151} /* ixgbe_free_pciintr_resources */ 5161} /* ixgbe_free_pciintr_resources */
5152 5162
5153/************************************************************************ 5163/************************************************************************
5154 * ixgbe_free_pci_resources 5164 * ixgbe_free_pci_resources
5155 ************************************************************************/ 5165 ************************************************************************/
5156static void 5166static void
5157ixgbe_free_pci_resources(struct adapter *adapter) 5167ixgbe_free_pci_resources(struct adapter *adapter)
5158{ 5168{
5159 5169
5160 ixgbe_free_pciintr_resources(adapter); 5170 ixgbe_free_pciintr_resources(adapter);
5161 5171
5162 if (adapter->osdep.mem_size != 0) { 5172 if (adapter->osdep.mem_size != 0) {
5163 bus_space_unmap(adapter->osdep.mem_bus_space_tag, 5173 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
5164 adapter->osdep.mem_bus_space_handle, 5174 adapter->osdep.mem_bus_space_handle,
5165 adapter->osdep.mem_size); 5175 adapter->osdep.mem_size);
5166 } 5176 }
5167 5177
5168} /* ixgbe_free_pci_resources */ 5178} /* ixgbe_free_pci_resources */
5169 5179
5170/************************************************************************ 5180/************************************************************************
5171 * ixgbe_set_sysctl_value 5181 * ixgbe_set_sysctl_value
5172 ************************************************************************/ 5182 ************************************************************************/
5173static void 5183static void
5174ixgbe_set_sysctl_value(struct adapter *adapter, const char *name, 5184ixgbe_set_sysctl_value(struct adapter *adapter, const char *name,
5175 const char *description, int *limit, int value) 5185 const char *description, int *limit, int value)
5176{ 5186{
5177 device_t dev = adapter->dev; 5187 device_t dev = adapter->dev;
5178 struct sysctllog **log; 5188 struct sysctllog **log;
5179 const struct sysctlnode *rnode, *cnode; 5189 const struct sysctlnode *rnode, *cnode;
5180 5190
5181 /* 5191 /*
5182 * It's not required to check recovery mode because this function never 5192 * It's not required to check recovery mode because this function never
5183 * touches hardware. 5193 * touches hardware.
5184 */ 5194 */
5185 5195
5186 log = &adapter->sysctllog; 5196 log = &adapter->sysctllog;
5187 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) { 5197 if ((rnode = ixgbe_sysctl_instance(adapter)) == NULL) {
5188 aprint_error_dev(dev, "could not create sysctl root\n"); 5198 aprint_error_dev(dev, "could not create sysctl root\n");
5189 return; 5199 return;
5190 } 5200 }
5191 if (sysctl_createv(log, 0, &rnode, &cnode, 5201 if (sysctl_createv(log, 0, &rnode, &cnode,
5192 CTLFLAG_READWRITE, CTLTYPE_INT, 5202 CTLFLAG_READWRITE, CTLTYPE_INT,
5193 name, SYSCTL_DESCR(description), 5203 name, SYSCTL_DESCR(description),
5194 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0) 5204 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
5195 aprint_error_dev(dev, "could not create sysctl\n"); 5205 aprint_error_dev(dev, "could not create sysctl\n");
5196 *limit = value; 5206 *limit = value;
5197} /* ixgbe_set_sysctl_value */ 5207} /* ixgbe_set_sysctl_value */
5198 5208
5199/************************************************************************ 5209/************************************************************************
5200 * ixgbe_sysctl_flowcntl 5210 * ixgbe_sysctl_flowcntl
5201 * 5211 *
5202 * SYSCTL wrapper around setting Flow Control 5212 * SYSCTL wrapper around setting Flow Control
5203 ************************************************************************/ 5213 ************************************************************************/
5204static int 5214static int
5205ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS) 5215ixgbe_sysctl_flowcntl(SYSCTLFN_ARGS)
5206{ 5216{
5207 struct sysctlnode node = *rnode; 5217 struct sysctlnode node = *rnode;
5208 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5218 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5209 int error, fc; 5219 int error, fc;
5210 5220
5211 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5221 if (ixgbe_fw_recovery_mode_swflag(adapter))
5212 return (EPERM); 5222 return (EPERM);
5213 5223
5214 fc = adapter->hw.fc.current_mode; 5224 fc = adapter->hw.fc.current_mode;
5215 node.sysctl_data = &fc; 5225 node.sysctl_data = &fc;
5216 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5226 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5217 if (error != 0 || newp == NULL) 5227 if (error != 0 || newp == NULL)
5218 return error; 5228 return error;
5219 5229
5220 /* Don't bother if it hasn't changed */ 5230 /* Don't bother if it hasn't changed */
5221 if (fc == adapter->hw.fc.current_mode) 5231 if (fc == adapter->hw.fc.current_mode)
5222 return (0); 5232 return (0);
5223 5233
5224 return ixgbe_set_flowcntl(adapter, fc); 5234 return ixgbe_set_flowcntl(adapter, fc);
5225} /* ixgbe_sysctl_flowcntl */ 5235} /* ixgbe_sysctl_flowcntl */
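ixgbe_sysctl_flowcntl() shows the sysctl handler pattern this file uses throughout: copy the current value into a local, let sysctl_lookup(9) perform the user copy-in/copy-out on that local, and commit only on a successful write. A stripped-down sketch of the pattern:

	int v = current_value;			/* snapshot */
	node.sysctl_data = &v;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;			/* read access, or failure */
	/* v now holds the validated user input; commit it */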
5226 5236
5227/************************************************************************ 5237/************************************************************************
5228 * ixgbe_set_flowcntl - Set flow control 5238 * ixgbe_set_flowcntl - Set flow control
5229 * 5239 *
5230 * Flow control values: 5240 * Flow control values:
5231 * 0 - off 5241 * 0 - off
5232 * 1 - rx pause 5242 * 1 - rx pause
5233 * 2 - tx pause 5243 * 2 - tx pause
5234 * 3 - full 5244 * 3 - full
5235 ************************************************************************/ 5245 ************************************************************************/
5236static int 5246static int
5237ixgbe_set_flowcntl(struct adapter *adapter, int fc) 5247ixgbe_set_flowcntl(struct adapter *adapter, int fc)
5238{ 5248{
5239 switch (fc) { 5249 switch (fc) {
5240 case ixgbe_fc_rx_pause: 5250 case ixgbe_fc_rx_pause:
5241 case ixgbe_fc_tx_pause: 5251 case ixgbe_fc_tx_pause:
5242 case ixgbe_fc_full: 5252 case ixgbe_fc_full:
5243 adapter->hw.fc.requested_mode = fc; 5253 adapter->hw.fc.requested_mode = fc;
5244 if (adapter->num_queues > 1) 5254 if (adapter->num_queues > 1)
5245 ixgbe_disable_rx_drop(adapter); 5255 ixgbe_disable_rx_drop(adapter);
5246 break; 5256 break;
5247 case ixgbe_fc_none: 5257 case ixgbe_fc_none:
5248 adapter->hw.fc.requested_mode = ixgbe_fc_none; 5258 adapter->hw.fc.requested_mode = ixgbe_fc_none;
5249 if (adapter->num_queues > 1) 5259 if (adapter->num_queues > 1)
5250 ixgbe_enable_rx_drop(adapter); 5260 ixgbe_enable_rx_drop(adapter);
5251 break; 5261 break;
5252 default: 5262 default:
5253 return (EINVAL); 5263 return (EINVAL);
5254 } 5264 }
5255 5265
5256#if 0 /* XXX NetBSD */ 5266#if 0 /* XXX NetBSD */
5257 /* Don't autoneg if forcing a value */ 5267 /* Don't autoneg if forcing a value */
5258 adapter->hw.fc.disable_fc_autoneg = TRUE; 5268 adapter->hw.fc.disable_fc_autoneg = TRUE;
5259#endif 5269#endif
5260 ixgbe_fc_enable(&adapter->hw); 5270 ixgbe_fc_enable(&adapter->hw);
5261 5271
5262 return (0); 5272 return (0);
5263} /* ixgbe_set_flowcntl */ 5273} /* ixgbe_set_flowcntl */
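Note the coupling in ixgbe_set_flowcntl(): pause-based flow control and per-queue RX drop are alternatives, so selecting any pause mode turns SRRCTL drop off, while selecting none turns it on, and only with multiple queues, where one full ring could otherwise stall the rest. The policy, restated as a sketch:

	if (adapter->num_queues > 1) {
		if (fc == ixgbe_fc_none)
			ixgbe_enable_rx_drop(adapter);	/* drop, don't stall */
		else
			ixgbe_disable_rx_drop(adapter);	/* PAUSE backpressure */
	}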
5264 5274
5265/************************************************************************ 5275/************************************************************************
5266 * ixgbe_enable_rx_drop 5276 * ixgbe_enable_rx_drop
5267 * 5277 *
5268 * Enable the hardware to drop packets when the buffer is 5278 * Enable the hardware to drop packets when the buffer is
5269 * full. This is useful with multiqueue, so that no single 5279 * full. This is useful with multiqueue, so that no single
5270 * queue being full stalls the entire RX engine. We only 5280 * queue being full stalls the entire RX engine. We only
5271 * enable this when Multiqueue is enabled AND Flow Control 5281 * enable this when Multiqueue is enabled AND Flow Control
5272 * is disabled. 5282 * is disabled.
5273 ************************************************************************/ 5283 ************************************************************************/
5274static void 5284static void
5275ixgbe_enable_rx_drop(struct adapter *adapter) 5285ixgbe_enable_rx_drop(struct adapter *adapter)
5276{ 5286{
5277 struct ixgbe_hw *hw = &adapter->hw; 5287 struct ixgbe_hw *hw = &adapter->hw;
5278 struct rx_ring *rxr; 5288 struct rx_ring *rxr;
5279 u32 srrctl; 5289 u32 srrctl;
5280 5290
5281 for (int i = 0; i < adapter->num_queues; i++) { 5291 for (int i = 0; i < adapter->num_queues; i++) {
5282 rxr = &adapter->rx_rings[i]; 5292 rxr = &adapter->rx_rings[i];
5283 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5293 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5284 srrctl |= IXGBE_SRRCTL_DROP_EN; 5294 srrctl |= IXGBE_SRRCTL_DROP_EN;
5285 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5295 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5286 } 5296 }
5287 5297
5288 /* enable drop for each vf */ 5298 /* enable drop for each vf */
5289 for (int i = 0; i < adapter->num_vfs; i++) { 5299 for (int i = 0; i < adapter->num_vfs; i++) {
5290 IXGBE_WRITE_REG(hw, IXGBE_QDE, 5300 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5291 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | 5301 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
5292 IXGBE_QDE_ENABLE)); 5302 IXGBE_QDE_ENABLE));
5293 } 5303 }
5294} /* ixgbe_enable_rx_drop */ 5304} /* ixgbe_enable_rx_drop */
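Each iteration of the VF loop above writes one self-contained command to QDE: a write strobe, the pool index, and optionally the enable bit. A hypothetical helper (not in the driver) composing that word from the same constants:

	static inline u32
	qde_cmd(int pool, bool drop)
	{
		u32 v = IXGBE_QDE_WRITE | ((u32)pool << IXGBE_QDE_IDX_SHIFT);

		if (drop)
			v |= IXGBE_QDE_ENABLE;
		return v;	/* write this to the IXGBE_QDE register */
	}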
5295 5305
5296/************************************************************************ 5306/************************************************************************
5297 * ixgbe_disable_rx_drop 5307 * ixgbe_disable_rx_drop
5298 ************************************************************************/ 5308 ************************************************************************/
5299static void 5309static void
5300ixgbe_disable_rx_drop(struct adapter *adapter) 5310ixgbe_disable_rx_drop(struct adapter *adapter)
5301{ 5311{
5302 struct ixgbe_hw *hw = &adapter->hw; 5312 struct ixgbe_hw *hw = &adapter->hw;
5303 struct rx_ring *rxr; 5313 struct rx_ring *rxr;
5304 u32 srrctl; 5314 u32 srrctl;
5305 5315
5306 for (int i = 0; i < adapter->num_queues; i++) { 5316 for (int i = 0; i < adapter->num_queues; i++) {
5307 rxr = &adapter->rx_rings[i]; 5317 rxr = &adapter->rx_rings[i];
5308 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); 5318 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
5309 srrctl &= ~IXGBE_SRRCTL_DROP_EN; 5319 srrctl &= ~IXGBE_SRRCTL_DROP_EN;
5310 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); 5320 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
5311 } 5321 }
5312 5322
5313 /* disable drop for each vf */ 5323 /* disable drop for each vf */
5314 for (int i = 0; i < adapter->num_vfs; i++) { 5324 for (int i = 0; i < adapter->num_vfs; i++) {
5315 IXGBE_WRITE_REG(hw, IXGBE_QDE, 5325 IXGBE_WRITE_REG(hw, IXGBE_QDE,
5316 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); 5326 (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
5317 } 5327 }
5318} /* ixgbe_disable_rx_drop */ 5328} /* ixgbe_disable_rx_drop */
5319 5329
5320/************************************************************************ 5330/************************************************************************
5321 * ixgbe_sysctl_advertise 5331 * ixgbe_sysctl_advertise
5322 * 5332 *
5323 * SYSCTL wrapper around setting advertised speed 5333 * SYSCTL wrapper around setting advertised speed
5324 ************************************************************************/ 5334 ************************************************************************/
5325static int 5335static int
5326ixgbe_sysctl_advertise(SYSCTLFN_ARGS) 5336ixgbe_sysctl_advertise(SYSCTLFN_ARGS)
5327{ 5337{
5328 struct sysctlnode node = *rnode; 5338 struct sysctlnode node = *rnode;
5329 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5339 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5330 int error = 0, advertise; 5340 int error = 0, advertise;
5331 5341
5332 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5342 if (ixgbe_fw_recovery_mode_swflag(adapter))
5333 return (EPERM); 5343 return (EPERM);
5334 5344
5335 advertise = adapter->advertise; 5345 advertise = adapter->advertise;
5336 node.sysctl_data = &advertise; 5346 node.sysctl_data = &advertise;
5337 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5347 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5338 if (error != 0 || newp == NULL) 5348 if (error != 0 || newp == NULL)
5339 return error; 5349 return error;
5340 5350
5341 return ixgbe_set_advertise(adapter, advertise); 5351 return ixgbe_set_advertise(adapter, advertise);
5342} /* ixgbe_sysctl_advertise */ 5352} /* ixgbe_sysctl_advertise */
5343 5353
5344/************************************************************************ 5354/************************************************************************
5345 * ixgbe_set_advertise - Control advertised link speed 5355 * ixgbe_set_advertise - Control advertised link speed
5346 * 5356 *
5347 * Flags: 5357 * Flags:
5348 * 0x00 - Default (all capable link speeds) 5358 * 0x00 - Default (all capable link speeds)
5349 * 0x01 - advertise 100 Mb 5359 * 0x01 - advertise 100 Mb
5350 * 0x02 - advertise 1G 5360 * 0x02 - advertise 1G
5351 * 0x04 - advertise 10G 5361 * 0x04 - advertise 10G
5352 * 0x08 - advertise 10 Mb 5362 * 0x08 - advertise 10 Mb
5353 * 0x10 - advertise 2.5G 5363 * 0x10 - advertise 2.5G
5354 * 0x20 - advertise 5G 5364 * 0x20 - advertise 5G
5355 ************************************************************************/ 5365 ************************************************************************/
5356static int 5366static int
5357ixgbe_set_advertise(struct adapter *adapter, int advertise) 5367ixgbe_set_advertise(struct adapter *adapter, int advertise)
5358{ 5368{
5359 device_t dev; 5369 device_t dev;
5360 struct ixgbe_hw *hw; 5370 struct ixgbe_hw *hw;
5361 ixgbe_link_speed speed = 0; 5371 ixgbe_link_speed speed = 0;
5362 ixgbe_link_speed link_caps = 0; 5372 ixgbe_link_speed link_caps = 0;
5363 s32 err = IXGBE_NOT_IMPLEMENTED; 5373 s32 err = IXGBE_NOT_IMPLEMENTED;
5364 bool negotiate = FALSE; 5374 bool negotiate = FALSE;
5365 5375
5366 /* Checks to validate new value */ 5376 /* Checks to validate new value */
5367 if (adapter->advertise == advertise) /* no change */ 5377 if (adapter->advertise == advertise) /* no change */
5368 return (0); 5378 return (0);
5369 5379
5370 dev = adapter->dev; 5380 dev = adapter->dev;
5371 hw = &adapter->hw; 5381 hw = &adapter->hw;
5372 5382
5373 /* No speed changes for backplane media */ 5383 /* No speed changes for backplane media */
5374 if (hw->phy.media_type == ixgbe_media_type_backplane) 5384 if (hw->phy.media_type == ixgbe_media_type_backplane)
5375 return (ENODEV); 5385 return (ENODEV);
5376 5386
5377 if (!((hw->phy.media_type == ixgbe_media_type_copper) || 5387 if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
5378 (hw->phy.multispeed_fiber))) { 5388 (hw->phy.multispeed_fiber))) {
5379 device_printf(dev, 5389 device_printf(dev,
5380 "Advertised speed can only be set on copper or " 5390 "Advertised speed can only be set on copper or "
5381 "multispeed fiber media types.\n"); 5391 "multispeed fiber media types.\n");
5382 return (EINVAL); 5392 return (EINVAL);
5383 } 5393 }
5384 5394
5385 if (advertise < 0x0 || advertise > 0x2f) { 5395 if (advertise < 0x0 || advertise > 0x3f) {
5386 device_printf(dev, 5396 device_printf(dev,
5387 "Invalid advertised speed; valid modes are 0x0 through 0x7\n"); 5397 "Invalid advertised speed; valid modes are 0x0 through 0x3f\n");
5388 return (EINVAL); 5398 return (EINVAL);
5389 } 5399 }
5390 5400
5391 if (hw->mac.ops.get_link_capabilities) { 5401 if (hw->mac.ops.get_link_capabilities) {
5392 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, 5402 err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
5393 &negotiate); 5403 &negotiate);
5394 if (err != IXGBE_SUCCESS) { 5404 if (err != IXGBE_SUCCESS) {
5395 device_printf(dev, "Unable to determine supported advertise speeds\n"); 5405 device_printf(dev, "Unable to determine supported advertise speeds\n");
5396 return (ENODEV); 5406 return (ENODEV);
5397 } 5407 }
5398 } 5408 }
5399 5409
5400 /* Set new value and report new advertised mode */ 5410 /* Set new value and report new advertised mode */
5401 if (advertise & 0x1) { 5411 if (advertise & 0x1) {
5402 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { 5412 if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
5403 device_printf(dev, "Interface does not support 100Mb advertised speed\n"); 5413 device_printf(dev, "Interface does not support 100Mb advertised speed\n");
5404 return (EINVAL); 5414 return (EINVAL);
5405 } 5415 }
5406 speed |= IXGBE_LINK_SPEED_100_FULL; 5416 speed |= IXGBE_LINK_SPEED_100_FULL;
5407 } 5417 }
5408 if (advertise & 0x2) { 5418 if (advertise & 0x2) {
5409 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { 5419 if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
5410 device_printf(dev, "Interface does not support 1Gb advertised speed\n"); 5420 device_printf(dev, "Interface does not support 1Gb advertised speed\n");
5411 return (EINVAL); 5421 return (EINVAL);
5412 } 5422 }
5413 speed |= IXGBE_LINK_SPEED_1GB_FULL; 5423 speed |= IXGBE_LINK_SPEED_1GB_FULL;
5414 } 5424 }
5415 if (advertise & 0x4) { 5425 if (advertise & 0x4) {
5416 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { 5426 if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
5417 device_printf(dev, "Interface does not support 10Gb advertised speed\n"); 5427 device_printf(dev, "Interface does not support 10Gb advertised speed\n");
5418 return (EINVAL); 5428 return (EINVAL);
5419 } 5429 }
5420 speed |= IXGBE_LINK_SPEED_10GB_FULL; 5430 speed |= IXGBE_LINK_SPEED_10GB_FULL;
5421 } 5431 }
5422 if (advertise & 0x8) { 5432 if (advertise & 0x8) {
5423 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { 5433 if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
5424 device_printf(dev, "Interface does not support 10Mb advertised speed\n"); 5434 device_printf(dev, "Interface does not support 10Mb advertised speed\n");
5425 return (EINVAL); 5435 return (EINVAL);
5426 } 5436 }
5427 speed |= IXGBE_LINK_SPEED_10_FULL; 5437 speed |= IXGBE_LINK_SPEED_10_FULL;
5428 } 5438 }
5429 if (advertise & 0x10) { 5439 if (advertise & 0x10) {
5430 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) { 5440 if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
5431 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n"); 5441 device_printf(dev, "Interface does not support 2.5Gb advertised speed\n");
5432 return (EINVAL); 5442 return (EINVAL);
5433 } 5443 }
5434 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 5444 speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
5435 } 5445 }
5436 if (advertise & 0x20) { 5446 if (advertise & 0x20) {
5437 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) { 5447 if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
5438 device_printf(dev, "Interface does not support 5Gb advertised speed\n"); 5448 device_printf(dev, "Interface does not support 5Gb advertised speed\n");
5439 return (EINVAL); 5449 return (EINVAL);
5440 } 5450 }
5441 speed |= IXGBE_LINK_SPEED_5GB_FULL; 5451 speed |= IXGBE_LINK_SPEED_5GB_FULL;
5442 } 5452 }
5443 if (advertise == 0) 5453 if (advertise == 0)
5444 speed = link_caps; /* All capable link speeds */ 5454 speed = link_caps; /* All capable link speeds */
5445 5455
5446 hw->mac.autotry_restart = TRUE; 5456 hw->mac.autotry_restart = TRUE;
5447 hw->mac.ops.setup_link(hw, speed, TRUE); 5457 hw->mac.ops.setup_link(hw, speed, TRUE);
5448 adapter->advertise = advertise; 5458 adapter->advertise = advertise;
5449 5459
5450 return (0); 5460 return (0);
5451} /* ixgbe_set_advertise */ 5461} /* ixgbe_set_advertise */
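The bound change visible above (0x2f -> 0x3f) is the advertise-speed fix: the limit must be the OR of all six defined flag bits, 0x01|0x02|0x04|0x08|0x10|0x20 == 0x3f. Under the old test, every value with both the 2.5G (0x10) and 5G (0x20) bits set, e.g. 0x30, was wrongly rejected:

	/* 0x30 == 2.5G | 5G: rejected by the old `advertise > 0x2f`
	 * test, accepted by the corrected `advertise > 0x3f` bound. */
	if (advertise < 0x0 || advertise > 0x3f)
		return (EINVAL);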
5452 5462
5453/************************************************************************ 5463/************************************************************************
5454 * ixgbe_get_advertise - Get current advertised speed settings 5464 * ixgbe_get_advertise - Get current advertised speed settings
5455 * 5465 *
5456 * Formatted for sysctl usage. 5466 * Formatted for sysctl usage.
5457 * Flags: 5467 * Flags:
5458 * 0x01 - advertise 100 Mb 5468 * 0x01 - advertise 100 Mb
5459 * 0x02 - advertise 1G 5469 * 0x02 - advertise 1G
5460 * 0x04 - advertise 10G 5470 * 0x04 - advertise 10G
5461 * 0x08 - advertise 10 Mb (yes, Mb) 5471 * 0x08 - advertise 10 Mb (yes, Mb)
5462 * 0x10 - advertise 2.5G 5472 * 0x10 - advertise 2.5G
5463 * 0x20 - advertise 5G 5473 * 0x20 - advertise 5G
5464 ************************************************************************/ 5474 ************************************************************************/
5465static int 5475static int
5466ixgbe_get_advertise(struct adapter *adapter) 5476ixgbe_get_advertise(struct adapter *adapter)
5467{ 5477{
5468 struct ixgbe_hw *hw = &adapter->hw; 5478 struct ixgbe_hw *hw = &adapter->hw;
5469 int speed; 5479 int speed;
5470 ixgbe_link_speed link_caps = 0; 5480 ixgbe_link_speed link_caps = 0;
5471 s32 err; 5481 s32 err;
5472 bool negotiate = FALSE; 5482 bool negotiate = FALSE;
5473 5483
5474 /* 5484 /*
5475 * Advertised speed means nothing unless it's copper or 5485 * Advertised speed means nothing unless it's copper or
5476 * multi-speed fiber 5486 * multi-speed fiber
5477 */ 5487 */
5478 if (!(hw->phy.media_type == ixgbe_media_type_copper) && 5488 if (!(hw->phy.media_type == ixgbe_media_type_copper) &&
5479 !(hw->phy.multispeed_fiber)) 5489 !(hw->phy.multispeed_fiber))
5480 return (0); 5490 return (0);
5481 5491
5482 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); 5492 err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate);
5483 if (err != IXGBE_SUCCESS) 5493 if (err != IXGBE_SUCCESS)
5484 return (0); 5494 return (0);
5485 5495
5486 speed = 5496 speed =
5487 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) | 5497 ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 0x04 : 0) |
5488 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) | 5498 ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 0x02 : 0) |
5489 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) | 5499 ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 0x01 : 0) |
5490 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) | 5500 ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 0x08 : 0) |
5491 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) | 5501 ((link_caps & IXGBE_LINK_SPEED_2_5GB_FULL) ? 0x10 : 0) |
5492 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0); 5502 ((link_caps & IXGBE_LINK_SPEED_5GB_FULL) ? 0x20 : 0);
5493 5503
5494 return speed; 5504 return speed;
5495} /* ixgbe_get_advertise */ 5505} /* ixgbe_get_advertise */
5496 5506
5497/************************************************************************ 5507/************************************************************************
5498 * ixgbe_sysctl_dmac - Manage DMA Coalescing 5508 * ixgbe_sysctl_dmac - Manage DMA Coalescing
5499 * 5509 *
5500 * Control values: 5510 * Control values:
5501 * 0/1 - off / on (use default value of 1000) 5511 * 0/1 - off / on (use default value of 1000)
5502 * 5512 *
5503 * Legal timer values are: 5513 * Legal timer values are:
5504 * 50,100,250,500,1000,2000,5000,10000 5514 * 50,100,250,500,1000,2000,5000,10000
5505 * 5515 *
5506 * Turning off interrupt moderation will also turn this off. 5516 * Turning off interrupt moderation will also turn this off.
5507 ************************************************************************/ 5517 ************************************************************************/
5508static int 5518static int
5509ixgbe_sysctl_dmac(SYSCTLFN_ARGS) 5519ixgbe_sysctl_dmac(SYSCTLFN_ARGS)
5510{ 5520{
5511 struct sysctlnode node = *rnode; 5521 struct sysctlnode node = *rnode;
5512 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5522 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5513 struct ifnet *ifp = adapter->ifp; 5523 struct ifnet *ifp = adapter->ifp;
5514 int error; 5524 int error;
5515 int newval; 5525 int newval;
5516 5526
5517 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5527 if (ixgbe_fw_recovery_mode_swflag(adapter))
5518 return (EPERM); 5528 return (EPERM);
5519 5529
5520 newval = adapter->dmac; 5530 newval = adapter->dmac;
5521 node.sysctl_data = &newval; 5531 node.sysctl_data = &newval;
5522 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5532 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5523 if ((error) || (newp == NULL)) 5533 if ((error) || (newp == NULL))
5524 return (error); 5534 return (error);
5525 5535
5526 switch (newval) { 5536 switch (newval) {
5527 case 0: 5537 case 0:
5528 /* Disabled */ 5538 /* Disabled */
5529 adapter->dmac = 0; 5539 adapter->dmac = 0;
5530 break; 5540 break;
5531 case 1: 5541 case 1:
5532 /* Enable and use default */ 5542 /* Enable and use default */
5533 adapter->dmac = 1000; 5543 adapter->dmac = 1000;
5534 break; 5544 break;
5535 case 50: 5545 case 50:
5536 case 100: 5546 case 100:
5537 case 250: 5547 case 250:
5538 case 500: 5548 case 500:
5539 case 1000: 5549 case 1000:
5540 case 2000: 5550 case 2000:
5541 case 5000: 5551 case 5000:
5542 case 10000: 5552 case 10000:
5543 /* Legal values - allow */ 5553 /* Legal values - allow */
5544 adapter->dmac = newval; 5554 adapter->dmac = newval;
5545 break; 5555 break;
5546 default: 5556 default:
5547 /* Do nothing, illegal value */ 5557 /* Do nothing, illegal value */
5548 return (EINVAL); 5558 return (EINVAL);
5549 } 5559 }
5550 5560
5551 /* Re-initialize hardware if it's already running */ 5561 /* Re-initialize hardware if it's already running */
5552 if (ifp->if_flags & IFF_RUNNING) 5562 if (ifp->if_flags & IFF_RUNNING)
5553 ifp->if_init(ifp); 5563 ifp->if_init(ifp);
5554 5564
5555 return (0); 5565 return (0);
5556} /* ixgbe_sysctl_dmac */ 5566} /* ixgbe_sysctl_dmac */
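The switch accepts 0 (disable), 1 (enable with the 1000 default) and the eight legal timer values. An equivalent table-driven check, as a sketch:

	static const int dmac_legal[] =
	    { 50, 100, 250, 500, 1000, 2000, 5000, 10000 };
	bool ok = (newval == 0);		/* 0 always disables */

	if (newval == 1)
		newval = 1000;			/* 1 selects the default */
	for (size_t i = 0; !ok && i < __arraycount(dmac_legal); i++)
		ok = (newval == dmac_legal[i]);
	if (!ok)
		return (EINVAL);
	adapter->dmac = newval;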
5557 5567
5558#ifdef IXGBE_DEBUG 5568#ifdef IXGBE_DEBUG
5559/************************************************************************ 5569/************************************************************************
5560 * ixgbe_sysctl_power_state 5570 * ixgbe_sysctl_power_state
5561 * 5571 *
5562 * Sysctl to test power states 5572 * Sysctl to test power states
5563 * Values: 5573 * Values:
5564 * 0 - set device to D0 5574 * 0 - set device to D0
5565 * 3 - set device to D3 5575 * 3 - set device to D3
5566 * (none) - get current device power state 5576 * (none) - get current device power state
5567 ************************************************************************/ 5577 ************************************************************************/
5568static int 5578static int
5569ixgbe_sysctl_power_state(SYSCTLFN_ARGS) 5579ixgbe_sysctl_power_state(SYSCTLFN_ARGS)
5570{ 5580{
5571#ifdef notyet 5581#ifdef notyet
5572 struct sysctlnode node = *rnode; 5582 struct sysctlnode node = *rnode;
5573 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5583 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5574 device_t dev = adapter->dev; 5584 device_t dev = adapter->dev;
5575 int curr_ps, new_ps, error = 0; 5585 int curr_ps, new_ps, error = 0;
5576 5586
5577 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5587 if (ixgbe_fw_recovery_mode_swflag(adapter))
5578 return (EPERM); 5588 return (EPERM);
5579 5589
5580 curr_ps = new_ps = pci_get_powerstate(dev); 5590 curr_ps = new_ps = pci_get_powerstate(dev);
5581 5591
5582 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5592 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5583 if ((error) || (req->newp == NULL)) 5593 if ((error) || (req->newp == NULL))
5584 return (error); 5594 return (error);
5585 5595
5586 if (new_ps == curr_ps) 5596 if (new_ps == curr_ps)
5587 return (0); 5597 return (0);
5588 5598
5589 if (new_ps == 3 && curr_ps == 0) 5599 if (new_ps == 3 && curr_ps == 0)
5590 error = DEVICE_SUSPEND(dev); 5600 error = DEVICE_SUSPEND(dev);
5591 else if (new_ps == 0 && curr_ps == 3) 5601 else if (new_ps == 0 && curr_ps == 3)
5592 error = DEVICE_RESUME(dev); 5602 error = DEVICE_RESUME(dev);
5593 else 5603 else
5594 return (EINVAL); 5604 return (EINVAL);
5595 5605
5596 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); 5606 device_printf(dev, "New state: %d\n", pci_get_powerstate(dev));
5597 5607
5598 return (error); 5608 return (error);
5599#else 5609#else
5600 return 0; 5610 return 0;
5601#endif 5611#endif
5602} /* ixgbe_sysctl_power_state */ 5612} /* ixgbe_sysctl_power_state */
5603#endif 5613#endif
5604 5614
5605/************************************************************************ 5615/************************************************************************
5606 * ixgbe_sysctl_wol_enable 5616 * ixgbe_sysctl_wol_enable
5607 * 5617 *
5608 * Sysctl to enable/disable the WoL capability, 5618 * Sysctl to enable/disable the WoL capability,
5609 * if supported by the adapter. 5619 * if supported by the adapter.
5610 * 5620 *
5611 * Values: 5621 * Values:
5612 * 0 - disabled 5622 * 0 - disabled
5613 * 1 - enabled 5623 * 1 - enabled
5614 ************************************************************************/ 5624 ************************************************************************/
5615static int 5625static int
5616ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS) 5626ixgbe_sysctl_wol_enable(SYSCTLFN_ARGS)
5617{ 5627{
5618 struct sysctlnode node = *rnode; 5628 struct sysctlnode node = *rnode;
5619 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5629 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5620 struct ixgbe_hw *hw = &adapter->hw; 5630 struct ixgbe_hw *hw = &adapter->hw;
5621 bool new_wol_enabled; 5631 bool new_wol_enabled;
5622 int error = 0; 5632 int error = 0;
5623 5633
5624 /* 5634 /*
5625 * It's not required to check recovery mode because this function never 5635 * It's not required to check recovery mode because this function never
5626 * touches hardware. 5636 * touches hardware.
5627 */ 5637 */
5628 new_wol_enabled = hw->wol_enabled; 5638 new_wol_enabled = hw->wol_enabled;
5629 node.sysctl_data = &new_wol_enabled; 5639 node.sysctl_data = &new_wol_enabled;
5630 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5640 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5631 if ((error) || (newp == NULL)) 5641 if ((error) || (newp == NULL))
5632 return (error); 5642 return (error);
5633 if (new_wol_enabled == hw->wol_enabled) 5643 if (new_wol_enabled == hw->wol_enabled)
5634 return (0); 5644 return (0);
5635 5645
5636 if (new_wol_enabled && !adapter->wol_support) 5646 if (new_wol_enabled && !adapter->wol_support)
5637 return (ENODEV); 5647 return (ENODEV);
5638 else 5648 else
5639 hw->wol_enabled = new_wol_enabled; 5649 hw->wol_enabled = new_wol_enabled;
5640 5650
5641 return (0); 5651 return (0);
5642} /* ixgbe_sysctl_wol_enable */ 5652} /* ixgbe_sysctl_wol_enable */
5643 5653
5644/************************************************************************ 5654/************************************************************************
5645 * ixgbe_sysctl_wufc - Wake Up Filter Control 5655 * ixgbe_sysctl_wufc - Wake Up Filter Control
5646 * 5656 *
5647 * Sysctl to enable/disable the packet types whose 5657 * Sysctl to enable/disable the packet types whose
5648 * receipt will wake up the adapter. 5658 * receipt will wake up the adapter.
5649 * Flags: 5659 * Flags:
5650 * 0x1 - Link Status Change 5660 * 0x1 - Link Status Change
5651 * 0x2 - Magic Packet 5661 * 0x2 - Magic Packet
5652 * 0x4 - Direct Exact 5662 * 0x4 - Direct Exact
5653 * 0x8 - Directed Multicast 5663 * 0x8 - Directed Multicast
5654 * 0x10 - Broadcast 5664 * 0x10 - Broadcast
5655 * 0x20 - ARP/IPv4 Request Packet 5665 * 0x20 - ARP/IPv4 Request Packet
5656 * 0x40 - Direct IPv4 Packet 5666 * 0x40 - Direct IPv4 Packet
5657 * 0x80 - Direct IPv6 Packet 5667 * 0x80 - Direct IPv6 Packet
5658 * 5668 *
5659 * Settings not listed above will cause the sysctl to return an error. 5669 * Settings not listed above will cause the sysctl to return an error.
5660 ************************************************************************/ 5670 ************************************************************************/
5661static int 5671static int
5662ixgbe_sysctl_wufc(SYSCTLFN_ARGS) 5672ixgbe_sysctl_wufc(SYSCTLFN_ARGS)
5663{ 5673{
5664 struct sysctlnode node = *rnode; 5674 struct sysctlnode node = *rnode;
5665 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5675 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5666 int error = 0; 5676 int error = 0;
5667 u32 new_wufc; 5677 u32 new_wufc;
5668 5678
5669 /* 5679 /*
5670 * It's not required to check recovery mode because this function never 5680 * It's not required to check recovery mode because this function never
5671 * touches hardware. 5681 * touches hardware.
5672 */ 5682 */
5673 new_wufc = adapter->wufc; 5683 new_wufc = adapter->wufc;
5674 node.sysctl_data = &new_wufc; 5684 node.sysctl_data = &new_wufc;
5675 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5685 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5676 if ((error) || (newp == NULL)) 5686 if ((error) || (newp == NULL))
5677 return (error); 5687 return (error);
5678 if (new_wufc == adapter->wufc) 5688 if (new_wufc == adapter->wufc)
5679 return (0); 5689 return (0);
5680 5690
5681 if (new_wufc & 0xffffff00) 5691 if (new_wufc & 0xffffff00)
5682 return (EINVAL); 5692 return (EINVAL);
5683 5693
5684 new_wufc &= 0xff; 5694 new_wufc &= 0xff;
5685 new_wufc |= (0xffffff & adapter->wufc); 5695 new_wufc |= (0xffffff & adapter->wufc);
5686 adapter->wufc = new_wufc; 5696 adapter->wufc = new_wufc;
5687 5697
5688 return (0); 5698 return (0);
5689} /* ixgbe_sysctl_wufc */ 5699} /* ixgbe_sysctl_wufc */
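The WUFC flags combine by OR, and only the low byte is defined, which is exactly what the 0xffffff00 test enforces. For example, waking on magic packets and broadcast frames only:

	u32 new_wufc = 0x2 | 0x10;	/* magic packet + broadcast == 0x12 */

	if (new_wufc & 0xffffff00)	/* bits above the defined byte */
		return (EINVAL);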
5690 5700
5691#ifdef IXGBE_DEBUG 5701#ifdef IXGBE_DEBUG
5692/************************************************************************ 5702/************************************************************************
5693 * ixgbe_sysctl_print_rss_config 5703 * ixgbe_sysctl_print_rss_config
5694 ************************************************************************/ 5704 ************************************************************************/
5695static int 5705static int
5696ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS) 5706ixgbe_sysctl_print_rss_config(SYSCTLFN_ARGS)
5697{ 5707{
5698#ifdef notyet 5708#ifdef notyet
5699 struct sysctlnode node = *rnode; 5709 struct sysctlnode node = *rnode;
5700 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5710 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5701 struct ixgbe_hw *hw = &adapter->hw; 5711 struct ixgbe_hw *hw = &adapter->hw;
5702 device_t dev = adapter->dev; 5712 device_t dev = adapter->dev;
5703 struct sbuf *buf; 5713 struct sbuf *buf;
5704 int error = 0, reta_size; 5714 int error = 0, reta_size;
5705 u32 reg; 5715 u32 reg;
5706 5716
5707 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5717 if (ixgbe_fw_recovery_mode_swflag(adapter))
5708 return (EPERM); 5718 return (EPERM);
5709 5719
5710 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5720 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
5711 if (!buf) { 5721 if (!buf) {
5712 device_printf(dev, "Could not allocate sbuf for output.\n"); 5722 device_printf(dev, "Could not allocate sbuf for output.\n");
5713 return (ENOMEM); 5723 return (ENOMEM);
5714 } 5724 }
5715 5725
5716 // TODO: use sbufs to make a string to print out 5726 // TODO: use sbufs to make a string to print out
5717 /* Set multiplier for RETA setup and table size based on MAC */ 5727 /* Set multiplier for RETA setup and table size based on MAC */
5718 switch (adapter->hw.mac.type) { 5728 switch (adapter->hw.mac.type) {
5719 case ixgbe_mac_X550: 5729 case ixgbe_mac_X550:
5720 case ixgbe_mac_X550EM_x: 5730 case ixgbe_mac_X550EM_x:
5721 case ixgbe_mac_X550EM_a: 5731 case ixgbe_mac_X550EM_a:
5722 reta_size = 128; 5732 reta_size = 128;
5723 break; 5733 break;
5724 default: 5734 default:
5725 reta_size = 32; 5735 reta_size = 32;
5726 break; 5736 break;
5727 } 5737 }
5728 5738
5729 /* Print out the redirection table */ 5739 /* Print out the redirection table */
5730 sbuf_cat(buf, "\n"); 5740 sbuf_cat(buf, "\n");
5731 for (int i = 0; i < reta_size; i++) { 5741 for (int i = 0; i < reta_size; i++) {
5732 if (i < 32) { 5742 if (i < 32) {
5733 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); 5743 reg = IXGBE_READ_REG(hw, IXGBE_RETA(i));
5734 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); 5744 sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg);
5735 } else { 5745 } else {
5736 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); 5746 reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32));
5737 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); 5747 sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg);
5738 } 5748 }
5739 } 5749 }
5740 5750
5741 // TODO: print more config 5751 // TODO: print more config
5742 5752
5743 error = sbuf_finish(buf); 5753 error = sbuf_finish(buf);
5744 if (error) 5754 if (error)
5745 device_printf(dev, "Error finishing sbuf: %d\n", error); 5755 device_printf(dev, "Error finishing sbuf: %d\n", error);
5746 5756
5747 sbuf_delete(buf); 5757 sbuf_delete(buf);
5748#endif 5758#endif
5749 return (0); 5759 return (0);
5750} /* ixgbe_sysctl_print_rss_config */ 5760} /* ixgbe_sysctl_print_rss_config */
5751#endif /* IXGBE_DEBUG */ 5761#endif /* IXGBE_DEBUG */
5752 5762
5753/************************************************************************ 5763/************************************************************************
5754 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY 5764 * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY
5755 * 5765 *
5756 * For X552/X557-AT devices using an external PHY 5766 * For X552/X557-AT devices using an external PHY
5757 ************************************************************************/ 5767 ************************************************************************/
5758static int 5768static int
5759ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS) 5769ixgbe_sysctl_phy_temp(SYSCTLFN_ARGS)
5760{ 5770{
5761 struct sysctlnode node = *rnode; 5771 struct sysctlnode node = *rnode;
5762 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5772 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5763 struct ixgbe_hw *hw = &adapter->hw; 5773 struct ixgbe_hw *hw = &adapter->hw;
5764 int val; 5774 int val;
5765 u16 reg; 5775 u16 reg;
5766 int error; 5776 int error;
5767 5777
5768 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5778 if (ixgbe_fw_recovery_mode_swflag(adapter))
5769 return (EPERM); 5779 return (EPERM);
5770 5780
5771 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 5781 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5772 device_printf(adapter->dev, 5782 device_printf(adapter->dev,
5773 "Device has no supported external thermal sensor.\n"); 5783 "Device has no supported external thermal sensor.\n");
5774 return (ENODEV); 5784 return (ENODEV);
5775 } 5785 }
5776 5786
5777 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, 5787 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
5778 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) { 5788 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5779 device_printf(adapter->dev, 5789 device_printf(adapter->dev,
5780 "Error reading from PHY's current temperature register\n"); 5790 "Error reading from PHY's current temperature register\n");
5781 return (EAGAIN); 5791 return (EAGAIN);
5782 } 5792 }
5783 5793
5784 node.sysctl_data = &val; 5794 node.sysctl_data = &val;
5785 5795
5786 /* Shift temp for output */ 5796 /* Shift temp for output */
5787 val = reg >> 8; 5797 val = reg >> 8;
5788 5798
5789 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5799 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5790 if ((error) || (newp == NULL)) 5800 if ((error) || (newp == NULL))
5791 return (error); 5801 return (error);
5792 5802
5793 return (0); 5803 return (0);
5794} /* ixgbe_sysctl_phy_temp */ 5804} /* ixgbe_sysctl_phy_temp */
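
The temperature is carried in the high byte of the PHY register, as the shift above shows. A minimal decoding sketch, using an illustrative register value of 0x1a00 (the X552/X557 sensor reports whole degrees Celsius in that byte):

	u16 reg = 0x1a00;	/* illustrative readback, not a real register dump */
	int val = reg >> 8;	/* val == 0x1a == 26 degrees C */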
5795 5805
5796/************************************************************************ 5806/************************************************************************
5797 * ixgbe_sysctl_phy_overtemp_occurred 5807 * ixgbe_sysctl_phy_overtemp_occurred
5798 * 5808 *
5799 * Reports (directly from the PHY) whether the current PHY 5809 * Reports (directly from the PHY) whether the current PHY
5800 * temperature is over the overtemp threshold. 5810 * temperature is over the overtemp threshold.
5801 ************************************************************************/ 5811 ************************************************************************/
5802static int 5812static int
5803ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS) 5813ixgbe_sysctl_phy_overtemp_occurred(SYSCTLFN_ARGS)
5804{ 5814{
5805 struct sysctlnode node = *rnode; 5815 struct sysctlnode node = *rnode;
5806 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5816 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5807 struct ixgbe_hw *hw = &adapter->hw; 5817 struct ixgbe_hw *hw = &adapter->hw;
5808 int val, error; 5818 int val, error;
5809 u16 reg; 5819 u16 reg;
5810 5820
5811 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5821 if (ixgbe_fw_recovery_mode_swflag(adapter))
5812 return (EPERM); 5822 return (EPERM);
5813 5823
5814 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { 5824 if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
5815 device_printf(adapter->dev, 5825 device_printf(adapter->dev,
5816 "Device has no supported external thermal sensor.\n"); 5826 "Device has no supported external thermal sensor.\n");
5817 return (ENODEV); 5827 return (ENODEV);
5818 } 5828 }
5819 5829
5820 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, 5830 if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS,
5821 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) { 5831 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
5822 device_printf(adapter->dev, 5832 device_printf(adapter->dev,
5823 "Error reading from PHY's temperature status register\n"); 5833 "Error reading from PHY's temperature status register\n");
5824 return (EAGAIN); 5834 return (EAGAIN);
5825 } 5835 }
5826 5836
5827 node.sysctl_data = &val; 5837 node.sysctl_data = &val;
5828 5838
5829 /* Get occurrence bit */ 5839 /* Get occurrence bit */
5830 val = !!(reg & 0x4000); 5840 val = !!(reg & 0x4000);
5831 5841
5832 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5842 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5833 if ((error) || (newp == NULL)) 5843 if ((error) || (newp == NULL))
5834 return (error); 5844 return (error);
5835 5845
5836 return (0); 5846 return (0);
5837} /* ixgbe_sysctl_phy_overtemp_occurred */ 5847} /* ixgbe_sysctl_phy_overtemp_occurred */
5838 5848
5839/************************************************************************ 5849/************************************************************************
5840 * ixgbe_sysctl_eee_state 5850 * ixgbe_sysctl_eee_state
5841 * 5851 *
5842 * Sysctl to set EEE power saving feature 5852 * Sysctl to set EEE power saving feature
5843 * Values: 5853 * Values:
5844 * 0 - disable EEE 5854 * 0 - disable EEE
5845 * 1 - enable EEE 5855 * 1 - enable EEE
5846 * (none) - get current device EEE state 5856 * (none) - get current device EEE state
5847 ************************************************************************/ 5857 ************************************************************************/
5848static int 5858static int
5849ixgbe_sysctl_eee_state(SYSCTLFN_ARGS) 5859ixgbe_sysctl_eee_state(SYSCTLFN_ARGS)
5850{ 5860{
5851 struct sysctlnode node = *rnode; 5861 struct sysctlnode node = *rnode;
5852 struct adapter *adapter = (struct adapter *)node.sysctl_data; 5862 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5853 struct ifnet *ifp = adapter->ifp; 5863 struct ifnet *ifp = adapter->ifp;
5854 device_t dev = adapter->dev; 5864 device_t dev = adapter->dev;
5855 int curr_eee, new_eee, error = 0; 5865 int curr_eee, new_eee, error = 0;
5856 s32 retval; 5866 s32 retval;
5857 5867
5858 if (ixgbe_fw_recovery_mode_swflag(adapter)) 5868 if (ixgbe_fw_recovery_mode_swflag(adapter))
5859 return (EPERM); 5869 return (EPERM);
5860 5870
5861 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE); 5871 curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE);
5862 node.sysctl_data = &new_eee; 5872 node.sysctl_data = &new_eee;
5863 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 5873 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5864 if ((error) || (newp == NULL)) 5874 if ((error) || (newp == NULL))
5865 return (error); 5875 return (error);
5866 5876
5867 /* Nothing to do */ 5877 /* Nothing to do */
5868 if (new_eee == curr_eee) 5878 if (new_eee == curr_eee)
5869 return (0); 5879 return (0);
5870 5880
5871 /* Not supported */ 5881 /* Not supported */
5872 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE)) 5882 if (!(adapter->feat_cap & IXGBE_FEATURE_EEE))
5873 return (EINVAL); 5883 return (EINVAL);
5874 5884
5875 /* Bounds checking */ 5885 /* Bounds checking */
5876 if ((new_eee < 0) || (new_eee > 1)) 5886 if ((new_eee < 0) || (new_eee > 1))
5877 return (EINVAL); 5887 return (EINVAL);
5878 5888
5879 retval = ixgbe_setup_eee(&adapter->hw, new_eee); 5889 retval = ixgbe_setup_eee(&adapter->hw, new_eee);
5880 if (retval) { 5890 if (retval) {
5881 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval); 5891 device_printf(dev, "Error in EEE setup: 0x%08X\n", retval);
5882 return (EINVAL); 5892 return (EINVAL);
5883 } 5893 }
5884 5894
5885 /* Restart auto-neg */ 5895 /* Restart auto-neg */
5886 ifp->if_init(ifp); 5896 ifp->if_init(ifp);
5887 5897
5888 device_printf(dev, "New EEE state: %d\n", new_eee); 5898 device_printf(dev, "New EEE state: %d\n", new_eee);
5889 5899
5890 /* Cache new value */ 5900 /* Cache new value */
5891 if (new_eee) 5901 if (new_eee)
5892 adapter->feat_en |= IXGBE_FEATURE_EEE; 5902 adapter->feat_en |= IXGBE_FEATURE_EEE;
5893 else 5903 else
5894 adapter->feat_en &= ~IXGBE_FEATURE_EEE; 5904 adapter->feat_en &= ~IXGBE_FEATURE_EEE;
5895 5905
5896 return (error); 5906 return (error);
5897} /* ixgbe_sysctl_eee_state */ 5907} /* ixgbe_sysctl_eee_state */
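
As a usage sketch, assuming the driver registered this node as hw.ixg0.eee_state (the exact path depends on the sysctl tree the attach code builds): "sysctl -w hw.ixg0.eee_state=1" would enable EEE, reinitialize the interface to restart autonegotiation, and cache the new state in feat_en.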
5898 5908
5899#define PRINTQS(adapter, regname) \ 5909#define PRINTQS(adapter, regname) \
5900 do { \ 5910 do { \
5901 struct ixgbe_hw *_hw = &(adapter)->hw; \ 5911 struct ixgbe_hw *_hw = &(adapter)->hw; \
5902 int _i; \ 5912 int _i; \
5903 \ 5913 \
5904 printf("%s: %s", device_xname((adapter)->dev), #regname); \ 5914 printf("%s: %s", device_xname((adapter)->dev), #regname); \
5905 for (_i = 0; _i < (adapter)->num_queues; _i++) { \ 5915 for (_i = 0; _i < (adapter)->num_queues; _i++) { \
5906 printf((_i == 0) ? "\t" : " "); \ 5916 printf((_i == 0) ? "\t" : " "); \
5907 printf("%08x", IXGBE_READ_REG(_hw, \ 5917 printf("%08x", IXGBE_READ_REG(_hw, \
5908 IXGBE_##regname(_i))); \ 5918 IXGBE_##regname(_i))); \
5909 } \ 5919 } \
5910 printf("\n"); \ 5920 printf("\n"); \
5911 } while (0) 5921 } while (0)
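
For illustration, PRINTQS(adapter, RDT) expands to roughly the following, printing one 32-bit register value per queue on a single line:

	struct ixgbe_hw *_hw = &(adapter)->hw;
	int _i;

	printf("%s: %s", device_xname((adapter)->dev), "RDT");
	for (_i = 0; _i < (adapter)->num_queues; _i++) {
		printf((_i == 0) ? "\t" : " ");
		printf("%08x", IXGBE_READ_REG(_hw, IXGBE_RDT(_i)));
	}
	printf("\n");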
5912 5922
5913/************************************************************************ 5923/************************************************************************
5914 * ixgbe_print_debug_info 5924 * ixgbe_print_debug_info
5915 * 5925 *
 5916 * Called only when the debug sysctl is enabled.	 5926 * Called only when the debug sysctl is enabled.
5917 * Provides a way to take a look at important statistics 5927 * Provides a way to take a look at important statistics
5918 * maintained by the driver and hardware. 5928 * maintained by the driver and hardware.
5919 ************************************************************************/ 5929 ************************************************************************/
5920static void 5930static void
5921ixgbe_print_debug_info(struct adapter *adapter) 5931ixgbe_print_debug_info(struct adapter *adapter)
5922{ 5932{
5923 device_t dev = adapter->dev; 5933 device_t dev = adapter->dev;
5924 struct ixgbe_hw *hw = &adapter->hw; 5934 struct ixgbe_hw *hw = &adapter->hw;
5925 int table_size; 5935 int table_size;
5926 int i; 5936 int i;
5927 5937
5928 switch (adapter->hw.mac.type) { 5938 switch (adapter->hw.mac.type) {
5929 case ixgbe_mac_X550: 5939 case ixgbe_mac_X550:
5930 case ixgbe_mac_X550EM_x: 5940 case ixgbe_mac_X550EM_x:
5931 case ixgbe_mac_X550EM_a: 5941 case ixgbe_mac_X550EM_a:
5932 table_size = 128; 5942 table_size = 128;
5933 break; 5943 break;
5934 default: 5944 default:
5935 table_size = 32; 5945 table_size = 32;
5936 break; 5946 break;
5937 } 5947 }
5938 5948
5939 device_printf(dev, "[E]RETA:\n"); 5949 device_printf(dev, "[E]RETA:\n");
5940 for (i = 0; i < table_size; i++) { 5950 for (i = 0; i < table_size; i++) {
5941 if (i < 32) 5951 if (i < 32)
5942 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw, 5952 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5943 IXGBE_RETA(i))); 5953 IXGBE_RETA(i)));
5944 else 5954 else
5945 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw, 5955 printf("%02x: %08x\n", i, IXGBE_READ_REG(hw,
5946 IXGBE_ERETA(i - 32))); 5956 IXGBE_ERETA(i - 32)));
5947 } 5957 }
5948 5958
5949 device_printf(dev, "queue:"); 5959 device_printf(dev, "queue:");
5950 for (i = 0; i < adapter->num_queues; i++) { 5960 for (i = 0; i < adapter->num_queues; i++) {
5951 printf((i == 0) ? "\t" : " "); 5961 printf((i == 0) ? "\t" : " ");
5952 printf("%8d", i); 5962 printf("%8d", i);
5953 } 5963 }
5954 printf("\n"); 5964 printf("\n");
5955 PRINTQS(adapter, RDBAL); 5965 PRINTQS(adapter, RDBAL);
5956 PRINTQS(adapter, RDBAH); 5966 PRINTQS(adapter, RDBAH);
5957 PRINTQS(adapter, RDLEN); 5967 PRINTQS(adapter, RDLEN);
5958 PRINTQS(adapter, SRRCTL); 5968 PRINTQS(adapter, SRRCTL);
5959 PRINTQS(adapter, RDH); 5969 PRINTQS(adapter, RDH);
5960 PRINTQS(adapter, RDT); 5970 PRINTQS(adapter, RDT);
5961 PRINTQS(adapter, RXDCTL); 5971 PRINTQS(adapter, RXDCTL);
5962 5972
5963 device_printf(dev, "RQSMR:"); 5973 device_printf(dev, "RQSMR:");
5964 for (i = 0; i < adapter->num_queues / 4; i++) { 5974 for (i = 0; i < adapter->num_queues / 4; i++) {
5965 printf((i == 0) ? "\t" : " "); 5975 printf((i == 0) ? "\t" : " ");
5966 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i))); 5976 printf("%08x", IXGBE_READ_REG(hw, IXGBE_RQSMR(i)));
5967 } 5977 }
5968 printf("\n"); 5978 printf("\n");
5969 5979
5970 device_printf(dev, "disabled_count:"); 5980 device_printf(dev, "disabled_count:");
5971 for (i = 0; i < adapter->num_queues; i++) { 5981 for (i = 0; i < adapter->num_queues; i++) {
5972 printf((i == 0) ? "\t" : " "); 5982 printf((i == 0) ? "\t" : " ");
5973 printf("%8d", adapter->queues[i].disabled_count); 5983 printf("%8d", adapter->queues[i].disabled_count);
5974 } 5984 }
5975 printf("\n"); 5985 printf("\n");
5976 5986
5977 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS)); 5987 device_printf(dev, "EIMS:\t%08x\n", IXGBE_READ_REG(hw, IXGBE_EIMS));
5978 if (hw->mac.type != ixgbe_mac_82598EB) { 5988 if (hw->mac.type != ixgbe_mac_82598EB) {
5979 device_printf(dev, "EIMS_EX(0):\t%08x\n", 5989 device_printf(dev, "EIMS_EX(0):\t%08x\n",
5980 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0))); 5990 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)));
5981 device_printf(dev, "EIMS_EX(1):\t%08x\n", 5991 device_printf(dev, "EIMS_EX(1):\t%08x\n",
5982 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1))); 5992 IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)));
5983 } 5993 }
5984} /* ixgbe_print_debug_info */ 5994} /* ixgbe_print_debug_info */
5985 5995
5986/************************************************************************ 5996/************************************************************************
5987 * ixgbe_sysctl_debug 5997 * ixgbe_sysctl_debug
5988 ************************************************************************/ 5998 ************************************************************************/
5989static int 5999static int
5990ixgbe_sysctl_debug(SYSCTLFN_ARGS) 6000ixgbe_sysctl_debug(SYSCTLFN_ARGS)
5991{ 6001{
5992 struct sysctlnode node = *rnode; 6002 struct sysctlnode node = *rnode;
5993 struct adapter *adapter = (struct adapter *)node.sysctl_data; 6003 struct adapter *adapter = (struct adapter *)node.sysctl_data;
5994 int error, result = 0; 6004 int error, result = 0;
5995 6005
5996 if (ixgbe_fw_recovery_mode_swflag(adapter)) 6006 if (ixgbe_fw_recovery_mode_swflag(adapter))
5997 return (EPERM); 6007 return (EPERM);
5998 6008
5999 node.sysctl_data = &result; 6009 node.sysctl_data = &result;
6000 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6010 error = sysctl_lookup(SYSCTLFN_CALL(&node));
6001 6011
6002 if (error || newp == NULL) 6012 if (error || newp == NULL)
6003 return error; 6013 return error;
6004 6014
6005 if (result == 1) 6015 if (result == 1)
6006 ixgbe_print_debug_info(adapter); 6016 ixgbe_print_debug_info(adapter);
6007 6017
6008 return 0; 6018 return 0;
6009} /* ixgbe_sysctl_debug */ 6019} /* ixgbe_sysctl_debug */
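
Writing 1 to this node dumps the adapter state once via ixgbe_print_debug_info(); other values are accepted but ignored. Assuming the node is attached as hw.ixg0.debug (the path depends on how the attach code registered it), "sysctl -w hw.ixg0.debug=1" would trigger a single dump to the console.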
6010 6020
6011/************************************************************************ 6021/************************************************************************
6012 * ixgbe_init_device_features 6022 * ixgbe_init_device_features
6013 ************************************************************************/ 6023 ************************************************************************/
6014static void 6024static void
6015ixgbe_init_device_features(struct adapter *adapter) 6025ixgbe_init_device_features(struct adapter *adapter)
6016{ 6026{
6017 adapter->feat_cap = IXGBE_FEATURE_NETMAP 6027 adapter->feat_cap = IXGBE_FEATURE_NETMAP
6018 | IXGBE_FEATURE_RSS 6028 | IXGBE_FEATURE_RSS
6019 | IXGBE_FEATURE_MSI 6029 | IXGBE_FEATURE_MSI
6020 | IXGBE_FEATURE_MSIX 6030 | IXGBE_FEATURE_MSIX
6021 | IXGBE_FEATURE_LEGACY_IRQ 6031 | IXGBE_FEATURE_LEGACY_IRQ
6022 | IXGBE_FEATURE_LEGACY_TX; 6032 | IXGBE_FEATURE_LEGACY_TX;
6023 6033
6024 /* Set capabilities first... */ 6034 /* Set capabilities first... */
6025 switch (adapter->hw.mac.type) { 6035 switch (adapter->hw.mac.type) {
6026 case ixgbe_mac_82598EB: 6036 case ixgbe_mac_82598EB:
6027 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT) 6037 if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT)
6028 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL; 6038 adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL;
6029 break; 6039 break;
6030 case ixgbe_mac_X540: 6040 case ixgbe_mac_X540:
6031 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 6041 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6032 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 6042 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6033 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && 6043 if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) &&
6034 (adapter->hw.bus.func == 0)) 6044 (adapter->hw.bus.func == 0))
6035 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 6045 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6036 break; 6046 break;
6037 case ixgbe_mac_X550: 6047 case ixgbe_mac_X550:
6038 /* 6048 /*
6039 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6049 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6040 * NVM Image version. 6050 * NVM Image version.
6041 */ 6051 */
6042 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 6052 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6043 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 6053 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6044 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 6054 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6045 break; 6055 break;
6046 case ixgbe_mac_X550EM_x: 6056 case ixgbe_mac_X550EM_x:
6047 /* 6057 /*
6048 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6058 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6049 * NVM Image version. 6059 * NVM Image version.
6050 */ 6060 */
6051 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 6061 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6052 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 6062 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6053 break; 6063 break;
6054 case ixgbe_mac_X550EM_a: 6064 case ixgbe_mac_X550EM_a:
6055 /* 6065 /*
6056 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading 6066 * IXGBE_FEATURE_RECOVERY_MODE will be set after reading
6057 * NVM Image version. 6067 * NVM Image version.
6058 */ 6068 */
6059 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 6069 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6060 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 6070 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6061 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 6071 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6062 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || 6072 if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
6063 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { 6073 (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
6064 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; 6074 adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
6065 adapter->feat_cap |= IXGBE_FEATURE_EEE; 6075 adapter->feat_cap |= IXGBE_FEATURE_EEE;
6066 } 6076 }
6067 break; 6077 break;
6068 case ixgbe_mac_82599EB: 6078 case ixgbe_mac_82599EB:
6069 adapter->feat_cap |= IXGBE_FEATURE_SRIOV; 6079 adapter->feat_cap |= IXGBE_FEATURE_SRIOV;
6070 adapter->feat_cap |= IXGBE_FEATURE_FDIR; 6080 adapter->feat_cap |= IXGBE_FEATURE_FDIR;
6071 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && 6081 if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) &&
6072 (adapter->hw.bus.func == 0)) 6082 (adapter->hw.bus.func == 0))
6073 adapter->feat_cap |= IXGBE_FEATURE_BYPASS; 6083 adapter->feat_cap |= IXGBE_FEATURE_BYPASS;
6074 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) 6084 if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
6075 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; 6085 adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
6076 break; 6086 break;
6077 default: 6087 default:
6078 break; 6088 break;
6079 } 6089 }
6080 6090
6081 /* Enabled by default... */ 6091 /* Enabled by default... */
6082 /* Fan failure detection */ 6092 /* Fan failure detection */
6083 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL) 6093 if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL)
6084 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL; 6094 adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL;
6085 /* Netmap */ 6095 /* Netmap */
6086 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 6096 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
6087 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 6097 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
6088 /* EEE */ 6098 /* EEE */
6089 if (adapter->feat_cap & IXGBE_FEATURE_EEE) 6099 if (adapter->feat_cap & IXGBE_FEATURE_EEE)
6090 adapter->feat_en |= IXGBE_FEATURE_EEE; 6100 adapter->feat_en |= IXGBE_FEATURE_EEE;
6091 /* Thermal Sensor */ 6101 /* Thermal Sensor */
6092 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) 6102 if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
6093 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; 6103 adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
6094 /* 6104 /*
6095 * Recovery mode: 6105 * Recovery mode:
6096 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading 6106 * NetBSD: IXGBE_FEATURE_RECOVERY_MODE will be controlled after reading
6097 * NVM Image version. 6107 * NVM Image version.
6098 */ 6108 */
6099 6109
6100 /* Enabled via global sysctl... */ 6110 /* Enabled via global sysctl... */
6101 /* Flow Director */ 6111 /* Flow Director */
6102 if (ixgbe_enable_fdir) { 6112 if (ixgbe_enable_fdir) {
6103 if (adapter->feat_cap & IXGBE_FEATURE_FDIR) 6113 if (adapter->feat_cap & IXGBE_FEATURE_FDIR)
6104 adapter->feat_en |= IXGBE_FEATURE_FDIR; 6114 adapter->feat_en |= IXGBE_FEATURE_FDIR;
6105 else 6115 else
 6106			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");	 6116			device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled.\n");
6107 } 6117 }
6108 /* Legacy (single queue) transmit */ 6118 /* Legacy (single queue) transmit */
6109 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 6119 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
6110 ixgbe_enable_legacy_tx) 6120 ixgbe_enable_legacy_tx)
6111 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; 6121 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
6112 /* 6122 /*
 6113	 * Message Signaled Interrupts - Extended (MSI-X)		 6123	 * Message Signaled Interrupts - Extended (MSI-X)
6114 * Normal MSI is only enabled if MSI-X calls fail. 6124 * Normal MSI is only enabled if MSI-X calls fail.
6115 */ 6125 */
6116 if (!ixgbe_enable_msix) 6126 if (!ixgbe_enable_msix)
6117 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX; 6127 adapter->feat_cap &= ~IXGBE_FEATURE_MSIX;
6118 /* Receive-Side Scaling (RSS) */ 6128 /* Receive-Side Scaling (RSS) */
6119 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) 6129 if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss)
6120 adapter->feat_en |= IXGBE_FEATURE_RSS; 6130 adapter->feat_en |= IXGBE_FEATURE_RSS;
6121 6131
6122 /* Disable features with unmet dependencies... */ 6132 /* Disable features with unmet dependencies... */
6123 /* No MSI-X */ 6133 /* No MSI-X */
6124 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) { 6134 if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) {
6125 adapter->feat_cap &= ~IXGBE_FEATURE_RSS; 6135 adapter->feat_cap &= ~IXGBE_FEATURE_RSS;
6126 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV; 6136 adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV;
6127 adapter->feat_en &= ~IXGBE_FEATURE_RSS; 6137 adapter->feat_en &= ~IXGBE_FEATURE_RSS;
6128 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; 6138 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
6129 } 6139 }
6130} /* ixgbe_init_device_features */ 6140} /* ixgbe_init_device_features */
6131 6141
6132/************************************************************************ 6142/************************************************************************
6133 * ixgbe_probe - Device identification routine 6143 * ixgbe_probe - Device identification routine
6134 * 6144 *
6135 * Determines if the driver should be loaded on 6145 * Determines if the driver should be loaded on
6136 * adapter based on its PCI vendor/device ID. 6146 * adapter based on its PCI vendor/device ID.
6137 * 6147 *
 6138 * return 1 on match, 0 otherwise (NetBSD autoconf match semantics)	 6148 * return 1 on match, 0 otherwise (NetBSD autoconf match semantics)
6139 ************************************************************************/ 6149 ************************************************************************/
6140static int 6150static int
6141ixgbe_probe(device_t dev, cfdata_t cf, void *aux) 6151ixgbe_probe(device_t dev, cfdata_t cf, void *aux)
6142{ 6152{
6143 const struct pci_attach_args *pa = aux; 6153 const struct pci_attach_args *pa = aux;
6144 6154
6145 return (ixgbe_lookup(pa) != NULL) ? 1 : 0; 6155 return (ixgbe_lookup(pa) != NULL) ? 1 : 0;
6146} 6156}
6147 6157
6148static const ixgbe_vendor_info_t * 6158static const ixgbe_vendor_info_t *
6149ixgbe_lookup(const struct pci_attach_args *pa) 6159ixgbe_lookup(const struct pci_attach_args *pa)
6150{ 6160{
6151 const ixgbe_vendor_info_t *ent; 6161 const ixgbe_vendor_info_t *ent;
6152 pcireg_t subid; 6162 pcireg_t subid;
6153 6163
6154 INIT_DEBUGOUT("ixgbe_lookup: begin"); 6164 INIT_DEBUGOUT("ixgbe_lookup: begin");
6155 6165
6156 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) 6166 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
6157 return NULL; 6167 return NULL;
6158 6168
6159 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 6169 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
6160 6170
6161 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) { 6171 for (ent = ixgbe_vendor_info_array; ent->vendor_id != 0; ent++) {
6162 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && 6172 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
6163 (PCI_PRODUCT(pa->pa_id) == ent->device_id) && 6173 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
6164 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || 6174 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
6165 (ent->subvendor_id == 0)) && 6175 (ent->subvendor_id == 0)) &&
6166 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || 6176 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
6167 (ent->subdevice_id == 0))) { 6177 (ent->subdevice_id == 0))) {
6168 return ent; 6178 return ent;
6169 } 6179 }
6170 } 6180 }
6171 return NULL; 6181 return NULL;
6172} 6182}
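
In the match loop above, a subvendor_id or subdevice_id of 0 acts as a wildcard. An illustrative table entry (not copied from the real array) that would match an X550-T adapter with any subsystem IDs:

	{ IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, 0, 0, 0 },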
6173 6183
6174static int 6184static int
6175ixgbe_ifflags_cb(struct ethercom *ec) 6185ixgbe_ifflags_cb(struct ethercom *ec)
6176{ 6186{
6177 struct ifnet *ifp = &ec->ec_if; 6187 struct ifnet *ifp = &ec->ec_if;
6178 struct adapter *adapter = ifp->if_softc; 6188 struct adapter *adapter = ifp->if_softc;
6179 u_short change; 6189 u_short change;
6180 int rv = 0; 6190 int rv = 0;
6181 6191
6182 IXGBE_CORE_LOCK(adapter); 6192 IXGBE_CORE_LOCK(adapter);
6183 6193
6184 change = ifp->if_flags ^ adapter->if_flags; 6194 change = ifp->if_flags ^ adapter->if_flags;
6185 if (change != 0) 6195 if (change != 0)
6186 adapter->if_flags = ifp->if_flags; 6196 adapter->if_flags = ifp->if_flags;
6187 6197
6188 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 6198 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
6189 rv = ENETRESET; 6199 rv = ENETRESET;
6190 goto out; 6200 goto out;
6191 } else if ((change & IFF_PROMISC) != 0) 6201 } else if ((change & IFF_PROMISC) != 0)
6192 ixgbe_set_rxfilter(adapter); 6202 ixgbe_set_rxfilter(adapter);
6193 6203
6194 /* Check for ec_capenable. */ 6204 /* Check for ec_capenable. */
6195 change = ec->ec_capenable ^ adapter->ec_capenable; 6205 change = ec->ec_capenable ^ adapter->ec_capenable;
6196 adapter->ec_capenable = ec->ec_capenable; 6206 adapter->ec_capenable = ec->ec_capenable;
6197 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 6207 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
6198 | ETHERCAP_VLAN_HWFILTER)) != 0) { 6208 | ETHERCAP_VLAN_HWFILTER)) != 0) {
6199 rv = ENETRESET; 6209 rv = ENETRESET;
6200 goto out; 6210 goto out;
6201 } 6211 }
6202 6212
6203 /* 6213 /*
6204 * Special handling is not required for ETHERCAP_VLAN_MTU. 6214 * Special handling is not required for ETHERCAP_VLAN_MTU.
 6205	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.	 6215	 * MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
6206 */ 6216 */
6207 6217
6208 /* Set up VLAN support and filter */ 6218 /* Set up VLAN support and filter */
6209 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0) 6219 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
6210 ixgbe_setup_vlan_hw_support(adapter); 6220 ixgbe_setup_vlan_hw_support(adapter);
6211 6221
6212out: 6222out:
6213 IXGBE_CORE_UNLOCK(adapter); 6223 IXGBE_CORE_UNLOCK(adapter);
6214 6224
6215 return rv; 6225 return rv;
6216} 6226}
6217 6227
6218/************************************************************************ 6228/************************************************************************
6219 * ixgbe_ioctl - Ioctl entry point 6229 * ixgbe_ioctl - Ioctl entry point
6220 * 6230 *
6221 * Called when the user wants to configure the interface. 6231 * Called when the user wants to configure the interface.
6222 * 6232 *
6223 * return 0 on success, positive on failure 6233 * return 0 on success, positive on failure
6224 ************************************************************************/ 6234 ************************************************************************/
6225static int 6235static int
6226ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data) 6236ixgbe_ioctl(struct ifnet *ifp, u_long command, void *data)
6227{ 6237{
6228 struct adapter *adapter = ifp->if_softc; 6238 struct adapter *adapter = ifp->if_softc;
6229 struct ixgbe_hw *hw = &adapter->hw; 6239 struct ixgbe_hw *hw = &adapter->hw;
6230 struct ifcapreq *ifcr = data; 6240 struct ifcapreq *ifcr = data;
6231 struct ifreq *ifr = data; 6241 struct ifreq *ifr = data;
6232 int error = 0; 6242 int error = 0;
6233 int l4csum_en; 6243 int l4csum_en;
6234 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 6244 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
6235 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 6245 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
6236 6246
6237 if (ixgbe_fw_recovery_mode_swflag(adapter)) 6247 if (ixgbe_fw_recovery_mode_swflag(adapter))
6238 return (EPERM); 6248 return (EPERM);
6239 6249
6240 switch (command) { 6250 switch (command) {
6241 case SIOCSIFFLAGS: 6251 case SIOCSIFFLAGS:
6242 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 6252 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
6243 break; 6253 break;
6244 case SIOCADDMULTI: 6254 case SIOCADDMULTI:
6245 case SIOCDELMULTI: 6255 case SIOCDELMULTI:
6246 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 6256 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
6247 break; 6257 break;
6248 case SIOCSIFMEDIA: 6258 case SIOCSIFMEDIA:
6249 case SIOCGIFMEDIA: 6259 case SIOCGIFMEDIA:
6250 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 6260 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
6251 break; 6261 break;
6252 case SIOCSIFCAP: 6262 case SIOCSIFCAP:
6253 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 6263 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
6254 break; 6264 break;
6255 case SIOCSIFMTU: 6265 case SIOCSIFMTU:
6256 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 6266 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
6257 break; 6267 break;
6258#ifdef __NetBSD__ 6268#ifdef __NetBSD__
6259 case SIOCINITIFADDR: 6269 case SIOCINITIFADDR:
6260 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR"); 6270 IOCTL_DEBUGOUT("ioctl: SIOCINITIFADDR");
6261 break; 6271 break;
6262 case SIOCGIFFLAGS: 6272 case SIOCGIFFLAGS:
6263 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS"); 6273 IOCTL_DEBUGOUT("ioctl: SIOCGIFFLAGS");
6264 break; 6274 break;
6265 case SIOCGIFAFLAG_IN: 6275 case SIOCGIFAFLAG_IN:
6266 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN"); 6276 IOCTL_DEBUGOUT("ioctl: SIOCGIFAFLAG_IN");
6267 break; 6277 break;
6268 case SIOCGIFADDR: 6278 case SIOCGIFADDR:
6269 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR"); 6279 IOCTL_DEBUGOUT("ioctl: SIOCGIFADDR");
6270 break; 6280 break;
6271 case SIOCGIFMTU: 6281 case SIOCGIFMTU:
6272 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)"); 6282 IOCTL_DEBUGOUT("ioctl: SIOCGIFMTU (Get Interface MTU)");
6273 break; 6283 break;
6274 case SIOCGIFCAP: 6284 case SIOCGIFCAP:
6275 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)"); 6285 IOCTL_DEBUGOUT("ioctl: SIOCGIFCAP (Get IF cap)");
6276 break; 6286 break;
6277 case SIOCGETHERCAP: 6287 case SIOCGETHERCAP:
6278 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)"); 6288 IOCTL_DEBUGOUT("ioctl: SIOCGETHERCAP (Get ethercap)");
6279 break; 6289 break;
6280 case SIOCGLIFADDR: 6290 case SIOCGLIFADDR:
6281 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)"); 6291 IOCTL_DEBUGOUT("ioctl: SIOCGLIFADDR (Get Interface addr)");
6282 break; 6292 break;
6283 case SIOCZIFDATA: 6293 case SIOCZIFDATA:
6284 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)"); 6294 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
6285 hw->mac.ops.clear_hw_cntrs(hw); 6295 hw->mac.ops.clear_hw_cntrs(hw);
6286 ixgbe_clear_evcnt(adapter); 6296 ixgbe_clear_evcnt(adapter);
6287 break; 6297 break;
6288 case SIOCAIFADDR: 6298 case SIOCAIFADDR:
6289 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)"); 6299 IOCTL_DEBUGOUT("ioctl: SIOCAIFADDR (add/chg IF alias)");
6290 break; 6300 break;
6291#endif 6301#endif
6292 default: 6302 default:
6293 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); 6303 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
6294 break; 6304 break;
6295 } 6305 }
6296 6306
6297 switch (command) { 6307 switch (command) {
6298 case SIOCGI2C: 6308 case SIOCGI2C:
6299 { 6309 {
6300 struct ixgbe_i2c_req i2c; 6310 struct ixgbe_i2c_req i2c;
6301 6311
6302 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); 6312 IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
6303 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 6313 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
6304 if (error != 0) 6314 if (error != 0)
6305 break; 6315 break;
6306 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 6316 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
6307 error = EINVAL; 6317 error = EINVAL;
6308 break; 6318 break;
6309 } 6319 }
6310 if (i2c.len > sizeof(i2c.data)) { 6320 if (i2c.len > sizeof(i2c.data)) {
6311 error = EINVAL; 6321 error = EINVAL;
6312 break; 6322 break;
6313 } 6323 }
6314 6324
6315 hw->phy.ops.read_i2c_byte(hw, i2c.offset, 6325 hw->phy.ops.read_i2c_byte(hw, i2c.offset,
6316 i2c.dev_addr, i2c.data); 6326 i2c.dev_addr, i2c.data);
6317 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 6327 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
6318 break; 6328 break;
6319 } 6329 }
6320 case SIOCSIFCAP: 6330 case SIOCSIFCAP:
6321 /* Layer-4 Rx checksum offload has to be turned on and 6331 /* Layer-4 Rx checksum offload has to be turned on and
6322 * off as a unit. 6332 * off as a unit.
6323 */ 6333 */
6324 l4csum_en = ifcr->ifcr_capenable & l4csum; 6334 l4csum_en = ifcr->ifcr_capenable & l4csum;
6325 if (l4csum_en != l4csum && l4csum_en != 0) 6335 if (l4csum_en != l4csum && l4csum_en != 0)
6326 return EINVAL; 6336 return EINVAL;
6327 /*FALLTHROUGH*/ 6337 /*FALLTHROUGH*/
6328 case SIOCADDMULTI: 6338 case SIOCADDMULTI:
6329 case SIOCDELMULTI: 6339 case SIOCDELMULTI:
6330 case SIOCSIFFLAGS: 6340 case SIOCSIFFLAGS:
6331 case SIOCSIFMTU: 6341 case SIOCSIFMTU:
6332 default: 6342 default:
6333 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 6343 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
6334 return error; 6344 return error;
6335 if ((ifp->if_flags & IFF_RUNNING) == 0) 6345 if ((ifp->if_flags & IFF_RUNNING) == 0)
6336 ; 6346 ;
6337 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { 6347 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
6338 IXGBE_CORE_LOCK(adapter); 6348 IXGBE_CORE_LOCK(adapter);
6339 if ((ifp->if_flags & IFF_RUNNING) != 0) 6349 if ((ifp->if_flags & IFF_RUNNING) != 0)
6340 ixgbe_init_locked(adapter); 6350 ixgbe_init_locked(adapter);
6341 ixgbe_recalculate_max_frame(adapter); 6351 ixgbe_recalculate_max_frame(adapter);
6342 IXGBE_CORE_UNLOCK(adapter); 6352 IXGBE_CORE_UNLOCK(adapter);
6343 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 6353 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
6344 /* 6354 /*
6345 * Multicast list has changed; set the hardware filter 6355 * Multicast list has changed; set the hardware filter
6346 * accordingly. 6356 * accordingly.
6347 */ 6357 */
6348 IXGBE_CORE_LOCK(adapter); 6358 IXGBE_CORE_LOCK(adapter);
6349 ixgbe_disable_intr(adapter); 6359 ixgbe_disable_intr(adapter);
6350 ixgbe_set_rxfilter(adapter); 6360 ixgbe_set_rxfilter(adapter);
6351 ixgbe_enable_intr(adapter); 6361 ixgbe_enable_intr(adapter);
6352 IXGBE_CORE_UNLOCK(adapter); 6362 IXGBE_CORE_UNLOCK(adapter);
6353 } 6363 }
6354 return 0; 6364 return 0;
6355 } 6365 }
6356 6366
6357 return error; 6367 return error;
6358} /* ixgbe_ioctl */ 6368} /* ixgbe_ioctl */
6359 6369
6360/************************************************************************ 6370/************************************************************************
6361 * ixgbe_check_fan_failure 6371 * ixgbe_check_fan_failure
6362 ************************************************************************/ 6372 ************************************************************************/
6363static void 6373static void
6364ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt) 6374ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt)
6365{ 6375{
6366 u32 mask; 6376 u32 mask;
6367 6377
6368 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) : 6378 mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) :
6369 IXGBE_ESDP_SDP1; 6379 IXGBE_ESDP_SDP1;
6370 6380
6371 if (reg & mask) 6381 if (reg & mask)
6372 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); 6382 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
6373} /* ixgbe_check_fan_failure */ 6383} /* ixgbe_check_fan_failure */
6374 6384
6375/************************************************************************ 6385/************************************************************************
6376 * ixgbe_handle_que 6386 * ixgbe_handle_que
6377 ************************************************************************/ 6387 ************************************************************************/
6378static void 6388static void
6379ixgbe_handle_que(void *context) 6389ixgbe_handle_que(void *context)
6380{ 6390{
6381 struct ix_queue *que = context; 6391 struct ix_queue *que = context;
6382 struct adapter *adapter = que->adapter; 6392 struct adapter *adapter = que->adapter;
6383 struct tx_ring *txr = que->txr; 6393 struct tx_ring *txr = que->txr;
6384 struct ifnet *ifp = adapter->ifp; 6394 struct ifnet *ifp = adapter->ifp;
6385 bool more = false; 6395 bool more = false;
6386 6396

cvs diff -r1.56.2.3 -r1.56.2.4 src/sys/dev/pci/ixgbe/ixgbe.h

--- src/sys/dev/pci/ixgbe/ixgbe.h 2020/07/10 11:35:51 1.56.2.3
+++ src/sys/dev/pci/ixgbe/ixgbe.h 2021/03/11 16:00:24 1.56.2.4
@@ -1,772 +1,773 @@ @@ -1,772 +1,773 @@
1/* $NetBSD: ixgbe.h,v 1.56.2.3 2020/07/10 11:35:51 martin Exp $ */ 1/* $NetBSD: ixgbe.h,v 1.56.2.4 2021/03/11 16:00:24 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 SPDX-License-Identifier: BSD-3-Clause 4 SPDX-License-Identifier: BSD-3-Clause
5 5
6 Copyright (c) 2001-2017, Intel Corporation 6 Copyright (c) 2001-2017, Intel Corporation
7 All rights reserved. 7 All rights reserved.
8 8
9 Redistribution and use in source and binary forms, with or without 9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met: 10 modification, are permitted provided that the following conditions are met:
11 11
12 1. Redistributions of source code must retain the above copyright notice, 12 1. Redistributions of source code must retain the above copyright notice,
13 this list of conditions and the following disclaimer. 13 this list of conditions and the following disclaimer.
14 14
15 2. Redistributions in binary form must reproduce the above copyright 15 2. Redistributions in binary form must reproduce the above copyright
16 notice, this list of conditions and the following disclaimer in the 16 notice, this list of conditions and the following disclaimer in the
17 documentation and/or other materials provided with the distribution. 17 documentation and/or other materials provided with the distribution.
18 18
19 3. Neither the name of the Intel Corporation nor the names of its 19 3. Neither the name of the Intel Corporation nor the names of its
20 contributors may be used to endorse or promote products derived from 20 contributors may be used to endorse or promote products derived from
21 this software without specific prior written permission. 21 this software without specific prior written permission.
22 22
23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 POSSIBILITY OF SUCH DAMAGE. 33 POSSIBILITY OF SUCH DAMAGE.
34 34
35******************************************************************************/ 35******************************************************************************/
36/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.h 327031 2017-12-20 18:15:06Z erj $*/ 36/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.h 327031 2017-12-20 18:15:06Z erj $*/
37 37
38/* 38/*
39 * Copyright (c) 2011 The NetBSD Foundation, Inc. 39 * Copyright (c) 2011 The NetBSD Foundation, Inc.
40 * All rights reserved. 40 * All rights reserved.
41 * 41 *
42 * This code is derived from software contributed to The NetBSD Foundation 42 * This code is derived from software contributed to The NetBSD Foundation
43 * by Coyote Point Systems, Inc. 43 * by Coyote Point Systems, Inc.
44 * 44 *
45 * Redistribution and use in source and binary forms, with or without 45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions 46 * modification, are permitted provided that the following conditions
47 * are met: 47 * are met:
48 * 1. Redistributions of source code must retain the above copyright 48 * 1. Redistributions of source code must retain the above copyright
49 * notice, this list of conditions and the following disclaimer. 49 * notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright 50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the 51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution. 52 * documentation and/or other materials provided with the distribution.
53 * 53 *
54 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 54 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE. 64 * POSSIBILITY OF SUCH DAMAGE.
65 */ 65 */
66 66
67 67
68#ifndef _IXGBE_H_ 68#ifndef _IXGBE_H_
69#define _IXGBE_H_ 69#define _IXGBE_H_
70 70
71 71
72#include <sys/param.h> 72#include <sys/param.h>
73#include <sys/reboot.h> 73#include <sys/reboot.h>
74#include <sys/systm.h> 74#include <sys/systm.h>
75#include <sys/pcq.h> 75#include <sys/pcq.h>
76#include <sys/mbuf.h> 76#include <sys/mbuf.h>
77#include <sys/protosw.h> 77#include <sys/protosw.h>
78#include <sys/socket.h> 78#include <sys/socket.h>
79#include <sys/malloc.h> 79#include <sys/malloc.h>
80#include <sys/kernel.h> 80#include <sys/kernel.h>
81#include <sys/module.h> 81#include <sys/module.h>
82#include <sys/sockio.h> 82#include <sys/sockio.h>
83#include <sys/percpu.h> 83#include <sys/percpu.h>
84 84
85#include <net/if.h> 85#include <net/if.h>
86#include <net/if_arp.h> 86#include <net/if_arp.h>
87#include <net/bpf.h> 87#include <net/bpf.h>
88#include <net/if_ether.h> 88#include <net/if_ether.h>
89#include <net/if_dl.h> 89#include <net/if_dl.h>
90#include <net/if_media.h> 90#include <net/if_media.h>
91 91
92#include <net/if_types.h> 92#include <net/if_types.h>
93#include <net/if_vlanvar.h> 93#include <net/if_vlanvar.h>
94 94
95#include <netinet/in_systm.h> 95#include <netinet/in_systm.h>
96#include <netinet/in.h> 96#include <netinet/in.h>
97#include <netinet/ip.h> 97#include <netinet/ip.h>
98#include <netinet/ip6.h> 98#include <netinet/ip6.h>
99#include <netinet/tcp.h> 99#include <netinet/tcp.h>
100#include <netinet/udp.h> 100#include <netinet/udp.h>
101 101
102#include <sys/bus.h> 102#include <sys/bus.h>
103#include <dev/pci/pcivar.h> 103#include <dev/pci/pcivar.h>
104#include <dev/pci/pcireg.h> 104#include <dev/pci/pcireg.h>
105#include <sys/proc.h> 105#include <sys/proc.h>
106#include <sys/sysctl.h> 106#include <sys/sysctl.h>
107#include <sys/endian.h> 107#include <sys/endian.h>
108#include <sys/workqueue.h> 108#include <sys/workqueue.h>
109#include <sys/cpu.h> 109#include <sys/cpu.h>
110#include <sys/interrupt.h> 110#include <sys/interrupt.h>
111#include <sys/bitops.h> 111#include <sys/bitops.h>
112 112
113#include "ixgbe_netbsd.h" 113#include "ixgbe_netbsd.h"
114#include "ixgbe_api.h" 114#include "ixgbe_api.h"
115#include "ixgbe_common.h" 115#include "ixgbe_common.h"
116#include "ixgbe_vf.h" 116#include "ixgbe_vf.h"
117#include "ixgbe_features.h" 117#include "ixgbe_features.h"
118 118
119/* Tunables */ 119/* Tunables */
120 120
121/* 121/*
 122 * TxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the	 122 * TxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the
123 * number of transmit descriptors allocated by the driver. Increasing this 123 * number of transmit descriptors allocated by the driver. Increasing this
124 * value allows the driver to queue more transmits. Each descriptor is 16 124 * value allows the driver to queue more transmits. Each descriptor is 16
 125 * bytes. Performance tests have shown the 2K value to be optimal for top	 125 * bytes. Performance tests have shown the 2K value to be optimal for top
126 * performance. 126 * performance.
127 */ 127 */
128#define DEFAULT_TXD 1024 128#define DEFAULT_TXD 1024
129#define PERFORM_TXD 2048 129#define PERFORM_TXD 2048
130#define MAX_TXD 4096 130#define MAX_TXD 4096
131#define MIN_TXD 64 131#define MIN_TXD 64
132 132
133/* 133/*
 134 * RxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the	 134 * RxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the
135 * number of receive descriptors allocated for each RX queue. Increasing this 135 * number of receive descriptors allocated for each RX queue. Increasing this
136 * value allows the driver to buffer more incoming packets. Each descriptor 136 * value allows the driver to buffer more incoming packets. Each descriptor
137 * is 16 bytes. A receive buffer is also allocated for each descriptor. 137 * is 16 bytes. A receive buffer is also allocated for each descriptor.
138 * 138 *
139 * Note: with 8 rings and a dual port card, it is possible to bump up 139 * Note: with 8 rings and a dual port card, it is possible to bump up
 140 * against the system mbuf pool limit; you can tune nmbclusters	 140 * against the system mbuf pool limit; you can tune nmbclusters
141 * to adjust for this. 141 * to adjust for this.
142 */ 142 */
143#define DEFAULT_RXD 1024 143#define DEFAULT_RXD 1024
144#define PERFORM_RXD 2048 144#define PERFORM_RXD 2048
145#define MAX_RXD 4096 145#define MAX_RXD 4096
146#define MIN_RXD 64 146#define MIN_RXD 64
147 147
148/* Alignment for rings */ 148/* Alignment for rings */
149#define DBA_ALIGN 128 149#define DBA_ALIGN 128
150 150
151/* 151/*
 152 * This is the max watchdog interval, i.e. the time that can	 152 * This is the max watchdog interval, i.e. the time that can
 153 * pass between any two TX clean operations; these only happen	 153 * pass between any two TX clean operations; these only happen
 154 * when the TX hardware is functioning.	 154 * when the TX hardware is functioning.
155 */ 155 */
156#define IXGBE_WATCHDOG (10 * hz) 156#define IXGBE_WATCHDOG (10 * hz)
157 157
158/* 158/*
 159 * These parameters control when the driver calls the routine to reclaim	 159 * These parameters control when the driver calls the routine to reclaim
160 * transmit descriptors. 160 * transmit descriptors.
161 */ 161 */
162#define IXGBE_TX_CLEANUP_THRESHOLD(_a) ((_a)->num_tx_desc / 8) 162#define IXGBE_TX_CLEANUP_THRESHOLD(_a) ((_a)->num_tx_desc / 8)
163#define IXGBE_TX_OP_THRESHOLD(_a) ((_a)->num_tx_desc / 32) 163#define IXGBE_TX_OP_THRESHOLD(_a) ((_a)->num_tx_desc / 32)
164 164
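
With the default of 1024 TX descriptors, this works out to a cleanup threshold of 1024 / 8 = 128 descriptors and an OP threshold of 1024 / 32 = 32 descriptors.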
165/* These defines are used in MTU calculations */ 165/* These defines are used in MTU calculations */
166#define IXGBE_MAX_FRAME_SIZE 9728 166#define IXGBE_MAX_FRAME_SIZE 9728
167#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN) 167#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
168#define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \ 168#define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
169 ETHER_VLAN_ENCAP_LEN) 169 ETHER_VLAN_ENCAP_LEN)
170#define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) 170#define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
171#define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN) 171#define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
172 172
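
With ETHER_HDR_LEN (14) plus ETHER_CRC_LEN (4), IXGBE_MTU_HDR is 18, so IXGBE_MAX_MTU = 9728 - 18 = 9710; with the additional 4-byte VLAN encapsulation, IXGBE_MAX_MTU_VLAN = 9728 - 22 = 9706.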
173/* Flow control constants */ 173/* Flow control constants */
174#define IXGBE_FC_PAUSE 0xFFFF 174#define IXGBE_FC_PAUSE 0xFFFF
175 175
176/* 176/*
177 * Used for optimizing small rx mbufs. Effort is made to keep the copy 177 * Used for optimizing small rx mbufs. Effort is made to keep the copy
178 * small and aligned for the CPU L1 cache. 178 * small and aligned for the CPU L1 cache.
179 * 179 *
180 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting 180 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
181 * 32 byte alignment needed for the fast bcopy results in 8 bytes being 181 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
182 * wasted. Getting 64 byte alignment, which _should_ be ideal for 182 * wasted. Getting 64 byte alignment, which _should_ be ideal for
183 * modern Intel CPUs, results in 40 bytes wasted and a significant drop 183 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
184 * in observed efficiency of the optimization, 97.9% -> 81.8%. 184 * in observed efficiency of the optimization, 97.9% -> 81.8%.
185 */ 185 */
186#define MPKTHSIZE (offsetof(struct _mbuf_dummy, m_pktdat)) 186#define MPKTHSIZE (offsetof(struct _mbuf_dummy, m_pktdat))
187#define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32) 187#define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
188#define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED) 188#define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
189#define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE) 189#define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
190 190
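
A worked example, assuming MSIZE is 256 and MPKTHSIZE works out to 88 (consistent with the MHLEN of 168 quoted above): IXGBE_RX_COPY_HDR_PADDED = (((88 - 1) / 32) + 1) * 32 = 96, IXGBE_RX_COPY_LEN = 256 - 96 = 160, and IXGBE_RX_COPY_ALIGN = 96 - 88 = 8, the 8 wasted bytes the comment mentions.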
191/* Keep older OS drivers building... */ 191/* Keep older OS drivers building... */
192#if !defined(SYSCTL_ADD_UQUAD) 192#if !defined(SYSCTL_ADD_UQUAD)
193#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD 193#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
194#endif 194#endif
195 195
196/* Defines for printing debug information */ 196/* Defines for printing debug information */
197#define DEBUG_INIT 0 197#define DEBUG_INIT 0
198#define DEBUG_IOCTL 0 198#define DEBUG_IOCTL 0
199#define DEBUG_HW 0 199#define DEBUG_HW 0
200 200
201#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") 201#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
202#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) 202#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
203#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) 203#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
204#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") 204#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
205#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) 205#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
206#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) 206#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
207#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") 207#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
208#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) 208#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
209#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) 209#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
210 210
211#define MAX_NUM_MULTICAST_ADDRESSES 128 211#define MAX_NUM_MULTICAST_ADDRESSES 128
212#define IXGBE_82598_SCATTER 100 212#define IXGBE_82598_SCATTER 100
213#define IXGBE_82599_SCATTER 32 213#define IXGBE_82599_SCATTER 32
214#define MSIX_82598_BAR 3 214#define MSIX_82598_BAR 3
215#define MSIX_82599_BAR 4 215#define MSIX_82599_BAR 4
216#define IXGBE_TSO_SIZE 262140 216#define IXGBE_TSO_SIZE 262140
217#define IXGBE_RX_HDR 128 217#define IXGBE_RX_HDR 128
218#define IXGBE_VFTA_SIZE 128 218#define IXGBE_VFTA_SIZE 128
219#define IXGBE_BR_SIZE 4096 219#define IXGBE_BR_SIZE 2048
220#define IXGBE_QUEUE_MIN_FREE 32 220#define IXGBE_QUEUE_MIN_FREE 32
221#define IXGBE_MAX_TX_BUSY 10 221#define IXGBE_MAX_TX_BUSY 10
222#define IXGBE_QUEUE_HUNG 0x80000000 222#define IXGBE_QUEUE_HUNG 0x80000000
223 223
224#define IXGBE_EITR_DEFAULT 128 224#define IXGBE_EITR_DEFAULT 128
225 225
226/* IOCTL define to gather SFP+ Diagnostic data */ 226/* IOCTL define to gather SFP+ Diagnostic data */
227#define SIOCGI2C SIOCGIFGENERIC 227#define SIOCGI2C SIOCGIFGENERIC
228 228
229/* Offload bits in mbuf flag */ 229/* Offload bits in mbuf flag */
230#define M_CSUM_OFFLOAD \ 230#define M_CSUM_OFFLOAD \
231 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_UDPv6|M_CSUM_TCPv6) 231 (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_UDPv6|M_CSUM_TCPv6)
232 232
233/* Backward compatibility items for very old versions */ 233/* Backward compatibility items for very old versions */
234#ifndef pci_find_cap 234#ifndef pci_find_cap
235#define pci_find_cap pci_find_extcap 235#define pci_find_cap pci_find_extcap
236#endif 236#endif
237 237
238#ifndef DEVMETHOD_END 238#ifndef DEVMETHOD_END
239#define DEVMETHOD_END { NULL, NULL } 239#define DEVMETHOD_END { NULL, NULL }
240#endif 240#endif
241 241
242/* 242/*
243 * Interrupt Moderation parameters 243 * Interrupt Moderation parameters
244 */ 244 */
245#define IXGBE_LOW_LATENCY 128 245#define IXGBE_LOW_LATENCY 128
246#define IXGBE_AVE_LATENCY 400 246#define IXGBE_AVE_LATENCY 400
247#define IXGBE_BULK_LATENCY 1200 247#define IXGBE_BULK_LATENCY 1200
248 248
249/* Using 1FF (the max value), the interval is ~1.05ms */ 249/* Using 1FF (the max value), the interval is ~1.05ms */
250#define IXGBE_LINK_ITR_QUANTA 0x1FF 250#define IXGBE_LINK_ITR_QUANTA 0x1FF
251#define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \ 251#define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
252 IXGBE_EITR_ITR_INT_MASK) 252 IXGBE_EITR_ITR_INT_MASK)
253 253
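As a quick check of the "~1.05ms" figure above: the interval field sits in bits [11:3] of EITR, which is why the quanta value is shifted left by 3 and masked with IXGBE_EITR_ITR_INT_MASK; assuming the ~2.048us-per-quantum granularity those numbers imply, 0x1FF = 511 quanta gives 511 * 2.048us ~= 1.046ms.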
254 254
255/************************************************************************ 255/************************************************************************
256 * vendor_info_array 256 * vendor_info_array
257 * 257 *
258 * Contains the list of Subvendor/Subdevice IDs on 258 * Contains the list of Subvendor/Subdevice IDs on
259 * which the driver should load. 259 * which the driver should load.
260 ************************************************************************/ 260 ************************************************************************/
261typedef struct _ixgbe_vendor_info_t { 261typedef struct _ixgbe_vendor_info_t {
262 unsigned int vendor_id; 262 unsigned int vendor_id;
263 unsigned int device_id; 263 unsigned int device_id;
264 unsigned int subvendor_id; 264 unsigned int subvendor_id;
265 unsigned int subdevice_id; 265 unsigned int subdevice_id;
266 unsigned int index; 266 unsigned int index;
267} ixgbe_vendor_info_t; 267} ixgbe_vendor_info_t;
268 268
269/* This is used to get SFP+ module data */ 269/* This is used to get SFP+ module data */
270struct ixgbe_i2c_req { 270struct ixgbe_i2c_req {
271 u8 dev_addr; 271 u8 dev_addr;
272 u8 offset; 272 u8 offset;
273 u8 len; 273 u8 len;
274 u8 data[8]; 274 u8 data[8];
275}; 275};
276 276
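As an illustration of how the request above might be filled in, a minimal sketch; the A0h device address and zero offset follow common SFF-8472 conventions and are assumptions here, not values taken from this driver:

	/* Sketch: read 8 bytes of SFP+ module data for SIOCGI2C. */
	struct ixgbe_i2c_req req = {
		.dev_addr = 0xA0,	/* SFF-8472 A0h page (assumption) */
		.offset   = 0,		/* start of the ID fields */
		.len      = 8,		/* at most sizeof(req.data) */
	};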
277struct ixgbe_bp_data { 277struct ixgbe_bp_data {
278 u32 low; 278 u32 low;
279 u32 high; 279 u32 high;
280 u32 log; 280 u32 log;
281}; 281};
282 282
283struct ixgbe_tx_buf { 283struct ixgbe_tx_buf {
284 union ixgbe_adv_tx_desc *eop; 284 union ixgbe_adv_tx_desc *eop;
285 struct mbuf *m_head; 285 struct mbuf *m_head;
286 bus_dmamap_t map; 286 bus_dmamap_t map;
287}; 287};
288 288
289struct ixgbe_rx_buf { 289struct ixgbe_rx_buf {
290 struct mbuf *buf; 290 struct mbuf *buf;
291 struct mbuf *fmp; 291 struct mbuf *fmp;
292 bus_dmamap_t pmap; 292 bus_dmamap_t pmap;
293 u_int flags; 293 u_int flags;
294#define IXGBE_RX_COPY 0x01 294#define IXGBE_RX_COPY 0x01
295 uint64_t addr; 295 uint64_t addr;
296}; 296};
297 297
298/* 298/*
299 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free 299 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free
300 */ 300 */
301struct ixgbe_dma_alloc { 301struct ixgbe_dma_alloc {
302 bus_addr_t dma_paddr; 302 bus_addr_t dma_paddr;
303 void *dma_vaddr; 303 void *dma_vaddr;
304 ixgbe_dma_tag_t *dma_tag; 304 ixgbe_dma_tag_t *dma_tag;
305 bus_dmamap_t dma_map; 305 bus_dmamap_t dma_map;
306 bus_dma_segment_t dma_seg; 306 bus_dma_segment_t dma_seg;
307 bus_size_t dma_size; 307 bus_size_t dma_size;
308}; 308};
309 309
310struct ixgbe_mc_addr { 310struct ixgbe_mc_addr {
311 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 311 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
312 u32 vmdq; 312 u32 vmdq;
313}; 313};
314 314
315/* 315/*
316 * Driver queue struct: this is the interrupt container 316 * Driver queue struct: this is the interrupt container
317 * for the associated tx and rx ring. 317 * for the associated tx and rx ring.
318 */ 318 */
319struct ix_queue { 319struct ix_queue {
320 struct adapter *adapter; 320 struct adapter *adapter;
321 u32 msix; /* This queue's MSI-X vector */ 321 u32 msix; /* This queue's MSI-X vector */
322 u32 eitr_setting; 322 u32 eitr_setting;
323 u32 me; 323 u32 me;
324 struct resource *res; 324 struct resource *res;
325 int busy; 325 int busy;
326 struct tx_ring *txr; 326 struct tx_ring *txr;
327 struct rx_ring *rxr; 327 struct rx_ring *rxr;
328 struct work wq_cookie; 328 struct work wq_cookie;
329 void *que_si; 329 void *que_si;
 330 /* Per queue event counters */ 330 /* Per queue event counters */
331 struct evcnt irqs; /* Hardware interrupt */ 331 struct evcnt irqs; /* Hardware interrupt */
 332 struct evcnt handleq; /* software interrupt */ 332 struct evcnt handleq; /* software interrupt */
333 struct evcnt req; /* deferred */ 333 struct evcnt req; /* deferred */
334 char namebuf[32]; 334 char namebuf[32];
335 char evnamebuf[32]; 335 char evnamebuf[32];
336 336
337 /* Lock for disabled_count and this queue's EIMS/EIMC bit */ 337 /* Lock for disabled_count and this queue's EIMS/EIMC bit */
338 kmutex_t dc_mtx; 338 kmutex_t dc_mtx;
339 /* 339 /*
340 * disabled_count means: 340 * disabled_count means:
341 * 0 : this queue is enabled 341 * 0 : this queue is enabled
342 * > 0 : this queue is disabled 342 * > 0 : this queue is disabled
 343 * the value is the number of times ixgbe_disable_queue() was called 343 * the value is the number of times ixgbe_disable_queue() was called
344 */ 344 */
345 int disabled_count; 345 int disabled_count;
346 bool txrx_use_workqueue; 346 bool txrx_use_workqueue;
347}; 347};
348 348
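The disabled_count field above implements a counted disable, so nested disable/enable calls balance out. A minimal sketch of the intended pairing; sketch_write_eimc()/sketch_write_eims() are hypothetical stand-ins for the driver's real EIMC/EIMS register writes:

/* Sketch only: counted queue disable/enable per the comment above. */
static void sketch_write_eimc(struct adapter *, u32);	/* hypothetical */
static void sketch_write_eims(struct adapter *, u32);	/* hypothetical */

static void
sketch_disable_queue(struct ix_queue *que)
{
	mutex_enter(&que->dc_mtx);
	if (que->disabled_count++ == 0)
		sketch_write_eimc(que->adapter, que->me);	/* mask */
	mutex_exit(&que->dc_mtx);
}

static void
sketch_enable_queue(struct ix_queue *que)
{
	mutex_enter(&que->dc_mtx);
	if (--que->disabled_count == 0)
		sketch_write_eims(que->adapter, que->me);	/* unmask */
	mutex_exit(&que->dc_mtx);
}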
349/* 349/*
350 * The transmit ring, one per queue 350 * The transmit ring, one per queue
351 */ 351 */
352struct tx_ring { 352struct tx_ring {
353 struct adapter *adapter; 353 struct adapter *adapter;
354 kmutex_t tx_mtx; 354 kmutex_t tx_mtx;
355 u32 me; 355 u32 me;
356 u32 tail; 356 u32 tail;
357 int busy; 357 int busy;
358 union ixgbe_adv_tx_desc *tx_base; 358 union ixgbe_adv_tx_desc *tx_base;
359 struct ixgbe_tx_buf *tx_buffers; 359 struct ixgbe_tx_buf *tx_buffers;
360 struct ixgbe_dma_alloc txdma; 360 struct ixgbe_dma_alloc txdma;
361 volatile u16 tx_avail; 361 volatile u16 tx_avail;
362 u16 next_avail_desc; 362 u16 next_avail_desc;
363 u16 next_to_clean; 363 u16 next_to_clean;
364 u16 num_desc; 364 u16 num_desc;
365 ixgbe_dma_tag_t *txtag; 365 ixgbe_dma_tag_t *txtag;
366#if 0 366#if 0
367 char mtx_name[16]; /* NetBSD has no mutex name */ 367 char mtx_name[16]; /* NetBSD has no mutex name */
368#endif 368#endif
369 pcq_t *txr_interq; 369 pcq_t *txr_interq;
370 struct work wq_cookie; 370 struct work wq_cookie;
371 void *txr_si; 371 void *txr_si;
372 bool txr_no_space; /* Like IFF_OACTIVE */ 372 bool txr_no_space; /* Like IFF_OACTIVE */
373 373
374 /* Flow Director */ 374 /* Flow Director */
375 u16 atr_sample; 375 u16 atr_sample;
376 u16 atr_count; 376 u16 atr_count;
377 377
378 u64 bytes; /* Used for AIM */ 378 u64 bytes; /* Used for AIM */
379 u64 packets; 379 u64 packets;
380 /* Soft Stats */ 380 /* Soft Stats */
381 struct evcnt tso_tx; 381 struct evcnt tso_tx;
382 struct evcnt no_desc_avail; 382 struct evcnt no_desc_avail;
383 struct evcnt total_packets; 383 struct evcnt total_packets;
384 struct evcnt pcq_drops; 384 struct evcnt pcq_drops;
 385 /* Per queue counters. The adapter total is in struct adapter */ 385 /* Per queue counters. The adapter total is in struct adapter */
386 u64 q_efbig_tx_dma_setup; 386 u64 q_efbig_tx_dma_setup;
387 u64 q_mbuf_defrag_failed; 387 u64 q_mbuf_defrag_failed;
388 u64 q_efbig2_tx_dma_setup; 388 u64 q_efbig2_tx_dma_setup;
389 u64 q_einval_tx_dma_setup; 389 u64 q_einval_tx_dma_setup;
390 u64 q_other_tx_dma_setup; 390 u64 q_other_tx_dma_setup;
391 u64 q_eagain_tx_dma_setup; 391 u64 q_eagain_tx_dma_setup;
392 u64 q_enomem_tx_dma_setup; 392 u64 q_enomem_tx_dma_setup;
393 u64 q_tso_err; 393 u64 q_tso_err;
394}; 394};
395 395
396 396
397/* 397/*
398 * The Receive ring, one per rx queue 398 * The Receive ring, one per rx queue
399 */ 399 */
400struct rx_ring { 400struct rx_ring {
401 struct adapter *adapter; 401 struct adapter *adapter;
402 kmutex_t rx_mtx; 402 kmutex_t rx_mtx;
403 u32 me; 403 u32 me;
404 u32 tail; 404 u32 tail;
405 union ixgbe_adv_rx_desc *rx_base; 405 union ixgbe_adv_rx_desc *rx_base;
406 struct ixgbe_dma_alloc rxdma; 406 struct ixgbe_dma_alloc rxdma;
407#ifdef LRO 407#ifdef LRO
408 struct lro_ctrl lro; 408 struct lro_ctrl lro;
409#endif /* LRO */ 409#endif /* LRO */
410 bool lro_enabled; 410 bool lro_enabled;
411 bool hw_rsc; 411 bool hw_rsc;
412 bool vtag_strip; 412 bool vtag_strip;
413 u16 next_to_refresh; 413 u16 next_to_refresh;
414 u16 next_to_check; 414 u16 next_to_check;
415 u16 num_desc; 415 u16 num_desc;
416 u16 mbuf_sz; 416 u16 mbuf_sz;
417#if 0 417#if 0
418 char mtx_name[16]; /* NetBSD has no mutex name */ 418 char mtx_name[16]; /* NetBSD has no mutex name */
419#endif 419#endif
420 struct ixgbe_rx_buf *rx_buffers; 420 struct ixgbe_rx_buf *rx_buffers;
421 ixgbe_dma_tag_t *ptag; 421 ixgbe_dma_tag_t *ptag;
422 u16 last_rx_mbuf_sz; 422 u16 last_rx_mbuf_sz;
423 u32 last_num_rx_desc; 423 u32 last_num_rx_desc;
424 ixgbe_extmem_head_t jcl_head; 424 ixgbe_extmem_head_t jcl_head;
425 425
426 u64 bytes; /* Used for AIM calc */ 426 u64 bytes; /* Used for AIM calc */
427 u64 packets; 427 u64 packets;
428 428
429 /* Soft stats */ 429 /* Soft stats */
430 struct evcnt rx_copies; 430 struct evcnt rx_copies;
431 struct evcnt rx_packets; 431 struct evcnt rx_packets;
432 struct evcnt rx_bytes; 432 struct evcnt rx_bytes;
433 struct evcnt rx_discarded; 433 struct evcnt rx_discarded;
434 struct evcnt no_jmbuf; 434 struct evcnt no_jmbuf;
435 u64 rsc_num; 435 u64 rsc_num;
436 436
437 /* Flow Director */ 437 /* Flow Director */
438 u64 flm; 438 u64 flm;
439}; 439};
440 440
441struct ixgbe_vf { 441struct ixgbe_vf {
442 u_int pool; 442 u_int pool;
443 u_int rar_index; 443 u_int rar_index;
444 u_int max_frame_size; 444 u_int max_frame_size;
445 uint32_t flags; 445 uint32_t flags;
446 uint8_t ether_addr[ETHER_ADDR_LEN]; 446 uint8_t ether_addr[ETHER_ADDR_LEN];
447 uint16_t mc_hash[IXGBE_MAX_VF_MC]; 447 uint16_t mc_hash[IXGBE_MAX_VF_MC];
448 uint16_t num_mc_hashes; 448 uint16_t num_mc_hashes;
449 uint16_t default_vlan; 449 uint16_t default_vlan;
450 uint16_t vlan_tag; 450 uint16_t vlan_tag;
451 uint16_t api_ver; 451 uint16_t api_ver;
452}; 452};
453 453
454/* 454/*
 455 * NetBSD: For traffic class 455 * NetBSD: For traffic class
 456 * Currently, the following structure is only for statistics. 456 * Currently, the following structure is only for statistics.
457 */ 457 */
458struct ixgbe_tc { 458struct ixgbe_tc {
459 char evnamebuf[32]; 459 char evnamebuf[32];
460}; 460};
461 461
462/* Our adapter structure */ 462/* Our adapter structure */
463struct adapter { 463struct adapter {
464 struct ixgbe_hw hw; 464 struct ixgbe_hw hw;
465 struct ixgbe_osdep osdep; 465 struct ixgbe_osdep osdep;
466 466
467 device_t dev; 467 device_t dev;
468 struct ifnet *ifp; 468 struct ifnet *ifp;
469 struct if_percpuq *ipq; /* softint-based input queues */ 469 struct if_percpuq *ipq; /* softint-based input queues */
470 470
471 struct resource *pci_mem; 471 struct resource *pci_mem;
472 struct resource *msix_mem; 472 struct resource *msix_mem;
473 473
474 /* NetBSD: Interrupt resources are in osdep */ 474 /* NetBSD: Interrupt resources are in osdep */
475 475
476 struct ifmedia media; 476 struct ifmedia media;
477 callout_t timer; 477 callout_t timer;
478 u_short if_flags; /* saved ifp->if_flags */ 478 u_short if_flags; /* saved ifp->if_flags */
479 int ec_capenable; /* saved ec->ec_capenable */ 479 int ec_capenable; /* saved ec->ec_capenable */
480 480
481 kmutex_t core_mtx; 481 kmutex_t core_mtx;
482 482
483 unsigned int num_queues; 483 unsigned int num_queues;
484 484
485 /* 485 /*
486 * Shadow VFTA table, this is needed because 486 * Shadow VFTA table, this is needed because
487 * the real vlan filter table gets cleared during 487 * the real vlan filter table gets cleared during
488 * a soft reset and the driver needs to be able 488 * a soft reset and the driver needs to be able
489 * to repopulate it. 489 * to repopulate it.
490 */ 490 */
491 u32 shadow_vfta[IXGBE_VFTA_SIZE]; 491 u32 shadow_vfta[IXGBE_VFTA_SIZE];
492 492
493 /* Info about the interface */ 493 /* Info about the interface */
494 int advertise; /* link speeds */ 494 int advertise; /* link speeds */
495 bool enable_aim; /* adaptive interrupt moderation */ 495 bool enable_aim; /* adaptive interrupt moderation */
496 int link_active; /* Use LINK_STATE_* value */ 496 int link_active; /* Use LINK_STATE_* value */
497 u16 max_frame_size; 497 u16 max_frame_size;
498 u16 num_segs; 498 u16 num_segs;
499 u32 link_speed; 499 u32 link_speed;
500 bool link_up; 500 bool link_up;
501 u32 vector; 501 u32 vector;
502 u16 dmac; 502 u16 dmac;
503 u32 phy_layer; 503 u32 phy_layer;
504 504
505 /* Power management-related */ 505 /* Power management-related */
506 bool wol_support; 506 bool wol_support;
507 u32 wufc; 507 u32 wufc;
508 508
509 /* Mbuf cluster size */ 509 /* Mbuf cluster size */
510 u32 rx_mbuf_sz; 510 u32 rx_mbuf_sz;
511 511
512 /* Support for pluggable optics */ 512 /* Support for pluggable optics */
513 bool sfp_probe; 513 bool sfp_probe;
514 void *link_si; /* Link tasklet */ 514 void *link_si; /* Link tasklet */
515 void *mod_si; /* SFP tasklet */ 515 void *mod_si; /* SFP tasklet */
516 void *msf_si; /* Multispeed Fiber */ 516 void *msf_si; /* Multispeed Fiber */
517 void *mbx_si; /* VF -> PF mailbox interrupt */ 517 void *mbx_si; /* VF -> PF mailbox interrupt */
518 518
519 /* Flow Director */ 519 /* Flow Director */
520 int fdir_reinit; 520 int fdir_reinit;
521 void *fdir_si; 521 void *fdir_si;
522 522
523 void *phy_si; /* PHY intr tasklet */ 523 void *phy_si; /* PHY intr tasklet */
524 524
525 bool txrx_use_workqueue; 525 bool txrx_use_workqueue;
526 526
527 /* 527 /*
528 * Workqueue for ixgbe_handle_que_work(). 528 * Workqueue for ixgbe_handle_que_work().
529 * 529 *
 530 * que_wq's "enqueued flag" is not required, because enqueueing 530 * que_wq's "enqueued flag" is not required, because enqueueing
 531 * ixgbe_handle_que_work() twice is avoided by masking the 531 * ixgbe_handle_que_work() twice is avoided by masking the
 532 * queue's interrupt via EIMC. See also ixgbe_msix_que(). 532 * queue's interrupt via EIMC. See also ixgbe_msix_que().
533 */ 533 */
534 struct workqueue *que_wq; 534 struct workqueue *que_wq;
535 /* Workqueue for ixgbe_deferred_mq_start_work() */ 535 /* Workqueue for ixgbe_deferred_mq_start_work() */
536 struct workqueue *txr_wq; 536 struct workqueue *txr_wq;
537 percpu_t *txr_wq_enqueued; 537 percpu_t *txr_wq_enqueued;
538 538
539 /* 539 /*
540 * Queues: 540 * Queues:
 541 * This is the irq holder; it has 541 * This is the irq holder; it has
 542 * an RX/TX pair of rings associated 542 * an RX/TX pair of rings associated
 543 * with it. 543 * with it.
544 */ 544 */
545 struct ix_queue *queues; 545 struct ix_queue *queues;
546 546
547 /* 547 /*
548 * Transmit rings 548 * Transmit rings
549 * Allocated at run time, an array of rings 549 * Allocated at run time, an array of rings
550 */ 550 */
551 struct tx_ring *tx_rings; 551 struct tx_ring *tx_rings;
552 u32 num_tx_desc; 552 u32 num_tx_desc;
553 u32 tx_process_limit; 553 u32 tx_process_limit;
554 554
555 /* 555 /*
556 * Receive rings 556 * Receive rings
557 * Allocated at run time, an array of rings 557 * Allocated at run time, an array of rings
558 */ 558 */
559 struct rx_ring *rx_rings; 559 struct rx_ring *rx_rings;
560 u64 active_queues; 560 u64 active_queues;
561 u32 num_rx_desc; 561 u32 num_rx_desc;
562 u32 rx_process_limit; 562 u32 rx_process_limit;
 563 int num_jcl;
563 564
564 /* Multicast array memory */ 565 /* Multicast array memory */
565 struct ixgbe_mc_addr *mta; 566 struct ixgbe_mc_addr *mta;
566 567
567 /* SR-IOV */ 568 /* SR-IOV */
568 int iov_mode; 569 int iov_mode;
569 int num_vfs; 570 int num_vfs;
570 int pool; 571 int pool;
571 struct ixgbe_vf *vfs; 572 struct ixgbe_vf *vfs;
572 573
573 /* Bypass */ 574 /* Bypass */
574 struct ixgbe_bp_data bypass; 575 struct ixgbe_bp_data bypass;
575 576
576 /* Netmap */ 577 /* Netmap */
577 void (*init_locked)(struct adapter *); 578 void (*init_locked)(struct adapter *);
578 void (*stop_locked)(void *); 579 void (*stop_locked)(void *);
579 580
580 /* Firmware error check */ 581 /* Firmware error check */
581 u_int recovery_mode; 582 u_int recovery_mode;
582 struct callout recovery_mode_timer; 583 struct callout recovery_mode_timer;
583 584
584 /* Misc stats maintained by the driver */ 585 /* Misc stats maintained by the driver */
585 struct evcnt efbig_tx_dma_setup; 586 struct evcnt efbig_tx_dma_setup;
586 struct evcnt mbuf_defrag_failed; 587 struct evcnt mbuf_defrag_failed;
587 struct evcnt efbig2_tx_dma_setup; 588 struct evcnt efbig2_tx_dma_setup;
588 struct evcnt einval_tx_dma_setup; 589 struct evcnt einval_tx_dma_setup;
589 struct evcnt other_tx_dma_setup; 590 struct evcnt other_tx_dma_setup;
590 struct evcnt eagain_tx_dma_setup; 591 struct evcnt eagain_tx_dma_setup;
591 struct evcnt enomem_tx_dma_setup; 592 struct evcnt enomem_tx_dma_setup;
592 struct evcnt tso_err; 593 struct evcnt tso_err;
593 struct evcnt watchdog_events; 594 struct evcnt watchdog_events;
594 struct evcnt link_irq; 595 struct evcnt link_irq;
595 struct evcnt link_sicount; 596 struct evcnt link_sicount;
596 struct evcnt mod_sicount; 597 struct evcnt mod_sicount;
597 struct evcnt msf_sicount; 598 struct evcnt msf_sicount;
598 struct evcnt phy_sicount; 599 struct evcnt phy_sicount;
599 600
600 union { 601 union {
601 struct ixgbe_hw_stats pf; 602 struct ixgbe_hw_stats pf;
602 struct ixgbevf_hw_stats vf; 603 struct ixgbevf_hw_stats vf;
603 } stats; 604 } stats;
604#if __FreeBSD_version >= 1100036 605#if __FreeBSD_version >= 1100036
605 /* counter(9) stats */ 606 /* counter(9) stats */
606 u64 ipackets; 607 u64 ipackets;
607 u64 ierrors; 608 u64 ierrors;
608 u64 opackets; 609 u64 opackets;
609 u64 oerrors; 610 u64 oerrors;
610 u64 ibytes; 611 u64 ibytes;
611 u64 obytes; 612 u64 obytes;
612 u64 imcasts; 613 u64 imcasts;
613 u64 omcasts; 614 u64 omcasts;
614 u64 iqdrops; 615 u64 iqdrops;
615 u64 noproto; 616 u64 noproto;
616#endif 617#endif
617 /* Feature capable/enabled flags. See ixgbe_features.h */ 618 /* Feature capable/enabled flags. See ixgbe_features.h */
618 u32 feat_cap; 619 u32 feat_cap;
619 u32 feat_en; 620 u32 feat_en;
620 621
621 /* Traffic classes */ 622 /* Traffic classes */
622 struct ixgbe_tc tcs[IXGBE_DCB_MAX_TRAFFIC_CLASS]; 623 struct ixgbe_tc tcs[IXGBE_DCB_MAX_TRAFFIC_CLASS];
623 624
624 struct sysctllog *sysctllog; 625 struct sysctllog *sysctllog;
625 const struct sysctlnode *sysctltop; 626 const struct sysctlnode *sysctltop;
626}; 627};
627 628
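To make the que_wq comment above concrete, a hedged sketch of the no-double-enqueue property; ixgbe_disable_queue() is the routine the comments refer to, but its exact signature here is an assumption:

/* Sketch: why que_wq needs no "enqueued" flag (see comment above). */
static int
sketch_msix_que(void *arg)
{
	struct ix_queue *que = arg;

	/* Mask this vector via EIMC before deferring the work... */
	ixgbe_disable_queue(que->adapter, que->me);
	workqueue_enqueue(que->adapter->que_wq, &que->wq_cookie, curcpu());
	/*
	 * ...so this handler cannot fire (and enqueue) again until
	 * ixgbe_handle_que_work() re-enables the vector via EIMS.
	 */
	return 1;
}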
628/* Precision Time Sync (IEEE 1588) defines */ 629/* Precision Time Sync (IEEE 1588) defines */
629#define ETHERTYPE_IEEE1588 0x88F7 630#define ETHERTYPE_IEEE1588 0x88F7
630#define PICOSECS_PER_TICK 20833 631#define PICOSECS_PER_TICK 20833
631#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */ 632#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
632#define IXGBE_ADVTXD_TSTAMP 0x00080000 633#define IXGBE_ADVTXD_TSTAMP 0x00080000
633 634
634 635
635#define IXGBE_CORE_LOCK_INIT(_sc, _name) \ 636#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
636 mutex_init(&(_sc)->core_mtx, MUTEX_DEFAULT, IPL_SOFTNET) 637 mutex_init(&(_sc)->core_mtx, MUTEX_DEFAULT, IPL_SOFTNET)
637#define IXGBE_CORE_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->core_mtx) 638#define IXGBE_CORE_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->core_mtx)
638#define IXGBE_TX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->tx_mtx) 639#define IXGBE_TX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->tx_mtx)
639#define IXGBE_RX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->rx_mtx) 640#define IXGBE_RX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->rx_mtx)
640#define IXGBE_CORE_LOCK(_sc) mutex_enter(&(_sc)->core_mtx) 641#define IXGBE_CORE_LOCK(_sc) mutex_enter(&(_sc)->core_mtx)
641#define IXGBE_TX_LOCK(_sc) mutex_enter(&(_sc)->tx_mtx) 642#define IXGBE_TX_LOCK(_sc) mutex_enter(&(_sc)->tx_mtx)
642#define IXGBE_TX_TRYLOCK(_sc) mutex_tryenter(&(_sc)->tx_mtx) 643#define IXGBE_TX_TRYLOCK(_sc) mutex_tryenter(&(_sc)->tx_mtx)
643#define IXGBE_RX_LOCK(_sc) mutex_enter(&(_sc)->rx_mtx) 644#define IXGBE_RX_LOCK(_sc) mutex_enter(&(_sc)->rx_mtx)
644#define IXGBE_CORE_UNLOCK(_sc) mutex_exit(&(_sc)->core_mtx) 645#define IXGBE_CORE_UNLOCK(_sc) mutex_exit(&(_sc)->core_mtx)
645#define IXGBE_TX_UNLOCK(_sc) mutex_exit(&(_sc)->tx_mtx) 646#define IXGBE_TX_UNLOCK(_sc) mutex_exit(&(_sc)->tx_mtx)
646#define IXGBE_RX_UNLOCK(_sc) mutex_exit(&(_sc)->rx_mtx) 647#define IXGBE_RX_UNLOCK(_sc) mutex_exit(&(_sc)->rx_mtx)
647#define IXGBE_CORE_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->core_mtx)) 648#define IXGBE_CORE_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->core_mtx))
648#define IXGBE_TX_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->tx_mtx)) 649#define IXGBE_TX_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->tx_mtx))
649 650
650/* External PHY register addresses */ 651/* External PHY register addresses */
651#define IXGBE_PHY_CURRENT_TEMP 0xC820 652#define IXGBE_PHY_CURRENT_TEMP 0xC820
652#define IXGBE_PHY_OVERTEMP_STATUS 0xC830 653#define IXGBE_PHY_OVERTEMP_STATUS 0xC830
653 654
654/* Sysctl help messages; displayed with sysctl -d */ 655/* Sysctl help messages; displayed with sysctl -d */
655#define IXGBE_SYSCTL_DESC_ADV_SPEED \ 656#define IXGBE_SYSCTL_DESC_ADV_SPEED \
656 "\nControl advertised link speed using these flags:\n" \ 657 "\nControl advertised link speed using these flags:\n" \
657 "\t0x01 - advertise 100M\n" \ 658 "\t0x01 - advertise 100M\n" \
658 "\t0x02 - advertise 1G\n" \ 659 "\t0x02 - advertise 1G\n" \
659 "\t0x04 - advertise 10G\n" \ 660 "\t0x04 - advertise 10G\n" \
660 "\t0x08 - advertise 10M\n" \ 661 "\t0x08 - advertise 10M\n" \
661 "\t0x10 - advertise 2.5G\n" \ 662 "\t0x10 - advertise 2.5G\n" \
662 "\t0x20 - advertise 5G\n\n" \ 663 "\t0x20 - advertise 5G\n\n" \
663 "\t5G, 2.5G, 100M and 10M are only supported on certain adapters." 664 "\t5G, 2.5G, 100M and 10M are only supported on certain adapters."
664 665
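For example, combining the flags above, setting the sysctl to 0x30 on a hypothetical ixg0 instance (hw.ixg0.advertise_speed=0x30) requests 2.5G and 5G together, and 0x06 requests 1G plus 10G, provided the adapter supports those speeds.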
665#define IXGBE_SYSCTL_DESC_SET_FC \ 666#define IXGBE_SYSCTL_DESC_SET_FC \
666 "\nSet flow control mode using these values:\n" \ 667 "\nSet flow control mode using these values:\n" \
667 "\t0 - off\n" \ 668 "\t0 - off\n" \
668 "\t1 - rx pause\n" \ 669 "\t1 - rx pause\n" \
669 "\t2 - tx pause\n" \ 670 "\t2 - tx pause\n" \
670 "\t3 - tx and rx pause" 671 "\t3 - tx and rx pause"
671 672
672/* Workaround to make 8.0 buildable */ 673/* Workaround to make 8.0 buildable */
673#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504 674#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
674static __inline int 675static __inline int
675drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) 676drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
676{ 677{
677#ifdef ALTQ 678#ifdef ALTQ
678 if (ALTQ_IS_ENABLED(&ifp->if_snd)) 679 if (ALTQ_IS_ENABLED(&ifp->if_snd))
679 return (1); 680 return (1);
680#endif 681#endif
681 return (!buf_ring_empty(br)); 682 return (!buf_ring_empty(br));
682} 683}
683#endif 684#endif
684 685
685/* 686/*
686 * Find the number of unrefreshed RX descriptors 687 * Find the number of unrefreshed RX descriptors
687 */ 688 */
688static __inline u16 689static __inline u16
689ixgbe_rx_unrefreshed(struct rx_ring *rxr) 690ixgbe_rx_unrefreshed(struct rx_ring *rxr)
690{ 691{
691 if (rxr->next_to_check > rxr->next_to_refresh) 692 if (rxr->next_to_check > rxr->next_to_refresh)
692 return (rxr->next_to_check - rxr->next_to_refresh - 1); 693 return (rxr->next_to_check - rxr->next_to_refresh - 1);
693 else 694 else
694 return ((rxr->num_desc + rxr->next_to_check) - 695 return ((rxr->num_desc + rxr->next_to_check) -
695 rxr->next_to_refresh - 1); 696 rxr->next_to_refresh - 1);
696} 697}
697 698
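As a standalone check of the wrap-around arithmetic above, with made-up ring indices:

#include <stdio.h>

/* Standalone check of the ixgbe_rx_unrefreshed() arithmetic. */
int
main(void)
{
	unsigned num_desc = 512, next_to_check = 10, next_to_refresh = 500;
	unsigned unrefreshed;

	if (next_to_check > next_to_refresh)
		unrefreshed = next_to_check - next_to_refresh - 1;
	else
		unrefreshed = (num_desc + next_to_check) - next_to_refresh - 1;
	printf("%u\n", unrefreshed);	/* prints 21 */
	return 0;
}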
698static __inline int 699static __inline int
699ixgbe_legacy_ring_empty(struct ifnet *ifp, pcq_t *dummy) 700ixgbe_legacy_ring_empty(struct ifnet *ifp, pcq_t *dummy)
700{ 701{
701 UNREFERENCED_1PARAMETER(dummy); 702 UNREFERENCED_1PARAMETER(dummy);
702 703
703 return IFQ_IS_EMPTY(&ifp->if_snd); 704 return IFQ_IS_EMPTY(&ifp->if_snd);
704} 705}
705 706
706static __inline int 707static __inline int
707ixgbe_mq_ring_empty(struct ifnet *dummy, pcq_t *interq) 708ixgbe_mq_ring_empty(struct ifnet *dummy, pcq_t *interq)
708{ 709{
709 UNREFERENCED_1PARAMETER(dummy); 710 UNREFERENCED_1PARAMETER(dummy);
710 711
711 return (pcq_peek(interq) == NULL); 712 return (pcq_peek(interq) == NULL);
712} 713}
713 714
714/* 715/*
 715 * This checks for a zero mac addr, which is likely 716 * This checks for a zero mac addr, which is likely
 716 * unless the Admin on the Host has created one. 717 * unless the Admin on the Host has created one.
717 */ 718 */
718static __inline bool 719static __inline bool
719ixv_check_ether_addr(u8 *addr) 720ixv_check_ether_addr(u8 *addr)
720{ 721{
721 bool status = TRUE; 722 bool status = TRUE;
722 723
 723 if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 && 724 if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 &&
 724 addr[3] == 0 && addr[4] == 0 && addr[5] == 0)) 725 addr[3] == 0 && addr[4] == 0 && addr[5] == 0))
725 status = FALSE; 726 status = FALSE;
726 727
727 return (status); 728 return (status);
728} 729}
729 730
730/* 731/*
731 * This checks the adapter->recovery_mode software flag which is 732 * This checks the adapter->recovery_mode software flag which is
732 * set by ixgbe_fw_recovery_mode(). 733 * set by ixgbe_fw_recovery_mode().
733 * 734 *
734 */ 735 */
735static inline bool 736static inline bool
736ixgbe_fw_recovery_mode_swflag(struct adapter *adapter) 737ixgbe_fw_recovery_mode_swflag(struct adapter *adapter)
737{ 738{
738 return (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) && 739 return (adapter->feat_en & IXGBE_FEATURE_RECOVERY_MODE) &&
739 atomic_load_acq_uint(&adapter->recovery_mode); 740 atomic_load_acq_uint(&adapter->recovery_mode);
740} 741}
741 742
742/* Shared Prototypes */ 743/* Shared Prototypes */
743void ixgbe_legacy_start(struct ifnet *); 744void ixgbe_legacy_start(struct ifnet *);
744int ixgbe_legacy_start_locked(struct ifnet *, struct tx_ring *); 745int ixgbe_legacy_start_locked(struct ifnet *, struct tx_ring *);
745int ixgbe_mq_start(struct ifnet *, struct mbuf *); 746int ixgbe_mq_start(struct ifnet *, struct mbuf *);
746int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *); 747int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
747void ixgbe_deferred_mq_start(void *); 748void ixgbe_deferred_mq_start(void *);
748void ixgbe_deferred_mq_start_work(struct work *, void *); 749void ixgbe_deferred_mq_start_work(struct work *, void *);
749void ixgbe_drain_all(struct adapter *); 750void ixgbe_drain_all(struct adapter *);
750 751
751int ixgbe_allocate_queues(struct adapter *); 752int ixgbe_allocate_queues(struct adapter *);
752void ixgbe_free_queues(struct adapter *); 753void ixgbe_free_queues(struct adapter *);
753int ixgbe_setup_transmit_structures(struct adapter *); 754int ixgbe_setup_transmit_structures(struct adapter *);
754void ixgbe_free_transmit_structures(struct adapter *); 755void ixgbe_free_transmit_structures(struct adapter *);
755int ixgbe_setup_receive_structures(struct adapter *); 756int ixgbe_setup_receive_structures(struct adapter *);
756void ixgbe_free_receive_structures(struct adapter *); 757void ixgbe_free_receive_structures(struct adapter *);
757bool ixgbe_txeof(struct tx_ring *); 758bool ixgbe_txeof(struct tx_ring *);
758bool ixgbe_rxeof(struct ix_queue *); 759bool ixgbe_rxeof(struct ix_queue *);
759 760
760const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *); 761const struct sysctlnode *ixgbe_sysctl_instance(struct adapter *);
761 762
762/* For NetBSD */ 763/* For NetBSD */
763void ixgbe_jcl_reinit(struct adapter *, bus_dma_tag_t, struct rx_ring *, 764void ixgbe_jcl_reinit(struct adapter *, bus_dma_tag_t, struct rx_ring *,
764 int, size_t); 765 int, size_t);
765void ixgbe_jcl_destroy(struct adapter *, struct rx_ring *); 766void ixgbe_jcl_destroy(struct adapter *, struct rx_ring *);
766 767
767#include "ixgbe_bypass.h" 768#include "ixgbe_bypass.h"
768#include "ixgbe_fdir.h" 769#include "ixgbe_fdir.h"
769#include "ixgbe_rss.h" 770#include "ixgbe_rss.h"
770#include "ixgbe_netmap.h" 771#include "ixgbe_netmap.h"
771 772
772#endif /* _IXGBE_H_ */ 773#endif /* _IXGBE_H_ */

cvs diff -r1.11 -r1.11.4.1 src/sys/dev/pci/ixgbe/ixgbe_netbsd.h

--- src/sys/dev/pci/ixgbe/ixgbe_netbsd.h 2019/03/05 08:25:02 1.11
+++ src/sys/dev/pci/ixgbe/ixgbe_netbsd.h 2021/03/11 16:00:24 1.11.4.1
@@ -1,98 +1,112 @@ @@ -1,98 +1,112 @@
1/*$NetBSD: ixgbe_netbsd.h,v 1.11 2019/03/05 08:25:02 msaitoh Exp $*/ 1/*$NetBSD: ixgbe_netbsd.h,v 1.11.4.1 2021/03/11 16:00:24 martin Exp $*/
2/* 2/*
3 * Copyright (c) 2011 The NetBSD Foundation, Inc. 3 * Copyright (c) 2011 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Coyote Point Systems, Inc. 7 * by Coyote Point Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE. 28 * POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31#ifndef _IXGBE_NETBSD_H 31#ifndef _IXGBE_NETBSD_H
32#define _IXGBE_NETBSD_H 32#define _IXGBE_NETBSD_H
33 33
 34#if 0 /* Enable this if you don't want to use the TX multiqueue function */ 34#if 0 /* Enable this if you don't want to use the TX multiqueue function */
35#define IXGBE_LEGACY_TX 1 35#define IXGBE_LEGACY_TX 1
36#endif 36#endif
37 37
38#define ETHERCAP_VLAN_HWCSUM 0 38#define ETHERCAP_VLAN_HWCSUM 0
39#define MJUM9BYTES (9 * 1024) 39#define MJUM9BYTES (9 * 1024)
40#define MJUM16BYTES (16 * 1024) 40#define MJUM16BYTES (16 * 1024)
41#define MJUMPAGESIZE PAGE_SIZE 41#define MJUMPAGESIZE PAGE_SIZE
42 42
 43/*
 44 * Number of jcl per queue is calculated by
 45 * adapter->num_rx_desc * IXGBE_JCLNUM_MULTI. The lower limit is 2.
 46 */
 47#define IXGBE_JCLNUM_MULTI_LOWLIM 2
 48#define IXGBE_JCLNUM_MULTI_DEFAULT 3
 49#if !defined(IXGBE_JCLNUM_MULTI)
 50# define IXGBE_JCLNUM_MULTI IXGBE_JCLNUM_MULTI_DEFAULT
 51#else
 52# if (IXGBE_JCLNUM_MULTI < IXGBE_JCLNUM_MULTI_LOWLIM)
 53# error IXGBE_JCLNUM_MULTI is too low.
 54# endif
 55#endif
 56
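Spelled out, the arithmetic in the new comment above, with a made-up ring size (1024 is illustrative, not a driver default):

/* Sketch: per-queue jcl count per the comment above. */
#define SKETCH_NUM_RX_DESC	1024	/* made-up ring size */
#define SKETCH_NUM_JCL		(SKETCH_NUM_RX_DESC * IXGBE_JCLNUM_MULTI)
					/* 1024 * 3 = 3072 with the default */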
43#define IFCAP_RXCSUM \ 57#define IFCAP_RXCSUM \
44 (IFCAP_CSUM_IPv4_Rx|IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|\ 58 (IFCAP_CSUM_IPv4_Rx|IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx|\
45 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx) 59 IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx)
46 60
47#define IFCAP_TXCSUM \ 61#define IFCAP_TXCSUM \
48 (IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx|\ 62 (IFCAP_CSUM_IPv4_Tx|IFCAP_CSUM_TCPv4_Tx|IFCAP_CSUM_UDPv4_Tx|\
49 IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_UDPv6_Tx) 63 IFCAP_CSUM_TCPv6_Tx|IFCAP_CSUM_UDPv6_Tx)
50 64
51#define IFCAP_HWCSUM (IFCAP_RXCSUM|IFCAP_TXCSUM) 65#define IFCAP_HWCSUM (IFCAP_RXCSUM|IFCAP_TXCSUM)
52 66
53struct ixgbe_dma_tag { 67struct ixgbe_dma_tag {
54 bus_dma_tag_t dt_dmat; 68 bus_dma_tag_t dt_dmat;
55 bus_size_t dt_alignment; 69 bus_size_t dt_alignment;
56 bus_size_t dt_boundary; 70 bus_size_t dt_boundary;
57 bus_size_t dt_maxsize; 71 bus_size_t dt_maxsize;
58 int dt_nsegments; 72 int dt_nsegments;
59 bus_size_t dt_maxsegsize; 73 bus_size_t dt_maxsegsize;
60 int dt_flags; 74 int dt_flags;
61}; 75};
62 76
63typedef struct ixgbe_dma_tag ixgbe_dma_tag_t; 77typedef struct ixgbe_dma_tag ixgbe_dma_tag_t;
64 78
65struct ixgbe_extmem_head; 79struct ixgbe_extmem_head;
66typedef struct ixgbe_extmem_head ixgbe_extmem_head_t; 80typedef struct ixgbe_extmem_head ixgbe_extmem_head_t;
67 81
68struct ixgbe_extmem { 82struct ixgbe_extmem {
69 ixgbe_extmem_head_t *em_head; 83 ixgbe_extmem_head_t *em_head;
70 bus_dma_tag_t em_dmat; 84 bus_dma_tag_t em_dmat;
71 bus_size_t em_size; 85 bus_size_t em_size;
72 bus_dma_segment_t em_seg; 86 bus_dma_segment_t em_seg;
73 void *em_vaddr; 87 void *em_vaddr;
74 TAILQ_ENTRY(ixgbe_extmem) em_link; 88 TAILQ_ENTRY(ixgbe_extmem) em_link;
75}; 89};
76 90
77typedef struct ixgbe_extmem ixgbe_extmem_t; 91typedef struct ixgbe_extmem ixgbe_extmem_t;
78 92
79struct ixgbe_extmem_head { 93struct ixgbe_extmem_head {
80 TAILQ_HEAD(, ixgbe_extmem) eh_freelist; 94 TAILQ_HEAD(, ixgbe_extmem) eh_freelist;
81 kmutex_t eh_mtx; 95 kmutex_t eh_mtx;
82 bool eh_initialized; 96 bool eh_initialized;
83}; 97};
84 98
85int ixgbe_dma_tag_create(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t, int, 99int ixgbe_dma_tag_create(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t, int,
86 bus_size_t, int, ixgbe_dma_tag_t **); 100 bus_size_t, int, ixgbe_dma_tag_t **);
87void ixgbe_dma_tag_destroy(ixgbe_dma_tag_t *); 101void ixgbe_dma_tag_destroy(ixgbe_dma_tag_t *);
88int ixgbe_dmamap_create(ixgbe_dma_tag_t *, int, bus_dmamap_t *); 102int ixgbe_dmamap_create(ixgbe_dma_tag_t *, int, bus_dmamap_t *);
89void ixgbe_dmamap_destroy(ixgbe_dma_tag_t *, bus_dmamap_t); 103void ixgbe_dmamap_destroy(ixgbe_dma_tag_t *, bus_dmamap_t);
90void ixgbe_dmamap_sync(ixgbe_dma_tag_t *, bus_dmamap_t, int); 104void ixgbe_dmamap_sync(ixgbe_dma_tag_t *, bus_dmamap_t, int);
91void ixgbe_dmamap_unload(ixgbe_dma_tag_t *, bus_dmamap_t); 105void ixgbe_dmamap_unload(ixgbe_dma_tag_t *, bus_dmamap_t);
92 106
93struct mbuf *ixgbe_getjcl(ixgbe_extmem_head_t *, int, int, int, size_t); 107struct mbuf *ixgbe_getjcl(ixgbe_extmem_head_t *, int, int, int, size_t);
94void ixgbe_pci_enable_busmaster(pci_chipset_tag_t, pcitag_t); 108void ixgbe_pci_enable_busmaster(pci_chipset_tag_t, pcitag_t);
95 109
96u_int atomic_load_acq_uint(volatile u_int *); 110u_int atomic_load_acq_uint(volatile u_int *);
97 111
98#endif /* _IXGBE_NETBSD_H */ 112#endif /* _IXGBE_NETBSD_H */

cvs diff -r1.18.2.2 -r1.18.2.3 src/sys/dev/pci/ixgbe/ixgbe_vf.c

--- src/sys/dev/pci/ixgbe/ixgbe_vf.c 2020/07/10 11:35:51 1.18.2.2
+++ src/sys/dev/pci/ixgbe/ixgbe_vf.c 2021/03/11 16:00:24 1.18.2.3
@@ -1,817 +1,818 @@ @@ -1,817 +1,818 @@
1/* $NetBSD: ixgbe_vf.c,v 1.18.2.2 2020/07/10 11:35:51 martin Exp $ */ 1/* $NetBSD: ixgbe_vf.c,v 1.18.2.3 2021/03/11 16:00:24 martin Exp $ */
2 2
3/****************************************************************************** 3/******************************************************************************
4 SPDX-License-Identifier: BSD-3-Clause 4 SPDX-License-Identifier: BSD-3-Clause
5 5
6 Copyright (c) 2001-2017, Intel Corporation 6 Copyright (c) 2001-2017, Intel Corporation
7 All rights reserved. 7 All rights reserved.
8 8
9 Redistribution and use in source and binary forms, with or without 9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met: 10 modification, are permitted provided that the following conditions are met:
11 11
12 1. Redistributions of source code must retain the above copyright notice, 12 1. Redistributions of source code must retain the above copyright notice,
13 this list of conditions and the following disclaimer. 13 this list of conditions and the following disclaimer.
14 14
15 2. Redistributions in binary form must reproduce the above copyright 15 2. Redistributions in binary form must reproduce the above copyright
16 notice, this list of conditions and the following disclaimer in the 16 notice, this list of conditions and the following disclaimer in the
17 documentation and/or other materials provided with the distribution. 17 documentation and/or other materials provided with the distribution.
18 18
19 3. Neither the name of the Intel Corporation nor the names of its 19 3. Neither the name of the Intel Corporation nor the names of its
20 contributors may be used to endorse or promote products derived from 20 contributors may be used to endorse or promote products derived from
21 this software without specific prior written permission. 21 this software without specific prior written permission.
22 22
23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 POSSIBILITY OF SUCH DAMAGE. 33 POSSIBILITY OF SUCH DAMAGE.
34 34
35******************************************************************************/ 35******************************************************************************/
36/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_vf.c 331224 2018-03-19 20:55:05Z erj $*/ 36/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_vf.c 331224 2018-03-19 20:55:05Z erj $*/
37 37
38 38
39#include "ixgbe_api.h" 39#include "ixgbe_api.h"
40#include "ixgbe_type.h" 40#include "ixgbe_type.h"
41#include "ixgbe_vf.h" 41#include "ixgbe_vf.h"
42 42
43#ifndef IXGBE_VFWRITE_REG 43#ifndef IXGBE_VFWRITE_REG
44#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG 44#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
45#endif 45#endif
46#ifndef IXGBE_VFREAD_REG 46#ifndef IXGBE_VFREAD_REG
47#define IXGBE_VFREAD_REG IXGBE_READ_REG 47#define IXGBE_VFREAD_REG IXGBE_READ_REG
48#endif 48#endif
49 49
50/** 50/**
51 * ixgbe_init_ops_vf - Initialize the pointers for vf 51 * ixgbe_init_ops_vf - Initialize the pointers for vf
52 * @hw: pointer to hardware structure 52 * @hw: pointer to hardware structure
53 * 53 *
54 * This will assign function pointers, adapter-specific functions can 54 * This will assign function pointers, adapter-specific functions can
55 * override the assignment of generic function pointers by assigning 55 * override the assignment of generic function pointers by assigning
56 * their own adapter-specific function pointers. 56 * their own adapter-specific function pointers.
57 * Does not touch the hardware. 57 * Does not touch the hardware.
58 **/ 58 **/
59s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw) 59s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
60{ 60{
61 /* MAC */ 61 /* MAC */
62 hw->mac.ops.init_hw = ixgbe_init_hw_vf; 62 hw->mac.ops.init_hw = ixgbe_init_hw_vf;
63 hw->mac.ops.reset_hw = ixgbe_reset_hw_vf; 63 hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
64 hw->mac.ops.start_hw = ixgbe_start_hw_vf; 64 hw->mac.ops.start_hw = ixgbe_start_hw_vf;
65 /* Cannot clear stats on VF */ 65 /* Cannot clear stats on VF */
66 hw->mac.ops.clear_hw_cntrs = NULL; 66 hw->mac.ops.clear_hw_cntrs = NULL;
67 hw->mac.ops.get_media_type = NULL; 67 hw->mac.ops.get_media_type = NULL;
68 hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf; 68 hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
69 hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf; 69 hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
70 hw->mac.ops.get_bus_info = NULL; 70 hw->mac.ops.get_bus_info = NULL;
71 hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version; 71 hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version;
72 72
73 /* Link */ 73 /* Link */
74 hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf; 74 hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
75 hw->mac.ops.check_link = ixgbe_check_mac_link_vf; 75 hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
76 hw->mac.ops.get_link_capabilities = NULL; 76 hw->mac.ops.get_link_capabilities = NULL;
77 77
78 /* RAR, Multicast, VLAN */ 78 /* RAR, Multicast, VLAN */
79 hw->mac.ops.set_rar = ixgbe_set_rar_vf; 79 hw->mac.ops.set_rar = ixgbe_set_rar_vf;
80 hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf; 80 hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
81 hw->mac.ops.init_rx_addrs = NULL; 81 hw->mac.ops.init_rx_addrs = NULL;
82 hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf; 82 hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
83 hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode; 83 hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode;
84 hw->mac.ops.enable_mc = NULL; 84 hw->mac.ops.enable_mc = NULL;
85 hw->mac.ops.disable_mc = NULL; 85 hw->mac.ops.disable_mc = NULL;
86 hw->mac.ops.clear_vfta = NULL; 86 hw->mac.ops.clear_vfta = NULL;
87 hw->mac.ops.set_vfta = ixgbe_set_vfta_vf; 87 hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
88 hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf; 88 hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf;
89 89
90 hw->mac.max_tx_queues = 1; 90 hw->mac.max_tx_queues = 1;
91 hw->mac.max_rx_queues = 1; 91 hw->mac.max_rx_queues = 1;
92 92
93 hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf; 93 hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
94 94
95 return IXGBE_SUCCESS; 95 return IXGBE_SUCCESS;
96} 96}
97 97
98/* ixgbe_virt_clr_reg - Set register to default (power on) state. 98/* ixgbe_virt_clr_reg - Set register to default (power on) state.
99 * @hw: pointer to hardware structure 99 * @hw: pointer to hardware structure
100 */ 100 */
101static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw) 101static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
102{ 102{
103 int i; 103 int i;
104 u32 vfsrrctl; 104 u32 vfsrrctl;
105 u32 vfdca_rxctrl; 105 u32 vfdca_rxctrl;
106 u32 vfdca_txctrl; 106 u32 vfdca_txctrl;
107 107
108 /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */ 108 /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
109 vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; 109 vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
110 vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 110 vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
111 111
112 /* DCA_RXCTRL default value */ 112 /* DCA_RXCTRL default value */
113 vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN | 113 vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
114 IXGBE_DCA_RXCTRL_DATA_WRO_EN | 114 IXGBE_DCA_RXCTRL_DATA_WRO_EN |
115 IXGBE_DCA_RXCTRL_HEAD_WRO_EN; 115 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
116 116
117 /* DCA_TXCTRL default value */ 117 /* DCA_TXCTRL default value */
118 vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN | 118 vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
119 IXGBE_DCA_TXCTRL_DESC_WRO_EN | 119 IXGBE_DCA_TXCTRL_DESC_WRO_EN |
120 IXGBE_DCA_TXCTRL_DATA_RRO_EN; 120 IXGBE_DCA_TXCTRL_DATA_RRO_EN;
121 121
122 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 122 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
123 123
124 for (i = 0; i < 7; i++) { 124 KASSERT(IXGBE_VF_MAX_TX_QUEUES == IXGBE_VF_MAX_RX_QUEUES);
 125 for (i = 0; i < IXGBE_VF_MAX_TX_QUEUES; i++) {
125 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); 126 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
126 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); 127 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
127 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0); 128 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
128 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl); 129 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
129 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); 130 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
130 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); 131 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
131 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0); 132 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
132 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0); 133 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
133 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0); 134 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
134 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl); 135 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
135 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl); 136 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
136 } 137 }
137 138
138 IXGBE_WRITE_FLUSH(hw); 139 IXGBE_WRITE_FLUSH(hw);
139} 140}
140 141
141/** 142/**
142 * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx 143 * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
143 * @hw: pointer to hardware structure 144 * @hw: pointer to hardware structure
144 * 145 *
145 * Starts the hardware by filling the bus info structure and media type, clears 146 * Starts the hardware by filling the bus info structure and media type, clears
146 * all on chip counters, initializes receive address registers, multicast 147 * all on chip counters, initializes receive address registers, multicast
147 * table, VLAN filter table, calls routine to set up link and flow control 148 * table, VLAN filter table, calls routine to set up link and flow control
148 * settings, and leaves transmit and receive units disabled and uninitialized 149 * settings, and leaves transmit and receive units disabled and uninitialized
149 **/ 150 **/
150s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw) 151s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
151{ 152{
152 /* Clear adapter stopped flag */ 153 /* Clear adapter stopped flag */
153 hw->adapter_stopped = FALSE; 154 hw->adapter_stopped = FALSE;
154 155
155 return IXGBE_SUCCESS; 156 return IXGBE_SUCCESS;
156} 157}
157 158
158/** 159/**
159 * ixgbe_init_hw_vf - virtual function hardware initialization 160 * ixgbe_init_hw_vf - virtual function hardware initialization
160 * @hw: pointer to hardware structure 161 * @hw: pointer to hardware structure
161 * 162 *
162 * Initialize the hardware by resetting the hardware and then starting 163 * Initialize the hardware by resetting the hardware and then starting
163 * the hardware 164 * the hardware
164 **/ 165 **/
165s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw) 166s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
166{ 167{
167 s32 status = hw->mac.ops.start_hw(hw); 168 s32 status = hw->mac.ops.start_hw(hw);
168 169
169 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 170 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
170 171
171 return status; 172 return status;
172} 173}
173 174
174/** 175/**
175 * ixgbe_reset_hw_vf - Performs hardware reset 176 * ixgbe_reset_hw_vf - Performs hardware reset
176 * @hw: pointer to hardware structure 177 * @hw: pointer to hardware structure
177 * 178 *
178 * Resets the hardware by reseting the transmit and receive units, masks and 179 * Resets the hardware by resetting the transmit and receive units, masks and
179 * clears all interrupts. 180 * clears all interrupts.
180 **/ 181 **/
181s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) 182s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
182{ 183{
183 struct ixgbe_mbx_info *mbx = &hw->mbx; 184 struct ixgbe_mbx_info *mbx = &hw->mbx;
184 u32 timeout = IXGBE_VF_INIT_TIMEOUT; 185 u32 timeout = IXGBE_VF_INIT_TIMEOUT;
185 s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; 186 s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
186 u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; 187 u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
187 u8 *addr = (u8 *)(&msgbuf[1]); 188 u8 *addr = (u8 *)(&msgbuf[1]);
188 189
189 DEBUGFUNC("ixgbevf_reset_hw_vf"); 190 DEBUGFUNC("ixgbevf_reset_hw_vf");
190 191
191 /* Call adapter stop to disable tx/rx and clear interrupts */ 192 /* Call adapter stop to disable tx/rx and clear interrupts */
192 hw->mac.ops.stop_adapter(hw); 193 hw->mac.ops.stop_adapter(hw);
193 194
194 /* reset the api version */ 195 /* reset the api version */
195 hw->api_version = ixgbe_mbox_api_10; 196 hw->api_version = ixgbe_mbox_api_10;
196 197
197 DEBUGOUT("Issuing a function level reset to MAC\n"); 198 DEBUGOUT("Issuing a function level reset to MAC\n");
198 199
199 IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); 200 IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
200 IXGBE_WRITE_FLUSH(hw); 201 IXGBE_WRITE_FLUSH(hw);
201 202
202 msec_delay(50); 203 msec_delay(50);
203 204
204 /* we cannot reset while the RSTI / RSTD bits are asserted */ 205 /* we cannot reset while the RSTI / RSTD bits are asserted */
205 while (!mbx->ops.check_for_rst(hw, 0) && timeout) { 206 while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
206 timeout--; 207 timeout--;
207 usec_delay(5); 208 usec_delay(5);
208 } 209 }
209 210
210 if (!timeout) 211 if (!timeout)
211 return IXGBE_ERR_RESET_FAILED; 212 return IXGBE_ERR_RESET_FAILED;
212 213
213 /* Reset VF registers to initial values */ 214 /* Reset VF registers to initial values */
214 ixgbe_virt_clr_reg(hw); 215 ixgbe_virt_clr_reg(hw);
215 216
216 /* mailbox timeout can now become active */ 217 /* mailbox timeout can now become active */
217 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; 218 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
218 219
219 msgbuf[0] = IXGBE_VF_RESET; 220 msgbuf[0] = IXGBE_VF_RESET;
220 mbx->ops.write_posted(hw, msgbuf, 1, 0); 221 mbx->ops.write_posted(hw, msgbuf, 1, 0);
221 222
222 msec_delay(10); 223 msec_delay(10);
223 224
224 /* 225 /*
225 * set our "perm_addr" based on info provided by PF 226 * set our "perm_addr" based on info provided by PF
 226 * also set up the mc_filter_type which is piggybacked 227 * also set up the mc_filter_type which is piggybacked
227 * on the mac address in word 3 228 * on the mac address in word 3
228 */ 229 */
229 ret_val = mbx->ops.read_posted(hw, msgbuf, 230 ret_val = mbx->ops.read_posted(hw, msgbuf,
230 IXGBE_VF_PERMADDR_MSG_LEN, 0); 231 IXGBE_VF_PERMADDR_MSG_LEN, 0);
231 if (ret_val) 232 if (ret_val)
232 return ret_val; 233 return ret_val;
233 234
234 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && 235 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
235 msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) 236 msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
236 return IXGBE_ERR_INVALID_MAC_ADDR; 237 return IXGBE_ERR_INVALID_MAC_ADDR;
237 238
238 if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) 239 if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
239 memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 240 memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
240 241
241 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; 242 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
242 243
243 return ret_val; 244 return ret_val;
244} 245}
245 246
246/** 247/**
247 * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units 248 * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
248 * @hw: pointer to hardware structure 249 * @hw: pointer to hardware structure
249 * 250 *
250 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 251 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
251 * disables transmit and receive units. The adapter_stopped flag is used by 252 * disables transmit and receive units. The adapter_stopped flag is used by
252 * the shared code and drivers to determine if the adapter is in a stopped 253 * the shared code and drivers to determine if the adapter is in a stopped
253 * state and should not touch the hardware. 254 * state and should not touch the hardware.
254 **/ 255 **/
255s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw) 256s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
256{ 257{
257 u32 reg_val; 258 u32 reg_val;
258 u16 i; 259 u16 i;
259 260
260 /* 261 /*
261 * Set the adapter_stopped flag so other driver functions stop touching 262 * Set the adapter_stopped flag so other driver functions stop touching
262 * the hardware 263 * the hardware
263 */ 264 */
264 hw->adapter_stopped = TRUE; 265 hw->adapter_stopped = TRUE;
265 266
 266 /* Clear interrupt mask to stop interrupts from being generated */ 267 /* Clear interrupt mask to stop interrupts from being generated */
267 IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 268 IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
268 269
269 /* Clear any pending interrupts, flush previous writes */ 270 /* Clear any pending interrupts, flush previous writes */
270 IXGBE_VFREAD_REG(hw, IXGBE_VTEICR); 271 IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
271 272
272 /* Disable the transmit unit. Each queue must be disabled. */ 273 /* Disable the transmit unit. Each queue must be disabled. */
273 for (i = 0; i < hw->mac.max_tx_queues; i++) 274 for (i = 0; i < hw->mac.max_tx_queues; i++)
274 IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH); 275 IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
275 276
276 /* Disable the receive unit by stopping each queue */ 277 /* Disable the receive unit by stopping each queue */
277 for (i = 0; i < hw->mac.max_rx_queues; i++) { 278 for (i = 0; i < hw->mac.max_rx_queues; i++) {
278 reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i)); 279 reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
279 reg_val &= ~IXGBE_RXDCTL_ENABLE; 280 reg_val &= ~IXGBE_RXDCTL_ENABLE;
280 IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); 281 IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
281 } 282 }
282 /* Clear packet split and pool config */ 283 /* Clear packet split and pool config */
283 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 284 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
284 285
 285 /* flush all queue disables */ 286 /* flush all queue disables */
286 IXGBE_WRITE_FLUSH(hw); 287 IXGBE_WRITE_FLUSH(hw);
287 msec_delay(2); 288 msec_delay(2);
288 289
289 return IXGBE_SUCCESS; 290 return IXGBE_SUCCESS;
290} 291}
291 292
292/** 293/**
293 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 294 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
294 * @hw: pointer to hardware structure 295 * @hw: pointer to hardware structure
295 * @mc_addr: the multicast address 296 * @mc_addr: the multicast address
296 * 297 *
297 * Extracts the 12 bits, from a multicast address, to determine which 298 * Extracts the 12 bits, from a multicast address, to determine which
298 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 299 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
299 * incoming rx multicast addresses, to determine the bit-vector to check in 300 * incoming rx multicast addresses, to determine the bit-vector to check in
300 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 301 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
301 * by the MO field of the MCSTCTRL. The MO field is set during initialization 302 * by the MO field of the MCSTCTRL. The MO field is set during initialization
302 * to mc_filter_type. 303 * to mc_filter_type.
303 **/ 304 **/
304static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 305static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
305{ 306{
306 u32 vector = 0; 307 u32 vector = 0;
307 308
308 switch (hw->mac.mc_filter_type) { 309 switch (hw->mac.mc_filter_type) {
309 case 0: /* use bits [47:36] of the address */ 310 case 0: /* use bits [47:36] of the address */
310 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 311 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
311 break; 312 break;
312 case 1: /* use bits [46:35] of the address */ 313 case 1: /* use bits [46:35] of the address */
313 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 314 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
314 break; 315 break;
315 case 2: /* use bits [45:34] of the address */ 316 case 2: /* use bits [45:34] of the address */
316 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 317 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
317 break; 318 break;
318 case 3: /* use bits [43:32] of the address */ 319 case 3: /* use bits [43:32] of the address */
319 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 320 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
320 break; 321 break;
321 default: /* Invalid mc_filter_type */ 322 default: /* Invalid mc_filter_type */
322 DEBUGOUT("MC filter type param set incorrectly\n"); 323 DEBUGOUT("MC filter type param set incorrectly\n");
323 ASSERT(0); 324 ASSERT(0);
324 break; 325 break;
325 } 326 }
326 327
327 /* vector can only be 12-bits or boundary will be exceeded */ 328 /* vector can only be 12-bits or boundary will be exceeded */
328 vector &= 0xFFF; 329 vector &= 0xFFF;
329 return vector; 330 return vector;
330} 331}
331 332
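To make the four MO selections concrete, here is a standalone sketch (not driver code) that mirrors the extraction above; the multicast address 01:00:5e:00:00:16 is chosen arbitrarily for illustration.

#include <stdint.h>
#include <stdio.h>

static uint16_t mta_vector(int filter_type, const uint8_t *mc)
{
	uint16_t v = 0;

	switch (filter_type) {
	case 0: v = (mc[4] >> 4) | ((uint16_t)mc[5] << 4); break; /* bits [47:36] */
	case 1: v = (mc[4] >> 3) | ((uint16_t)mc[5] << 5); break; /* bits [46:35] */
	case 2: v = (mc[4] >> 2) | ((uint16_t)mc[5] << 6); break; /* bits [45:34] */
	case 3: v = mc[4] | ((uint16_t)mc[5] << 8); break;        /* bits [43:32] */
	}
	return v & 0xFFF;	/* the MTA bit-vector index is 12 bits wide */
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x16 };

	for (int t = 0; t < 4; t++)
		printf("filter type %d -> vector 0x%03X\n", t,
		    (unsigned int)mta_vector(t, mc));
	return 0;
}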
332static s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, 333static s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
333 u32 *retmsg, u16 size) 334 u32 *retmsg, u16 size)
334{ 335{
335 struct ixgbe_mbx_info *mbx = &hw->mbx; 336 struct ixgbe_mbx_info *mbx = &hw->mbx;
336 s32 retval = mbx->ops.write_posted(hw, msg, size, 0); 337 s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
337 338
338 if (retval) 339 if (retval)
339 return retval; 340 return retval;
340 341
341 return mbx->ops.read_posted(hw, retmsg, size, 0); 342 return mbx->ops.read_posted(hw, retmsg, size, 0);
342} 343}
343 344
344/** 345/**
345 * ixgbe_set_rar_vf - set device MAC address 346 * ixgbe_set_rar_vf - set device MAC address
346 * @hw: pointer to hardware structure 347 * @hw: pointer to hardware structure
347 * @index: Receive address register to write 348 * @index: Receive address register to write
348 * @addr: Address to put into receive address register 349 * @addr: Address to put into receive address register
349 * @vmdq: VMDq "set" or "pool" index 350 * @vmdq: VMDq "set" or "pool" index
350 * @enable_addr: set flag that address is active 351 * @enable_addr: set flag that address is active
351 **/ 352 **/
352s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 353s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
353 u32 enable_addr) 354 u32 enable_addr)
354{ 355{
355 u32 msgbuf[3]; 356 u32 msgbuf[3];
356 u8 *msg_addr = (u8 *)(&msgbuf[1]); 357 u8 *msg_addr = (u8 *)(&msgbuf[1]);
357 s32 ret_val; 358 s32 ret_val;
358 UNREFERENCED_3PARAMETER(vmdq, enable_addr, index); 359 UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
359 360
360 memset(msgbuf, 0, 12); 361 memset(msgbuf, 0, 12);
361 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; 362 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
362 memcpy(msg_addr, addr, 6); 363 memcpy(msg_addr, addr, 6);
363 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); 364 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
364 365
365 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; 366 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
366 367
 367 /* if NACKed, the address was rejected; use "perm_addr" */ 368 /* if NACKed, the address was rejected; use "perm_addr" */
368 if (!ret_val && 369 if (!ret_val &&
369 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) { 370 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
370 ixgbe_get_mac_addr_vf(hw, hw->mac.addr); 371 ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
371 return IXGBE_ERR_MBX; 372 return IXGBE_ERR_MBX;
372 } 373 }
373 374
374 return ret_val; 375 return ret_val;
375} 376}
376 377
377/** 378/**
378 * ixgbe_update_mc_addr_list_vf - Update Multicast addresses 379 * ixgbe_update_mc_addr_list_vf - Update Multicast addresses
379 * @hw: pointer to the HW structure 380 * @hw: pointer to the HW structure
380 * @mc_addr_list: array of multicast addresses to program 381 * @mc_addr_list: array of multicast addresses to program
381 * @mc_addr_count: number of multicast addresses to program 382 * @mc_addr_count: number of multicast addresses to program
382 * @next: caller supplied function to return next address in list 383 * @next: caller supplied function to return next address in list
383 * @clear: unused 384 * @clear: unused
384 * 385 *
385 * Updates the Multicast Table Array. 386 * Updates the Multicast Table Array.
386 **/ 387 **/
387s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, 388s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
388 u32 mc_addr_count, ixgbe_mc_addr_itr next, 389 u32 mc_addr_count, ixgbe_mc_addr_itr next,
389 bool clear) 390 bool clear)
390{ 391{
391 u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; 392 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
392 u16 *vector_list = (u16 *)&msgbuf[1]; 393 u16 *vector_list = (u16 *)&msgbuf[1];
393 u32 vector; 394 u32 vector;
394 u32 cnt, i; 395 u32 cnt, i;
395 u32 vmdq; 396 u32 vmdq;
396 397
397 UNREFERENCED_1PARAMETER(clear); 398 UNREFERENCED_1PARAMETER(clear);
398 399
399 DEBUGFUNC("ixgbe_update_mc_addr_list_vf"); 400 DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
400 401
 401 /* Each entry in the list uses one 16-bit word. We have 30 402 /* Each entry in the list uses one 16-bit word. We have 30
 402 * 16-bit words available in our HW msg buffer (minus 1 for the 403 * 16-bit words available in our HW msg buffer (minus 1 for the
 403 * msg type). That's 30 hash values if we pack 'em right. If 404 * msg type). That's 30 hash values if we pack 'em right. If
 404 * there are more than 30 MC addresses to add then punt the 405 * there are more than 30 MC addresses to add then punt the
 405 * extras for now and then add code to handle more than 30 later. 406 * extras for now and then add code to handle more than 30 later.
 406 * It would be unusual for a server to request that many multicast 407 * It would be unusual for a server to request that many multicast
 407 * addresses except in large enterprise network environments. 408 * addresses except in large enterprise network environments.
408 */ 409 */
409 410
410 DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); 411 DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
411 412
412 if (mc_addr_count > IXGBE_MAX_VF_MC) { 413 if (mc_addr_count > IXGBE_MAX_VF_MC) {
413 device_printf(ixgbe_dev_from_hw(hw), 414 device_printf(ixgbe_dev_from_hw(hw),
414 "number of Ethernet multicast addresses exceeded " 415 "number of Ethernet multicast addresses exceeded "
415 "the limit (%u > %d)\n", mc_addr_count, IXGBE_MAX_VF_MC); 416 "the limit (%u > %d)\n", mc_addr_count, IXGBE_MAX_VF_MC);
416 cnt = IXGBE_MAX_VF_MC; 417 cnt = IXGBE_MAX_VF_MC;
417 } else 418 } else
418 cnt = mc_addr_count; 419 cnt = mc_addr_count;
419 msgbuf[0] = IXGBE_VF_SET_MULTICAST; 420 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
420 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; 421 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
421 422
422 for (i = 0; i < cnt; i++) { 423 for (i = 0; i < cnt; i++) {
423 vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); 424 vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
424 DEBUGOUT1("Hash value = 0x%03X\n", vector); 425 DEBUGOUT1("Hash value = 0x%03X\n", vector);
425 vector_list[i] = (u16)vector; 426 vector_list[i] = (u16)vector;
426 } 427 }
427 return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 428 return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
428 IXGBE_VFMAILBOX_SIZE); 429 IXGBE_VFMAILBOX_SIZE);
429} 430}
430 431
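A standalone sketch of the resulting mailbox message layout: the address count is folded into the MSGINFO bits of word 0, and the 12-bit hash vectors are packed as 16-bit values starting at word 1. The opcode and shift values below are assumptions for illustration, not the driver's definitions, and the packed word contents shown are for a little-endian host.

#include <stdint.h>
#include <stdio.h>

#define VF_SET_MULTICAST 0x03	/* assumed opcode value, for illustration */
#define MSGINFO_SHIFT    16	/* assumed field position, for illustration */

int main(void)
{
	uint32_t msgbuf[16] = { 0 };
	uint16_t *vector_list = (uint16_t *)&msgbuf[1];
	const uint16_t hashes[3] = { 0x160, 0x2C1, 0xFFF };	/* example vectors */
	uint32_t cnt = 3;

	msgbuf[0] = VF_SET_MULTICAST | (cnt << MSGINFO_SHIFT);
	for (uint32_t i = 0; i < cnt; i++)
		vector_list[i] = hashes[i];	/* two vectors per 32-bit word */

	printf("word0 = 0x%08X, word1 = 0x%08X, word2 = 0x%08X\n",
	    (unsigned int)msgbuf[0], (unsigned int)msgbuf[1],
	    (unsigned int)msgbuf[2]);
	return 0;
}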
431/** 432/**
432 * ixgbevf_update_xcast_mode - Update Multicast mode 433 * ixgbevf_update_xcast_mode - Update Multicast mode
433 * @hw: pointer to the HW structure 434 * @hw: pointer to the HW structure
434 * @xcast_mode: new multicast mode 435 * @xcast_mode: new multicast mode
435 * 436 *
436 * Updates the Multicast Mode of VF. 437 * Updates the Multicast Mode of VF.
437 **/ 438 **/
438s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) 439s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
439{ 440{
440 u32 msgbuf[2]; 441 u32 msgbuf[2];
441 s32 err; 442 s32 err;
442 443
443 switch (hw->api_version) { 444 switch (hw->api_version) {
444 case ixgbe_mbox_api_12: 445 case ixgbe_mbox_api_12:
 445 /* New modes were introduced in API version 1.3 */ 446 /* New modes were introduced in API version 1.3 */
446 if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI) 447 if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
447 return IXGBE_ERR_FEATURE_NOT_SUPPORTED; 448 return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
448 /* Fall through */ 449 /* Fall through */
449 case ixgbe_mbox_api_13: 450 case ixgbe_mbox_api_13:
450 break; 451 break;
451 default: 452 default:
452 return IXGBE_ERR_FEATURE_NOT_SUPPORTED; 453 return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
453 } 454 }
454 455
455 msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; 456 msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
456 msgbuf[1] = xcast_mode; 457 msgbuf[1] = xcast_mode;
457 458
458 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); 459 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
459 if (err) 460 if (err)
460 return err; 461 return err;
461 462
462 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; 463 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
463 if (msgbuf[0] == 464 if (msgbuf[0] ==
464 (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) { 465 (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) {
465 if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) { 466 if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) {
466 /* 467 /*
467 * If the API version matched and the reply was NACK, 468 * If the API version matched and the reply was NACK,
468 * assume the PF was not in PROMISC mode. 469 * assume the PF was not in PROMISC mode.
469 */ 470 */
470 return IXGBE_ERR_NOT_IN_PROMISC; 471 return IXGBE_ERR_NOT_IN_PROMISC;
471 } else 472 } else
472 return IXGBE_ERR_FEATURE_NOT_SUPPORTED; 473 return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
473 } 474 }
474 /* 475 /*
 475 * In Linux's PF driver implementation, the PF replies to the VF's 476 * In Linux's PF driver implementation, the PF replies to the VF's
 476 * XCAST_MODE_ALLMULTI message with ACK rather than NACK even if the 477 * XCAST_MODE_ALLMULTI message with ACK rather than NACK even if the
 477 * virtual function is NOT marked "trust", and then acts as 478 * virtual function is NOT marked "trust", and then acts as
 478 * XCAST_MODE_"MULTI". If ixv(4) simply checked the return value of 479 * XCAST_MODE_"MULTI". If ixv(4) simply checked the return value of
 479 * update_xcast_mode(XCAST_MODE_ALLMULTI), SIOCSADDMULTI would succeed 480 * update_xcast_mode(XCAST_MODE_ALLMULTI), SIOCSADDMULTI would succeed
 480 * and the user might have trouble with some addresses. Fortunately, 481 * and the user might have trouble with some addresses. Fortunately,
 481 * the Linux PF driver's "ACK" message carries XCAST_MODE_MULTI, not 482 * the Linux PF driver's "ACK" message carries XCAST_MODE_MULTI, not
 482 * XCAST_MODE_"ALL"MULTI, so we can detect this state by checking 483 * XCAST_MODE_"ALL"MULTI, so we can detect this state by checking
 483 * whether the sent message's argument and the reply message's 484 * whether the sent message's argument and the reply message's
 484 * argument differ. 485 * argument differ.
485 */ 486 */
486 if ((xcast_mode > IXGBEVF_XCAST_MODE_MULTI) 487 if ((xcast_mode > IXGBEVF_XCAST_MODE_MULTI)
487 && (xcast_mode != msgbuf[1])) 488 && (xcast_mode != msgbuf[1]))
488 return IXGBE_ERR_NOT_TRUSTED; 489 return IXGBE_ERR_NOT_TRUSTED;
489 return IXGBE_SUCCESS; 490 return IXGBE_SUCCESS;
490} 491}
491 492
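The check above boils down to comparing the mode that was requested with the mode echoed back in the reply. A hedged standalone illustration of that comparison, with the mode enumeration values assumed for the example rather than taken from the driver's mailbox header:

#include <stdio.h>

/* assumed ordering, for illustration */
enum { XCAST_MODE_NONE, XCAST_MODE_MULTI, XCAST_MODE_ALLMULTI, XCAST_MODE_PROMISC };

/* returns 0 if the PF honored the request, -1 if it silently downgraded it */
static int check_xcast_reply(int requested, unsigned int reply_arg)
{
	if (requested > XCAST_MODE_MULTI && (unsigned int)requested != reply_arg)
		return -1;	/* e.g. asked for ALLMULTI, got MULTI back */
	return 0;
}

int main(void)
{
	printf("%d\n", check_xcast_reply(XCAST_MODE_ALLMULTI, XCAST_MODE_MULTI));    /* -1 */
	printf("%d\n", check_xcast_reply(XCAST_MODE_ALLMULTI, XCAST_MODE_ALLMULTI)); /* 0 */
	return 0;
}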
492/** 493/**
493 * ixgbe_set_vfta_vf - Set/Unset vlan filter table address 494 * ixgbe_set_vfta_vf - Set/Unset vlan filter table address
494 * @hw: pointer to the HW structure 495 * @hw: pointer to the HW structure
495 * @vlan: 12 bit VLAN ID 496 * @vlan: 12 bit VLAN ID
496 * @vind: unused by VF drivers 497 * @vind: unused by VF drivers
497 * @vlan_on: if TRUE then set bit, else clear bit 498 * @vlan_on: if TRUE then set bit, else clear bit
498 * @vlvf_bypass: boolean flag indicating updating default pool is okay 499 * @vlvf_bypass: boolean flag indicating updating default pool is okay
499 * 500 *
500 * Turn on/off specified VLAN in the VLAN filter table. 501 * Turn on/off specified VLAN in the VLAN filter table.
501 **/ 502 **/
502s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, 503s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
503 bool vlan_on, bool vlvf_bypass) 504 bool vlan_on, bool vlvf_bypass)
504{ 505{
505 u32 msgbuf[2]; 506 u32 msgbuf[2];
506 s32 ret_val; 507 s32 ret_val;
507 UNREFERENCED_2PARAMETER(vind, vlvf_bypass); 508 UNREFERENCED_2PARAMETER(vind, vlvf_bypass);
508 509
509 msgbuf[0] = IXGBE_VF_SET_VLAN; 510 msgbuf[0] = IXGBE_VF_SET_VLAN;
510 msgbuf[1] = vlan; 511 msgbuf[1] = vlan;
511 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ 512 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
512 msgbuf[0] |= (u32)vlan_on << IXGBE_VT_MSGINFO_SHIFT; 513 msgbuf[0] |= (u32)vlan_on << IXGBE_VT_MSGINFO_SHIFT;
513 514
514 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); 515 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
515 if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK)) 516 if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
516 return IXGBE_SUCCESS; 517 return IXGBE_SUCCESS;
517 518
518 return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK); 519 return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
519} 520}
520 521
521/** 522/**
522 * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues 523 * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
523 * @hw: pointer to hardware structure 524 * @hw: pointer to hardware structure
524 * 525 *
525 * Returns the number of transmit queues for the given adapter. 526 * Returns the number of transmit queues for the given adapter.
526 **/ 527 **/
527u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw) 528u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
528{ 529{
529 UNREFERENCED_1PARAMETER(hw); 530 UNREFERENCED_1PARAMETER(hw);
530 return IXGBE_VF_MAX_TX_QUEUES; 531 return IXGBE_VF_MAX_TX_QUEUES;
531} 532}
532 533
533/** 534/**
534 * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues 535 * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
535 * @hw: pointer to hardware structure 536 * @hw: pointer to hardware structure
536 * 537 *
537 * Returns the number of receive queues for the given adapter. 538 * Returns the number of receive queues for the given adapter.
538 **/ 539 **/
539u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw) 540u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
540{ 541{
541 UNREFERENCED_1PARAMETER(hw); 542 UNREFERENCED_1PARAMETER(hw);
542 return IXGBE_VF_MAX_RX_QUEUES; 543 return IXGBE_VF_MAX_RX_QUEUES;
543} 544}
544 545
545/** 546/**
546 * ixgbe_get_mac_addr_vf - Read device MAC address 547 * ixgbe_get_mac_addr_vf - Read device MAC address
547 * @hw: pointer to the HW structure 548 * @hw: pointer to the HW structure
548 * @mac_addr: the MAC address 549 * @mac_addr: the MAC address
549 **/ 550 **/
550s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) 551s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
551{ 552{
552 int i; 553 int i;
553 554
554 for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++) 555 for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
555 mac_addr[i] = hw->mac.perm_addr[i]; 556 mac_addr[i] = hw->mac.perm_addr[i];
556 557
557 return IXGBE_SUCCESS; 558 return IXGBE_SUCCESS;
558} 559}
559 560
560s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) 561s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
561{ 562{
562 u32 msgbuf[3], msgbuf_chk; 563 u32 msgbuf[3], msgbuf_chk;
563 u8 *msg_addr = (u8 *)(&msgbuf[1]); 564 u8 *msg_addr = (u8 *)(&msgbuf[1]);
564 s32 ret_val; 565 s32 ret_val;
565 566
566 memset(msgbuf, 0, sizeof(msgbuf)); 567 memset(msgbuf, 0, sizeof(msgbuf));
567 /* 568 /*
568 * If index is one then this is the start of a new list and needs 569 * If index is one then this is the start of a new list and needs
 569 * indication to the PF so it can do its own list management. 570 * indication to the PF so it can do its own list management.
570 * If it is zero then that tells the PF to just clear all of 571 * If it is zero then that tells the PF to just clear all of
571 * this VF's macvlans and there is no new list. 572 * this VF's macvlans and there is no new list.
572 */ 573 */
573 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; 574 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
574 msgbuf[0] |= IXGBE_VF_SET_MACVLAN; 575 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
575 msgbuf_chk = msgbuf[0]; 576 msgbuf_chk = msgbuf[0];
576 if (addr) 577 if (addr)
577 memcpy(msg_addr, addr, 6); 578 memcpy(msg_addr, addr, 6);
578 579
579 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); 580 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
580 if (!ret_val) { 581 if (!ret_val) {
581 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; 582 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
582 583
583 if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) 584 if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
584 return IXGBE_ERR_OUT_OF_MEM; 585 return IXGBE_ERR_OUT_OF_MEM;
585 } 586 }
586 587
587 return ret_val; 588 return ret_val;
588} 589}
589 590
590/** 591/**
591 * ixgbe_setup_mac_link_vf - Setup MAC link settings 592 * ixgbe_setup_mac_link_vf - Setup MAC link settings
592 * @hw: pointer to hardware structure 593 * @hw: pointer to hardware structure
593 * @speed: new link speed 594 * @speed: new link speed
594 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 595 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
595 * 596 *
596 * Set the link speed in the AUTOC register and restarts link. 597 * Set the link speed in the AUTOC register and restarts link.
597 **/ 598 **/
598s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed, 599s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
599 bool autoneg_wait_to_complete) 600 bool autoneg_wait_to_complete)
600{ 601{
601 UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete); 602 UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
602 return IXGBE_SUCCESS; 603 return IXGBE_SUCCESS;
603} 604}
604 605
605/** 606/**
606 * ixgbe_check_mac_link_vf - Get link/speed status 607 * ixgbe_check_mac_link_vf - Get link/speed status
607 * @hw: pointer to hardware structure 608 * @hw: pointer to hardware structure
608 * @speed: pointer to link speed 609 * @speed: pointer to link speed
 609 * @link_up: TRUE if link is up, FALSE otherwise 610 * @link_up: TRUE if link is up, FALSE otherwise
610 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 611 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
611 * 612 *
612 * Reads the links register to determine if link is up and the current speed 613 * Reads the links register to determine if link is up and the current speed
613 **/ 614 **/
614s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 615s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
615 bool *link_up, bool autoneg_wait_to_complete) 616 bool *link_up, bool autoneg_wait_to_complete)
616{ 617{
617 struct ixgbe_mbx_info *mbx = &hw->mbx; 618 struct ixgbe_mbx_info *mbx = &hw->mbx;
618 struct ixgbe_mac_info *mac = &hw->mac; 619 struct ixgbe_mac_info *mac = &hw->mac;
619 s32 ret_val = IXGBE_SUCCESS; 620 s32 ret_val = IXGBE_SUCCESS;
620 u32 links_reg; 621 u32 links_reg;
621 u32 in_msg = 0; 622 u32 in_msg = 0;
622 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); 623 UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
623 624
 624 /* If we were hit with a reset, drop the link */ 625 /* If we were hit with a reset, drop the link */
625 if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) 626 if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
626 mac->get_link_status = TRUE; 627 mac->get_link_status = TRUE;
627 628
628 if (!mac->get_link_status) 629 if (!mac->get_link_status)
629 goto out; 630 goto out;
630 631
 631 /* if link status is down, no point in checking to see if the PF is up */ 632 /* if link status is down, no point in checking to see if the PF is up */
632 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 633 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
633 if (!(links_reg & IXGBE_LINKS_UP)) 634 if (!(links_reg & IXGBE_LINKS_UP))
634 goto out; 635 goto out;
635 636
636 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 637 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
637 * before the link status is correct 638 * before the link status is correct
638 */ 639 */
639 if (mac->type == ixgbe_mac_82599_vf) { 640 if (mac->type == ixgbe_mac_82599_vf) {
640 int i; 641 int i;
641 642
642 for (i = 0; i < 5; i++) { 643 for (i = 0; i < 5; i++) {
643 usec_delay(100); 644 usec_delay(100);
644 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 645 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
645 646
646 if (!(links_reg & IXGBE_LINKS_UP)) 647 if (!(links_reg & IXGBE_LINKS_UP))
647 goto out; 648 goto out;
648 } 649 }
649 } 650 }
650 651
651 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 652 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
652 case IXGBE_LINKS_SPEED_10G_82599: 653 case IXGBE_LINKS_SPEED_10G_82599:
653 *speed = IXGBE_LINK_SPEED_10GB_FULL; 654 *speed = IXGBE_LINK_SPEED_10GB_FULL;
654 if (hw->mac.type >= ixgbe_mac_X550) { 655 if (hw->mac.type >= ixgbe_mac_X550) {
655 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 656 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
656 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 657 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
657 } 658 }
658 break; 659 break;
659 case IXGBE_LINKS_SPEED_1G_82599: 660 case IXGBE_LINKS_SPEED_1G_82599:
660 *speed = IXGBE_LINK_SPEED_1GB_FULL; 661 *speed = IXGBE_LINK_SPEED_1GB_FULL;
661 break; 662 break;
662 case IXGBE_LINKS_SPEED_100_82599: 663 case IXGBE_LINKS_SPEED_100_82599:
663 *speed = IXGBE_LINK_SPEED_100_FULL; 664 *speed = IXGBE_LINK_SPEED_100_FULL;
664 if (hw->mac.type >= ixgbe_mac_X550) { 665 if (hw->mac.type >= ixgbe_mac_X550) {
665 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 666 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
666 *speed = IXGBE_LINK_SPEED_5GB_FULL; 667 *speed = IXGBE_LINK_SPEED_5GB_FULL;
667 } 668 }
668 break; 669 break;
669 case IXGBE_LINKS_SPEED_10_X550EM_A: 670 case IXGBE_LINKS_SPEED_10_X550EM_A:
670 *speed = IXGBE_LINK_SPEED_UNKNOWN; 671 *speed = IXGBE_LINK_SPEED_UNKNOWN;
 671 /* this encoding is reserved in older MACs */ 672 /* this encoding is reserved in older MACs */
672 if (hw->mac.type >= ixgbe_mac_X550) 673 if (hw->mac.type >= ixgbe_mac_X550)
673 *speed = IXGBE_LINK_SPEED_10_FULL; 674 *speed = IXGBE_LINK_SPEED_10_FULL;
674 break; 675 break;
675 default: 676 default:
676 *speed = IXGBE_LINK_SPEED_UNKNOWN; 677 *speed = IXGBE_LINK_SPEED_UNKNOWN;
677 } 678 }
678 679
 679 /* if the read failed it could just be a mailbox collision; best to wait 680 /* if the read failed it could just be a mailbox collision; best to wait
 680 * until we are called again, and don't report an error 681 * until we are called again, and don't report an error
681 */ 682 */
682 if (mbx->ops.read(hw, &in_msg, 1, 0)) 683 if (mbx->ops.read(hw, &in_msg, 1, 0))
683 goto out; 684 goto out;
684 685
685 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 686 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
 686 /* msg is not CTS; if it is a NACK we must have lost CTS status */ 687 /* msg is not CTS; if it is a NACK we must have lost CTS status */
687 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 688 if (in_msg & IXGBE_VT_MSGTYPE_NACK)
688 ret_val = -1; 689 ret_val = -1;
689 goto out; 690 goto out;
690 } 691 }
691 692
 692 /* the PF is talking; if we timed out in the past we reinit */ 693 /* the PF is talking; if we timed out in the past we reinit */
693 if (!mbx->timeout) { 694 if (!mbx->timeout) {
694 ret_val = -1; 695 ret_val = -1;
695 goto out; 696 goto out;
696 } 697 }
697 698
698 /* if we passed all the tests above then the link is up and we no 699 /* if we passed all the tests above then the link is up and we no
699 * longer need to check for link 700 * longer need to check for link
700 */ 701 */
701 mac->get_link_status = FALSE; 702 mac->get_link_status = FALSE;
702 703
703out: 704out:
704 *link_up = !mac->get_link_status; 705 *link_up = !mac->get_link_status;
705 return ret_val; 706 return ret_val;
706} 707}
707 708
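On X550 and newer MACs, the NON_STD bit reinterprets the 10G and 100M speed codes as 2.5G and 5G respectively, which is why the decode above is MAC-generation aware. A standalone sketch of that decode table; the register bit values are assumptions for illustration, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

/* assumed bit values, for illustration only */
#define LINKS_SPEED_MASK    0x30000000u
#define LINKS_SPEED_10G     0x30000000u
#define LINKS_SPEED_1G      0x20000000u
#define LINKS_SPEED_100     0x10000000u
#define LINKS_SPEED_NON_STD 0x08000000u

static const char *decode_speed(uint32_t links, int is_x550_or_newer)
{
	int non_std = is_x550_or_newer && (links & LINKS_SPEED_NON_STD);

	switch (links & LINKS_SPEED_MASK) {
	case LINKS_SPEED_10G: return non_std ? "2.5G" : "10G";
	case LINKS_SPEED_1G:  return "1G";
	case LINKS_SPEED_100: return non_std ? "5G" : "100M";
	default:              return "unknown";
	}
}

int main(void)
{
	printf("%s\n", decode_speed(LINKS_SPEED_10G | LINKS_SPEED_NON_STD, 1)); /* 2.5G */
	printf("%s\n", decode_speed(LINKS_SPEED_100, 0));                       /* 100M */
	return 0;
}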
708/** 709/**
709 * ixgbevf_rlpml_set_vf - Set the maximum receive packet length 710 * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
710 * @hw: pointer to the HW structure 711 * @hw: pointer to the HW structure
711 * @max_size: value to assign to max frame size 712 * @max_size: value to assign to max frame size
712 **/ 713 **/
713s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) 714s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
714{ 715{
715 u32 msgbuf[2]; 716 u32 msgbuf[2];
716 s32 retval; 717 s32 retval;
717 718
718 msgbuf[0] = IXGBE_VF_SET_LPE; 719 msgbuf[0] = IXGBE_VF_SET_LPE;
719 msgbuf[1] = max_size; 720 msgbuf[1] = max_size;
720 721
721 retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); 722 retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
722 if (retval) 723 if (retval)
723 return retval; 724 return retval;
724 if ((msgbuf[0] & IXGBE_VF_SET_LPE) && 725 if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
725 (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) 726 (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
726 return IXGBE_ERR_MBX; 727 return IXGBE_ERR_MBX;
727 728
728 return 0; 729 return 0;
729} 730}
730 731
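A caller typically derives max_size from the interface MTU plus Ethernet framing overhead. A hedged usage sketch follows; the overhead constants are the usual Ethernet values, not taken from this driver, and the resulting value is what a caller would pass to ixgbevf_rlpml_set_vf().

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN  14	/* destination + source MAC + ethertype */
#define ETHER_CRC_LEN  4
#define ETHER_VLAN_LEN 4	/* allow for one 802.1Q tag */

int main(void)
{
	unsigned int mtu = 9000;	/* example jumbo MTU */
	uint16_t max_frame = (uint16_t)(mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_LEN);

	printf("RLPML for MTU %u: %u bytes\n", mtu, (unsigned int)max_frame);
	return 0;
}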
731/** 732/**
732 * ixgbevf_negotiate_api_version - Negotiate supported API version 733 * ixgbevf_negotiate_api_version - Negotiate supported API version
733 * @hw: pointer to the HW structure 734 * @hw: pointer to the HW structure
734 * @api: integer containing requested API version 735 * @api: integer containing requested API version
735 **/ 736 **/
736int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) 737int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
737{ 738{
738 int err; 739 int err;
739 u32 msg[3]; 740 u32 msg[3];
740 741
741 /* Negotiate the mailbox API version */ 742 /* Negotiate the mailbox API version */
742 msg[0] = IXGBE_VF_API_NEGOTIATE; 743 msg[0] = IXGBE_VF_API_NEGOTIATE;
743 msg[1] = api; 744 msg[1] = api;
744 msg[2] = 0; 745 msg[2] = 0;
745 746
746 err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3); 747 err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
747 if (!err) { 748 if (!err) {
748 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; 749 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
749 750
750 /* Store value and return 0 on success */ 751 /* Store value and return 0 on success */
751 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { 752 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
752 hw->api_version = api; 753 hw->api_version = api;
753 return 0; 754 return 0;
754 } 755 }
755 756
756 err = IXGBE_ERR_INVALID_ARGUMENT; 757 err = IXGBE_ERR_INVALID_ARGUMENT;
757 } 758 }
758 759
759 return err; 760 return err;
760} 761}
761 762
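Callers generally try the newest mailbox API first and fall back to older ones, since the PF ACKs only the versions it supports. A minimal sketch of that negotiation loop, with a stub standing in for the mailbox exchange (the stub's supported version is an arbitrary choice for the example):

#include <stdio.h>

enum { mbox_api_10, mbox_api_11, mbox_api_12, mbox_api_13 };

/* stub: pretend the PF supports up to API 1.2 */
static int negotiate(int api)
{
	return (api <= mbox_api_12) ? 0 : -1;
}

int main(void)
{
	const int tries[] = { mbox_api_13, mbox_api_12, mbox_api_11, mbox_api_10 };

	for (size_t i = 0; i < sizeof(tries) / sizeof(tries[0]); i++) {
		if (negotiate(tries[i]) == 0) {
			printf("negotiated API enum %d\n", tries[i]);
			return 0;
		}
	}
	printf("fell back to the legacy API\n");
	return 0;
}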
762int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, 763int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
763 unsigned int *default_tc) 764 unsigned int *default_tc)
764{ 765{
765 int err; 766 int err;
766 u32 msg[5]; 767 u32 msg[5];
767 768
768 /* do nothing if API doesn't support ixgbevf_get_queues */ 769 /* do nothing if API doesn't support ixgbevf_get_queues */
769 switch (hw->api_version) { 770 switch (hw->api_version) {
770 case ixgbe_mbox_api_11: 771 case ixgbe_mbox_api_11:
771 case ixgbe_mbox_api_12: 772 case ixgbe_mbox_api_12:
772 case ixgbe_mbox_api_13: 773 case ixgbe_mbox_api_13:
773 break; 774 break;
774 default: 775 default:
775 return 0; 776 return 0;
776 } 777 }
777 778
778 /* Fetch queue configuration from the PF */ 779 /* Fetch queue configuration from the PF */
779 msg[0] = IXGBE_VF_GET_QUEUES; 780 msg[0] = IXGBE_VF_GET_QUEUES;
780 msg[1] = msg[2] = msg[3] = msg[4] = 0; 781 msg[1] = msg[2] = msg[3] = msg[4] = 0;
781 782
782 err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5); 783 err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
783 if (!err) { 784 if (!err) {
784 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; 785 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
785 786
786 /* 787 /*
787 * if we we didn't get an ACK there must have been 788 * if we didn't get an ACK there must have been
788 * some sort of mailbox error so we should treat it 789 * some sort of mailbox error so we should treat it
789 * as such 790 * as such
790 */ 791 */
791 if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK)) 792 if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
792 return IXGBE_ERR_MBX; 793 return IXGBE_ERR_MBX;
793 794
794 /* record and validate values from message */ 795 /* record and validate values from message */
795 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; 796 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
796 if (hw->mac.max_tx_queues == 0 || 797 if (hw->mac.max_tx_queues == 0 ||
797 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) 798 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
798 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; 799 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
799 800
800 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; 801 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
801 if (hw->mac.max_rx_queues == 0 || 802 if (hw->mac.max_rx_queues == 0 ||
802 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) 803 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
803 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; 804 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
804 805
805 *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; 806 *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
806 /* in case of unknown state assume we cannot tag frames */ 807 /* in case of unknown state assume we cannot tag frames */
807 if (*num_tcs > hw->mac.max_rx_queues) 808 if (*num_tcs > hw->mac.max_rx_queues)
808 *num_tcs = 1; 809 *num_tcs = 1;
809 810
810 *default_tc = msg[IXGBE_VF_DEF_QUEUE]; 811 *default_tc = msg[IXGBE_VF_DEF_QUEUE];
811 /* default to queue 0 on out-of-bounds queue number */ 812 /* default to queue 0 on out-of-bounds queue number */
812 if (*default_tc >= hw->mac.max_tx_queues) 813 if (*default_tc >= hw->mac.max_tx_queues)
813 *default_tc = 0; 814 *default_tc = 0;
814 } 815 }
815 816
816 return err; 817 return err;
817} 818}
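The validation above clamps untrusted PF-supplied queue counts into a safe range rather than failing. A standalone sketch of the same clamp, with a maximum of 8 queues assumed for illustration:

#include <stdio.h>

#define VF_MAX_QUEUES 8	/* assumed limit, for illustration */

/* clamp a PF-supplied queue count into the valid range */
static unsigned int clamp_queues(unsigned int n)
{
	if (n == 0 || n > VF_MAX_QUEUES)
		return VF_MAX_QUEUES;
	return n;
}

int main(void)
{
	printf("%u %u %u\n", clamp_queues(0), clamp_queues(4), clamp_queues(42));
	/* prints: 8 4 8 */
	return 0;
}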

cvs diff -r1.16 -r1.16.8.1 src/sys/dev/pci/ixgbe/ixgbe_x540.c

--- src/sys/dev/pci/ixgbe/ixgbe_x540.c 2018/06/11 10:34:18 1.16
+++ src/sys/dev/pci/ixgbe/ixgbe_x540.c 2021/03/11 16:00:24 1.16.8.1
@@ -1,1074 +1,1074 @@ @@ -1,1074 +1,1074 @@
1/****************************************************************************** 1/******************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause 2 SPDX-License-Identifier: BSD-3-Clause
3 3
4 Copyright (c) 2001-2017, Intel Corporation 4 Copyright (c) 2001-2017, Intel Corporation
5 All rights reserved. 5 All rights reserved.
6 6
7 Redistribution and use in source and binary forms, with or without 7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met: 8 modification, are permitted provided that the following conditions are met:
9 9
10 1. Redistributions of source code must retain the above copyright notice, 10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer. 11 this list of conditions and the following disclaimer.
12 12
13 2. Redistributions in binary form must reproduce the above copyright 13 2. Redistributions in binary form must reproduce the above copyright
14 notice, this list of conditions and the following disclaimer in the 14 notice, this list of conditions and the following disclaimer in the
15 documentation and/or other materials provided with the distribution. 15 documentation and/or other materials provided with the distribution.
16 16
17 3. Neither the name of the Intel Corporation nor the names of its 17 3. Neither the name of the Intel Corporation nor the names of its
18 contributors may be used to endorse or promote products derived from 18 contributors may be used to endorse or promote products derived from
19 this software without specific prior written permission. 19 this software without specific prior written permission.
20 20
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE. 31 POSSIBILITY OF SUCH DAMAGE.
32 32
33******************************************************************************/ 33******************************************************************************/
34/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x540.c 331224 2018-03-19 20:55:05Z erj $*/ 34/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_x540.c 331224 2018-03-19 20:55:05Z erj $*/
35 35
36#include "ixgbe_x540.h" 36#include "ixgbe_x540.h"
37#include "ixgbe_type.h" 37#include "ixgbe_type.h"
38#include "ixgbe_api.h" 38#include "ixgbe_api.h"
39#include "ixgbe_common.h" 39#include "ixgbe_common.h"
40#include "ixgbe_phy.h" 40#include "ixgbe_phy.h"
41 41
42#define IXGBE_X540_MAX_TX_QUEUES 128 42#define IXGBE_X540_MAX_TX_QUEUES 128
43#define IXGBE_X540_MAX_RX_QUEUES 128 43#define IXGBE_X540_MAX_RX_QUEUES 128
44#define IXGBE_X540_RAR_ENTRIES 128 44#define IXGBE_X540_RAR_ENTRIES 128
45#define IXGBE_X540_MC_TBL_SIZE 128 45#define IXGBE_X540_MC_TBL_SIZE 128
46#define IXGBE_X540_VFT_TBL_SIZE 128 46#define IXGBE_X540_VFT_TBL_SIZE 128
47#define IXGBE_X540_RX_PB_SIZE 384 47#define IXGBE_X540_RX_PB_SIZE 384
48 48
49static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); 49static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
50static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); 50static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
51static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); 51static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
52 52
53/** 53/**
54 * ixgbe_init_ops_X540 - Inits func ptrs and MAC type 54 * ixgbe_init_ops_X540 - Inits func ptrs and MAC type
55 * @hw: pointer to hardware structure 55 * @hw: pointer to hardware structure
56 * 56 *
57 * Initialize the function pointers and assign the MAC type for X540. 57 * Initialize the function pointers and assign the MAC type for X540.
58 * Does not touch the hardware. 58 * Does not touch the hardware.
59 **/ 59 **/
60s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) 60s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
61{ 61{
62 struct ixgbe_mac_info *mac = &hw->mac; 62 struct ixgbe_mac_info *mac = &hw->mac;
63 struct ixgbe_phy_info *phy = &hw->phy; 63 struct ixgbe_phy_info *phy = &hw->phy;
64 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 64 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
65 s32 ret_val; 65 s32 ret_val;
66 66
67 DEBUGFUNC("ixgbe_init_ops_X540"); 67 DEBUGFUNC("ixgbe_init_ops_X540");
68 68
69 ret_val = ixgbe_init_phy_ops_generic(hw); 69 ret_val = ixgbe_init_phy_ops_generic(hw);
70 ret_val = ixgbe_init_ops_generic(hw); 70 ret_val = ixgbe_init_ops_generic(hw);
71 71
72 72
73 /* EEPROM */ 73 /* EEPROM */
74 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; 74 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
75 eeprom->ops.read = ixgbe_read_eerd_X540; 75 eeprom->ops.read = ixgbe_read_eerd_X540;
76 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; 76 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
77 eeprom->ops.write = ixgbe_write_eewr_X540; 77 eeprom->ops.write = ixgbe_write_eewr_X540;
78 eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; 78 eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
79 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; 79 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
80 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; 80 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
81 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; 81 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
82 82
83 /* PHY */ 83 /* PHY */
84 phy->ops.init = ixgbe_init_phy_ops_generic; 84 phy->ops.init = ixgbe_init_phy_ops_generic;
85 phy->ops.reset = NULL; 85 phy->ops.reset = NULL;
86 phy->ops.set_phy_power = ixgbe_set_copper_phy_power; 86 phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
87 87
88 /* MAC */ 88 /* MAC */
89 mac->ops.reset_hw = ixgbe_reset_hw_X540; 89 mac->ops.reset_hw = ixgbe_reset_hw_X540;
90 mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; 90 mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
91 mac->ops.get_media_type = ixgbe_get_media_type_X540; 91 mac->ops.get_media_type = ixgbe_get_media_type_X540;
92 mac->ops.get_supported_physical_layer = 92 mac->ops.get_supported_physical_layer =
93 ixgbe_get_supported_physical_layer_X540; 93 ixgbe_get_supported_physical_layer_X540;
94 mac->ops.read_analog_reg8 = NULL; 94 mac->ops.read_analog_reg8 = NULL;
95 mac->ops.write_analog_reg8 = NULL; 95 mac->ops.write_analog_reg8 = NULL;
96 mac->ops.start_hw = ixgbe_start_hw_X540; 96 mac->ops.start_hw = ixgbe_start_hw_X540;
97 mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; 97 mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
98 mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; 98 mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
99 mac->ops.get_device_caps = ixgbe_get_device_caps_generic; 99 mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
100 mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; 100 mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
101 mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; 101 mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
102 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; 102 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
103 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; 103 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
104 mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; 104 mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540;
105 mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; 105 mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
106 mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; 106 mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
107 107
108 /* RAR, Multicast, VLAN */ 108 /* RAR, Multicast, VLAN */
109 mac->ops.set_vmdq = ixgbe_set_vmdq_generic; 109 mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
110 mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; 110 mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
111 mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; 111 mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
112 mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; 112 mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
113 mac->rar_highwater = 1; 113 mac->rar_highwater = 1;
114 mac->ops.set_vfta = ixgbe_set_vfta_generic; 114 mac->ops.set_vfta = ixgbe_set_vfta_generic;
115 mac->ops.set_vlvf = ixgbe_set_vlvf_generic; 115 mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
116 mac->ops.clear_vfta = ixgbe_clear_vfta_generic; 116 mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
117 mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; 117 mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
118 mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; 118 mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
119 mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; 119 mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
120 120
121 /* Link */ 121 /* Link */
122 mac->ops.get_link_capabilities = 122 mac->ops.get_link_capabilities =
123 ixgbe_get_copper_link_capabilities_generic; 123 ixgbe_get_copper_link_capabilities_generic;
124 mac->ops.setup_link = ixgbe_setup_mac_link_X540; 124 mac->ops.setup_link = ixgbe_setup_mac_link_X540;
125 mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; 125 mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
126 mac->ops.check_link = ixgbe_check_mac_link_generic; 126 mac->ops.check_link = ixgbe_check_mac_link_generic;
127 mac->ops.bypass_rw = ixgbe_bypass_rw_generic; 127 mac->ops.bypass_rw = ixgbe_bypass_rw_generic;
128 mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic; 128 mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic;
129 mac->ops.bypass_set = ixgbe_bypass_set_generic; 129 mac->ops.bypass_set = ixgbe_bypass_set_generic;
130 mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic; 130 mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic;
131 131
132 132
133 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; 133 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
134 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; 134 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
135 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; 135 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
136 mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; 136 mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
137 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; 137 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
138 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; 138 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
139 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 139 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
140 140
141 /* 141 /*
142 * FWSM register 142 * FWSM register
143 * ARC supported; valid only if manageability features are 143 * ARC supported; valid only if manageability features are
144 * enabled. 144 * enabled.
145 */ 145 */
146 mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) 146 mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
147 & IXGBE_FWSM_MODE_MASK); 147 & IXGBE_FWSM_MODE_MASK);
148 148
149 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; 149 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
150 150
151 /* LEDs */ 151 /* LEDs */
152 mac->ops.blink_led_start = ixgbe_blink_led_start_X540; 152 mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
153 mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; 153 mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
154 154
155 /* Manageability interface */ 155 /* Manageability interface */
156 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; 156 mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
157 157
158 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; 158 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
159 159
160 return ret_val; 160 return ret_val;
161} 161}
162 162
163/** 163/**
164 * ixgbe_get_link_capabilities_X540 - Determines link capabilities 164 * ixgbe_get_link_capabilities_X540 - Determines link capabilities
165 * @hw: pointer to hardware structure 165 * @hw: pointer to hardware structure
166 * @speed: pointer to link speed 166 * @speed: pointer to link speed
167 * @autoneg: TRUE when autoneg or autotry is enabled 167 * @autoneg: TRUE when autoneg or autotry is enabled
168 * 168 *
169 * Determines the link capabilities by reading the AUTOC register. 169 * Determines the link capabilities by reading the AUTOC register.
170 **/ 170 **/
171s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, 171s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
172 ixgbe_link_speed *speed, 172 ixgbe_link_speed *speed,
173 bool *autoneg) 173 bool *autoneg)
174{ 174{
175 ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); 175 ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
176 176
177 return IXGBE_SUCCESS; 177 return IXGBE_SUCCESS;
178} 178}
179 179
180/** 180/**
181 * ixgbe_get_media_type_X540 - Get media type 181 * ixgbe_get_media_type_X540 - Get media type
182 * @hw: pointer to hardware structure 182 * @hw: pointer to hardware structure
183 * 183 *
184 * Returns the media type (fiber, copper, backplane) 184 * Returns the media type (fiber, copper, backplane)
185 **/ 185 **/
186enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) 186enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
187{ 187{
188 UNREFERENCED_1PARAMETER(hw); 188 UNREFERENCED_1PARAMETER(hw);
189 return ixgbe_media_type_copper; 189 return ixgbe_media_type_copper;
190} 190}
191 191
192/** 192/**
193 * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities 193 * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
194 * @hw: pointer to hardware structure 194 * @hw: pointer to hardware structure
195 * @speed: new link speed 195 * @speed: new link speed
196 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 196 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
197 **/ 197 **/
198s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, 198s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
199 ixgbe_link_speed speed, 199 ixgbe_link_speed speed,
200 bool autoneg_wait_to_complete) 200 bool autoneg_wait_to_complete)
201{ 201{
202 DEBUGFUNC("ixgbe_setup_mac_link_X540"); 202 DEBUGFUNC("ixgbe_setup_mac_link_X540");
203 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); 203 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
204} 204}
205 205
206/** 206/**
207 * ixgbe_reset_hw_X540 - Perform hardware reset 207 * ixgbe_reset_hw_X540 - Perform hardware reset
208 * @hw: pointer to hardware structure 208 * @hw: pointer to hardware structure
209 * 209 *
210 * Resets the hardware by resetting the transmit and receive units, masks 210 * Resets the hardware by resetting the transmit and receive units, masks
 211 * and clears all interrupts, and performs a reset. 211 * and clears all interrupts, and performs a reset.
212 **/ 212 **/
213s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) 213s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
214{ 214{
215 s32 status; 215 s32 status;
216 u32 ctrl, i; 216 u32 ctrl, i;
217 u32 swfw_mask = hw->phy.phy_semaphore_mask; 217 u32 swfw_mask = hw->phy.phy_semaphore_mask;
218 218
219 DEBUGFUNC("ixgbe_reset_hw_X540"); 219 DEBUGFUNC("ixgbe_reset_hw_X540");
220 220
221 /* Call adapter stop to disable tx/rx and clear interrupts */ 221 /* Call adapter stop to disable tx/rx and clear interrupts */
222 status = hw->mac.ops.stop_adapter(hw); 222 status = hw->mac.ops.stop_adapter(hw);
223 if (status != IXGBE_SUCCESS) 223 if (status != IXGBE_SUCCESS)
224 goto reset_hw_out; 224 goto reset_hw_out;
225 225
226 /* flush pending Tx transactions */ 226 /* flush pending Tx transactions */
227 ixgbe_clear_tx_pending(hw); 227 ixgbe_clear_tx_pending(hw);
228 228
229mac_reset_top: 229mac_reset_top:
230 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); 230 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
231 if (status != IXGBE_SUCCESS) { 231 if (status != IXGBE_SUCCESS) {
232 ERROR_REPORT2(IXGBE_ERROR_CAUTION, 232 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
233 "semaphore failed with %d", status); 233 "semaphore failed with %d", status);
234 return IXGBE_ERR_SWFW_SYNC; 234 return IXGBE_ERR_SWFW_SYNC;
235 } 235 }
236 ctrl = IXGBE_CTRL_RST; 236 ctrl = IXGBE_CTRL_RST;
237 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); 237 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
238 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 238 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
239 IXGBE_WRITE_FLUSH(hw); 239 IXGBE_WRITE_FLUSH(hw);
240 hw->mac.ops.release_swfw_sync(hw, swfw_mask); 240 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
241 241
242 /* Poll for reset bit to self-clear indicating reset is complete */ 242 /* Poll for reset bit to self-clear indicating reset is complete */
243 for (i = 0; i < 10; i++) { 243 for (i = 0; i < 10; i++) {
244 usec_delay(1); 244 usec_delay(1);
245 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 245 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
246 if (!(ctrl & IXGBE_CTRL_RST_MASK)) 246 if (!(ctrl & IXGBE_CTRL_RST_MASK))
247 break; 247 break;
248 } 248 }
249 249
250 if (ctrl & IXGBE_CTRL_RST_MASK) { 250 if (ctrl & IXGBE_CTRL_RST_MASK) {
251 status = IXGBE_ERR_RESET_FAILED; 251 status = IXGBE_ERR_RESET_FAILED;
252 ERROR_REPORT1(IXGBE_ERROR_POLLING, 252 ERROR_REPORT1(IXGBE_ERROR_POLLING,
253 "Reset polling failed to complete.\n"); 253 "Reset polling failed to complete.\n");
254 } 254 }
255 msec_delay(100); 255 msec_delay(100);
256 256
257 /* 257 /*
258 * Double resets are required for recovery from certain error 258 * Double resets are required for recovery from certain error
259 * conditions. Between resets, it is necessary to stall to allow time 259 * conditions. Between resets, it is necessary to stall to allow time
260 * for any pending HW events to complete. 260 * for any pending HW events to complete.
261 */ 261 */
262 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { 262 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
263 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 263 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
264 goto mac_reset_top; 264 goto mac_reset_top;
265 } 265 }
266 266
267 /* Set the Rx packet buffer size. */ 267 /* Set the Rx packet buffer size. */
268 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); 268 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
269 269
270 /* Store the permanent mac address */ 270 /* Store the permanent mac address */
271 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 271 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
272 272
273 /* 273 /*
274 * Store MAC address from RAR0, clear receive address registers, and 274 * Store MAC address from RAR0, clear receive address registers, and
275 * clear the multicast table. Also reset num_rar_entries to 128, 275 * clear the multicast table. Also reset num_rar_entries to 128,
276 * since we modify this value when programming the SAN MAC address. 276 * since we modify this value when programming the SAN MAC address.
277 */ 277 */
278 hw->mac.num_rar_entries = 128; 278 hw->mac.num_rar_entries = 128;
279 hw->mac.ops.init_rx_addrs(hw); 279 hw->mac.ops.init_rx_addrs(hw);
280 280
281 /* Store the permanent SAN mac address */ 281 /* Store the permanent SAN mac address */
282 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 282 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
283 283
284 /* Add the SAN MAC address to the RAR only if it's a valid address */ 284 /* Add the SAN MAC address to the RAR only if it's a valid address */
285 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { 285 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
286 /* Save the SAN MAC RAR index */ 286 /* Save the SAN MAC RAR index */
287 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; 287 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
288 288
289 hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, 289 hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
290 hw->mac.san_addr, 0, IXGBE_RAH_AV); 290 hw->mac.san_addr, 0, IXGBE_RAH_AV);
291 291
292 /* clear VMDq pool/queue selection for this RAR */ 292 /* clear VMDq pool/queue selection for this RAR */
293 hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, 293 hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
294 IXGBE_CLEAR_VMDQ_ALL); 294 IXGBE_CLEAR_VMDQ_ALL);
295 295
296 /* Reserve the last RAR for the SAN MAC address */ 296 /* Reserve the last RAR for the SAN MAC address */
297 hw->mac.num_rar_entries--; 297 hw->mac.num_rar_entries--;
298 } 298 }
299 299
300 /* Store the alternative WWNN/WWPN prefix */ 300 /* Store the alternative WWNN/WWPN prefix */
301 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 301 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
302 &hw->mac.wwpn_prefix); 302 &hw->mac.wwpn_prefix);
303 303
304reset_hw_out: 304reset_hw_out:
305 return status; 305 return status;
306} 306}
307 307
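The reset path above is a common poll-until-clear pattern: write the reset bit, poll briefly for the hardware to clear it, and treat a still-set bit as failure. A standalone sketch of the shape of that loop, with a simulated register standing in for IXGBE_CTRL and the bit value assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define CTRL_RST 0x04000000u	/* assumed bit value, for illustration */

static uint32_t ctrl_reg;
static int reads;

/* simulate hardware clearing the reset bit after a few reads */
static uint32_t read_ctrl(void)
{
	if (++reads >= 3)
		ctrl_reg &= ~CTRL_RST;
	return ctrl_reg;
}

int main(void)
{
	int done = 0;

	ctrl_reg = CTRL_RST;	/* "write" the reset */

	for (int i = 0; i < 10; i++) {
		/* usec_delay(1) would go here in the driver */
		if (!(read_ctrl() & CTRL_RST)) {
			done = 1;
			break;
		}
	}
	printf(done ? "reset complete after %d reads\n"
	            : "reset failed (%d reads)\n", reads);
	return 0;
}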
308/** 308/**
309 * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx 309 * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
310 * @hw: pointer to hardware structure 310 * @hw: pointer to hardware structure
311 * 311 *
312 * Starts the hardware using the generic start_hw function 312 * Starts the hardware using the generic start_hw function
313 * and the generation start_hw function. 313 * and the generation start_hw function.
314 * Then performs revision-specific operations, if any. 314 * Then performs revision-specific operations, if any.
315 **/ 315 **/
316s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) 316s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
317{ 317{
318 s32 ret_val = IXGBE_SUCCESS; 318 s32 ret_val = IXGBE_SUCCESS;
319 319
320 DEBUGFUNC("ixgbe_start_hw_X540"); 320 DEBUGFUNC("ixgbe_start_hw_X540");
321 321
322 ret_val = ixgbe_start_hw_generic(hw); 322 ret_val = ixgbe_start_hw_generic(hw);
323 if (ret_val != IXGBE_SUCCESS) 323 if (ret_val != IXGBE_SUCCESS)
324 goto out; 324 goto out;
325 325
326 ret_val = ixgbe_start_hw_gen2(hw); 326 ret_val = ixgbe_start_hw_gen2(hw);
327 327
328out: 328out:
329 return ret_val; 329 return ret_val;
330} 330}
331 331
332/** 332/**
333 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type 333 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
334 * @hw: pointer to hardware structure 334 * @hw: pointer to hardware structure
335 * 335 *
336 * Determines physical layer capabilities of the current configuration. 336 * Determines physical layer capabilities of the current configuration.
337 **/ 337 **/
338u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) 338u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
339{ 339{
340 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 340 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
341 u16 ext_ability = 0; 341 u16 ext_ability = 0;
342 342
343 DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); 343 DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
344 344
345 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, 345 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
346 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); 346 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
347 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) 347 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
348 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 348 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
349 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) 349 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
350 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; 350 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
351 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) 351 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
352 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 352 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
353 353
354 if (hw->mac.type == ixgbe_mac_X550) { 354 if (hw->mac.type == ixgbe_mac_X550) {
355 physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T 355 physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T
356 | IXGBE_PHYSICAL_LAYER_5GBASE_T; 356 | IXGBE_PHYSICAL_LAYER_5GBASE_T;
357 } 357 }
358 358
359 return physical_layer; 359 return physical_layer;
360} 360}
361 361
362/** 362/**
363 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params 363 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
364 * @hw: pointer to hardware structure 364 * @hw: pointer to hardware structure
365 * 365 *
366 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 366 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
367 * ixgbe_hw struct in order to set up EEPROM access. 367 * ixgbe_hw struct in order to set up EEPROM access.
368 **/ 368 **/
369s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) 369s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
370{ 370{
371 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 371 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
372 u32 eec; 372 u32 eec;
373 u16 eeprom_size; 373 u16 eeprom_size;
374 374
375 DEBUGFUNC("ixgbe_init_eeprom_params_X540"); 375 DEBUGFUNC("ixgbe_init_eeprom_params_X540");
376 376
377 if (eeprom->type == ixgbe_eeprom_uninitialized) { 377 if (eeprom->type == ixgbe_eeprom_uninitialized) {
378 eeprom->semaphore_delay = 10; 378 eeprom->semaphore_delay = 10;
379 eeprom->type = ixgbe_flash; 379 eeprom->type = ixgbe_flash;
380 380
381 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 381 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
382 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> 382 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
383 IXGBE_EEC_SIZE_SHIFT); 383 IXGBE_EEC_SIZE_SHIFT);
384 eeprom->word_size = 1 << (eeprom_size + 384 eeprom->word_size = 1 << (eeprom_size +
385 IXGBE_EEPROM_WORD_SIZE_SHIFT); 385 IXGBE_EEPROM_WORD_SIZE_SHIFT);
386 386
387 DEBUGOUT2("Eeprom params: type = %d, size = %d\n", 387 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
388 eeprom->type, eeprom->word_size); 388 eeprom->type, eeprom->word_size);
389 } 389 }
390 390
391 return IXGBE_SUCCESS; 391 return IXGBE_SUCCESS;
392} 392}
393 393
394/** 394/**
395 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD    395 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
396 * @hw: pointer to hardware structure 396 * @hw: pointer to hardware structure
397 * @offset: offset of word in the EEPROM to read 397 * @offset: offset of word in the EEPROM to read
398 * @data: word read from the EEPROM 398 * @data: word read from the EEPROM
399 * 399 *
400 * Reads a 16 bit word from the EEPROM using the EERD register. 400 * Reads a 16 bit word from the EEPROM using the EERD register.
401 **/ 401 **/
402s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) 402s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
403{ 403{
404 s32 status = IXGBE_SUCCESS; 404 s32 status = IXGBE_SUCCESS;
405 405
406 DEBUGFUNC("ixgbe_read_eerd_X540"); 406 DEBUGFUNC("ixgbe_read_eerd_X540");
407 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 407 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
408 IXGBE_SUCCESS) { 408 IXGBE_SUCCESS) {
409 status = ixgbe_read_eerd_generic(hw, offset, data); 409 status = ixgbe_read_eerd_generic(hw, offset, data);
410 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 410 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
411 } else { 411 } else {
412 status = IXGBE_ERR_SWFW_SYNC; 412 status = IXGBE_ERR_SWFW_SYNC;
413 } 413 }
414 414
415 return status; 415 return status;
416} 416}
417 417
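
Note: the read/write entry points above and below all share one guard shape:
take the EEP_SM software/firmware semaphore, run the generic EERD/EEWR helper,
and release the semaphore whatever the outcome. A condensed sketch of that
pattern follows; the ixgbe_* names are the driver's, but the wrapper itself is
illustrative and not part of the source:

	static s32
	eeprom_op_guarded(struct ixgbe_hw *hw,
	    s32 (*op)(struct ixgbe_hw *, u16, u16 *), u16 offset, u16 *data)
	{
		s32 status;

		/* EEP_SM serializes EEPROM access against FW and other SW */
		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) !=
		    IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;
		/* e.g. ixgbe_read_eerd_generic or ixgbe_write_eewr_generic */
		status = op(hw, offset, data);
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
		return status;
	}
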
418/** 418/**
419 * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD    419 * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
420 * @hw: pointer to hardware structure 420 * @hw: pointer to hardware structure
421 * @offset: offset of word in the EEPROM to read 421 * @offset: offset of word in the EEPROM to read
422 * @words: number of words 422 * @words: number of words
423 * @data: word(s) read from the EEPROM 423 * @data: word(s) read from the EEPROM
424 * 424 *
425 * Reads 16 bit word(s) from the EEPROM using the EERD register.    425 * Reads 16 bit word(s) from the EEPROM using the EERD register.
426 **/ 426 **/
427s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, 427s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
428 u16 offset, u16 words, u16 *data) 428 u16 offset, u16 words, u16 *data)
429{ 429{
430 s32 status = IXGBE_SUCCESS; 430 s32 status = IXGBE_SUCCESS;
431 431
432 DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); 432 DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
433 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 433 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
434 IXGBE_SUCCESS) { 434 IXGBE_SUCCESS) {
435 status = ixgbe_read_eerd_buffer_generic(hw, offset, 435 status = ixgbe_read_eerd_buffer_generic(hw, offset,
436 words, data); 436 words, data);
437 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 437 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
438 } else { 438 } else {
439 status = IXGBE_ERR_SWFW_SYNC; 439 status = IXGBE_ERR_SWFW_SYNC;
440 } 440 }
441 441
442 return status; 442 return status;
443} 443}
444 444
445/** 445/**
446 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR 446 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
447 * @hw: pointer to hardware structure 447 * @hw: pointer to hardware structure
448 * @offset: offset of word in the EEPROM to write 448 * @offset: offset of word in the EEPROM to write
449 * @data: word to write to the EEPROM    449 * @data: word to write to the EEPROM
450 * 450 *
451 * Writes a 16 bit word to the EEPROM using the EEWR register.    451 * Writes a 16 bit word to the EEPROM using the EEWR register.
452 **/ 452 **/
453s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) 453s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
454{ 454{
455 s32 status = IXGBE_SUCCESS; 455 s32 status = IXGBE_SUCCESS;
456 456
457 DEBUGFUNC("ixgbe_write_eewr_X540"); 457 DEBUGFUNC("ixgbe_write_eewr_X540");
458 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 458 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
459 IXGBE_SUCCESS) { 459 IXGBE_SUCCESS) {
460 status = ixgbe_write_eewr_generic(hw, offset, data); 460 status = ixgbe_write_eewr_generic(hw, offset, data);
461 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 461 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
462 } else { 462 } else {
463 status = IXGBE_ERR_SWFW_SYNC; 463 status = IXGBE_ERR_SWFW_SYNC;
464 } 464 }
465 465
466 return status; 466 return status;
467} 467}
468 468
469/** 469/**
470 * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR 470 * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
471 * @hw: pointer to hardware structure 471 * @hw: pointer to hardware structure
472 * @offset: offset of word in the EEPROM to write 472 * @offset: offset of word in the EEPROM to write
473 * @words: number of words 473 * @words: number of words
474 * @data: word(s) to write to the EEPROM    474 * @data: word(s) to write to the EEPROM
475 * 475 *
476 * Writes 16 bit word(s) to the EEPROM using the EEWR register.    476 * Writes 16 bit word(s) to the EEPROM using the EEWR register.
477 **/ 477 **/
478s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, 478s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
479 u16 offset, u16 words, u16 *data) 479 u16 offset, u16 words, u16 *data)
480{ 480{
481 s32 status = IXGBE_SUCCESS; 481 s32 status = IXGBE_SUCCESS;
482 482
483 DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); 483 DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
484 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 484 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
485 IXGBE_SUCCESS) { 485 IXGBE_SUCCESS) {
486 status = ixgbe_write_eewr_buffer_generic(hw, offset, 486 status = ixgbe_write_eewr_buffer_generic(hw, offset,
487 words, data); 487 words, data);
488 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 488 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
489 } else { 489 } else {
490 status = IXGBE_ERR_SWFW_SYNC; 490 status = IXGBE_ERR_SWFW_SYNC;
491 } 491 }
492 492
493 return status; 493 return status;
494} 494}
495 495
496/** 496/**
497 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum 497 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
498 * 498 *
499 * This function does not use synchronization for EERD and EEWR. It can 499 * This function does not use synchronization for EERD and EEWR. It can
500 * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.    500 * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
501 * 501 *
502 * @hw: pointer to hardware structure 502 * @hw: pointer to hardware structure
503 * 503 *
504 * Returns a negative error code on error, or the 16-bit checksum 504 * Returns a negative error code on error, or the 16-bit checksum
505 **/ 505 **/
506s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) 506s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
507{ 507{
508 u16 i, j; 508 u16 i, j;
509 u16 checksum = 0; 509 u16 checksum = 0;
510 u16 length = 0; 510 u16 length = 0;
511 u16 pointer = 0; 511 u16 pointer = 0;
512 u16 word = 0; 512 u16 word = 0;
513 u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; 513 u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
514 514
515 /* Do not use hw->eeprom.ops.read because we do not want to take 515 /* Do not use hw->eeprom.ops.read because we do not want to take
516 * the synchronization semaphores here. Instead use 516 * the synchronization semaphores here. Instead use
517 * ixgbe_read_eerd_generic 517 * ixgbe_read_eerd_generic
518 */ 518 */
519 519
520 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); 520 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
521 521
522 /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the 522 /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the
523 * checksum itself 523 * checksum itself
524 */ 524 */
525 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 525 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
526 if (ixgbe_read_eerd_generic(hw, i, &word)) { 526 if (ixgbe_read_eerd_generic(hw, i, &word)) {
527 DEBUGOUT("EEPROM read failed\n"); 527 DEBUGOUT("EEPROM read failed\n");
528 return IXGBE_ERR_EEPROM; 528 return IXGBE_ERR_EEPROM;
529 } 529 }
530 checksum += word; 530 checksum += word;
531 } 531 }
532 532
533 /* Include all data from pointers 0x3, 0x6-0xE. This excludes the 533 /* Include all data from pointers 0x3, 0x6-0xE. This excludes the
534 * FW, PHY module, and PCIe Expansion/Option ROM pointers. 534 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
535 */ 535 */
536 for (i = ptr_start; i < IXGBE_FW_PTR; i++) { 536 for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
537 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) 537 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
538 continue; 538 continue;
539 539
540 if (ixgbe_read_eerd_generic(hw, i, &pointer)) { 540 if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
541 DEBUGOUT("EEPROM read failed\n"); 541 DEBUGOUT("EEPROM read failed\n");
542 return IXGBE_ERR_EEPROM; 542 return IXGBE_ERR_EEPROM;
543 } 543 }
544 544
545 /* Skip pointer section if the pointer is invalid. */ 545 /* Skip pointer section if the pointer is invalid. */
546 if (pointer == 0xFFFF || pointer == 0 || 546 if (pointer == 0xFFFF || pointer == 0 ||
547 pointer >= hw->eeprom.word_size) 547 pointer >= hw->eeprom.word_size)
548 continue; 548 continue;
549 549
550 if (ixgbe_read_eerd_generic(hw, pointer, &length)) { 550 if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
551 DEBUGOUT("EEPROM read failed\n"); 551 DEBUGOUT("EEPROM read failed\n");
552 return IXGBE_ERR_EEPROM; 552 return IXGBE_ERR_EEPROM;
553 } 553 }
554 554
555 /* Skip pointer section if length is invalid. */ 555 /* Skip pointer section if length is invalid. */
556 if (length == 0xFFFF || length == 0 || 556 if (length == 0xFFFF || length == 0 ||
557 (pointer + length) >= hw->eeprom.word_size) 557 (pointer + length) >= hw->eeprom.word_size)
558 continue; 558 continue;
559 559
560 for (j = pointer + 1; j <= pointer + length; j++) { 560 for (j = pointer + 1; j <= pointer + length; j++) {
561 if (ixgbe_read_eerd_generic(hw, j, &word)) { 561 if (ixgbe_read_eerd_generic(hw, j, &word)) {
562 DEBUGOUT("EEPROM read failed\n"); 562 DEBUGOUT("EEPROM read failed\n");
563 return IXGBE_ERR_EEPROM; 563 return IXGBE_ERR_EEPROM;
564 } 564 }
565 checksum += word; 565 checksum += word;
566 } 566 }
567 } 567 }
568 568
569 checksum = (u16)IXGBE_EEPROM_SUM - checksum; 569 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
570 570
571 return (s32)checksum; 571 return (s32)checksum;
572} 572}
573 573
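
Note: the checksum convention used above is that the 16-bit sum of all covered
words, together with the stored checksum word, comes out to IXGBE_EEPROM_SUM
(0xBABA in the shared ixgbe headers; treat that value as assumed here). A
minimal standalone illustration in plain C:

	#include <stdint.h>

	#define EEPROM_SUM	0xBABAu	/* assumed value of IXGBE_EEPROM_SUM */

	/* Pick checksum so sum(words) + checksum == EEPROM_SUM (mod 2^16) */
	static uint16_t
	calc_checksum(const uint16_t *words, int n)
	{
		uint16_t sum = 0;

		for (int i = 0; i < n; i++)
			sum += words[i];
		return (uint16_t)(EEPROM_SUM - sum);
	}

	static int
	checksum_ok(const uint16_t *words, int n, uint16_t stored)
	{
		return calc_checksum(words, n) == stored;
	}
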
574/** 574/**
575 * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum 575 * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
576 * @hw: pointer to hardware structure 576 * @hw: pointer to hardware structure
577 * @checksum_val: calculated checksum 577 * @checksum_val: calculated checksum
578 * 578 *
579 * Performs checksum calculation and validates the EEPROM checksum. If the 579 * Performs checksum calculation and validates the EEPROM checksum. If the
580 * caller does not need checksum_val, the value can be NULL. 580 * caller does not need checksum_val, the value can be NULL.
581 **/ 581 **/
582s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, 582s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
583 u16 *checksum_val) 583 u16 *checksum_val)
584{ 584{
585 s32 status; 585 s32 status;
586 u16 checksum; 586 u16 checksum;
587 u16 read_checksum = 0; 587 u16 read_checksum = 0;
588 588
589 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); 589 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
590 590
591 /* Read the first word from the EEPROM. If this times out or fails, do 591 /* Read the first word from the EEPROM. If this times out or fails, do
592 * not continue or we could be in for a very long wait while every 592 * not continue or we could be in for a very long wait while every
593 * EEPROM read fails 593 * EEPROM read fails
594 */ 594 */
595 status = hw->eeprom.ops.read(hw, 0, &checksum); 595 status = hw->eeprom.ops.read(hw, 0, &checksum);
596 if (status) { 596 if (status) {
597 DEBUGOUT("EEPROM read failed\n"); 597 DEBUGOUT("EEPROM read failed\n");
598 return status; 598 return status;
599 } 599 }
600 600
601 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) 601 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
602 return IXGBE_ERR_SWFW_SYNC; 602 return IXGBE_ERR_SWFW_SYNC;
603 603
604 status = hw->eeprom.ops.calc_checksum(hw); 604 status = hw->eeprom.ops.calc_checksum(hw);
605 if (status < 0) 605 if (status < 0)
606 goto out; 606 goto out;
607 607
608 checksum = (u16)(status & 0xffff); 608 checksum = (u16)(status & 0xffff);
609 609
610 /* Do not use hw->eeprom.ops.read because we do not want to take 610 /* Do not use hw->eeprom.ops.read because we do not want to take
611 * the synchronization semaphores twice here. 611 * the synchronization semaphores twice here.
612 */ 612 */
613 status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, 613 status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
614 &read_checksum); 614 &read_checksum);
615 if (status) 615 if (status)
616 goto out; 616 goto out;
617 617
618 /* Verify read checksum from EEPROM is the same as 618 /* Verify read checksum from EEPROM is the same as
619 * calculated checksum 619 * calculated checksum
620 */ 620 */
621 if (read_checksum != checksum) { 621 if (read_checksum != checksum) {
622 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, 622 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
623 "Invalid EEPROM checksum"); 623 "Invalid EEPROM checksum");
624 status = IXGBE_ERR_EEPROM_CHECKSUM; 624 status = IXGBE_ERR_EEPROM_CHECKSUM;
625 } 625 }
626 626
627 /* If the user cares, return the calculated checksum */ 627 /* If the user cares, return the calculated checksum */
628 if (checksum_val) 628 if (checksum_val)
629 *checksum_val = checksum; 629 *checksum_val = checksum;
630 630
631out: 631out:
632 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 632 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
633 633
634 return status; 634 return status;
635} 635}
636 636
637/** 637/**
638 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash 638 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
639 * @hw: pointer to hardware structure 639 * @hw: pointer to hardware structure
640 * 640 *
641 * After writing the EEPROM to shadow RAM using the EEWR register, software    641 * After writing the EEPROM to shadow RAM using the EEWR register, software
642 * calculates the checksum, updates the EEPROM, and instructs the hardware    642 * calculates the checksum, updates the EEPROM, and instructs the hardware
643 * to update the flash.    643 * to update the flash.
644 **/ 644 **/
645s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) 645s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
646{ 646{
647 s32 status; 647 s32 status;
648 u16 checksum; 648 u16 checksum;
649 649
650 DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); 650 DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
651 651
652 /* Read the first word from the EEPROM. If this times out or fails, do 652 /* Read the first word from the EEPROM. If this times out or fails, do
653 * not continue or we could be in for a very long wait while every 653 * not continue or we could be in for a very long wait while every
654 * EEPROM read fails 654 * EEPROM read fails
655 */ 655 */
656 status = hw->eeprom.ops.read(hw, 0, &checksum); 656 status = hw->eeprom.ops.read(hw, 0, &checksum);
657 if (status) { 657 if (status) {
658 DEBUGOUT("EEPROM read failed\n"); 658 DEBUGOUT("EEPROM read failed\n");
659 return status; 659 return status;
660 } 660 }
661 661
662 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) 662 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
663 return IXGBE_ERR_SWFW_SYNC; 663 return IXGBE_ERR_SWFW_SYNC;
664 664
665 status = hw->eeprom.ops.calc_checksum(hw); 665 status = hw->eeprom.ops.calc_checksum(hw);
666 if (status < 0) 666 if (status < 0)
667 goto out; 667 goto out;
668 668
669 checksum = (u16)(status & 0xffff); 669 checksum = (u16)(status & 0xffff);
670 670
671 /* Do not use hw->eeprom.ops.write because we do not want to 671 /* Do not use hw->eeprom.ops.write because we do not want to
672 * take the synchronization semaphores twice here. 672 * take the synchronization semaphores twice here.
673 */ 673 */
674 status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); 674 status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
675 if (status) 675 if (status)
676 goto out; 676 goto out;
677 677
678 status = ixgbe_update_flash_X540(hw); 678 status = ixgbe_update_flash_X540(hw);
679 679
680out: 680out:
681 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 681 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
682 682
683 return status; 683 return status;
684} 684}
685 685
686/** 686/**
687 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device 687 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
688 * @hw: pointer to hardware structure 688 * @hw: pointer to hardware structure
689 * 689 *
690 * Set FLUP (bit 23) of the EEC register to instruct the hardware to copy    690 * Set FLUP (bit 23) of the EEC register to instruct the hardware to copy
691 * EEPROM from shadow RAM to the flash device. 691 * EEPROM from shadow RAM to the flash device.
692 **/ 692 **/
693s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) 693s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
694{ 694{
695 u32 flup; 695 u32 flup;
696 s32 status; 696 s32 status;
697 697
698 DEBUGFUNC("ixgbe_update_flash_X540"); 698 DEBUGFUNC("ixgbe_update_flash_X540");
699 699
700 status = ixgbe_poll_flash_update_done_X540(hw); 700 status = ixgbe_poll_flash_update_done_X540(hw);
701 if (status == IXGBE_ERR_EEPROM) { 701 if (status == IXGBE_ERR_EEPROM) {
702 DEBUGOUT("Flash update time out\n"); 702 DEBUGOUT("Flash update time out\n");
703 goto out; 703 goto out;
704 } 704 }
705 705
706 flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; 706 flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP;
707 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); 707 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
708 708
709 status = ixgbe_poll_flash_update_done_X540(hw); 709 status = ixgbe_poll_flash_update_done_X540(hw);
710 if (status == IXGBE_SUCCESS) 710 if (status == IXGBE_SUCCESS)
711 DEBUGOUT("Flash update complete\n"); 711 DEBUGOUT("Flash update complete\n");
712 else 712 else
713 DEBUGOUT("Flash update time out\n"); 713 DEBUGOUT("Flash update time out\n");
714 714
715 if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { 715 if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
716 flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 716 flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
717 717
718 if (flup & IXGBE_EEC_SEC1VAL) { 718 if (flup & IXGBE_EEC_SEC1VAL) {
719 flup |= IXGBE_EEC_FLUP; 719 flup |= IXGBE_EEC_FLUP;
720 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); 720 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup);
721 } 721 }
722 722
723 status = ixgbe_poll_flash_update_done_X540(hw); 723 status = ixgbe_poll_flash_update_done_X540(hw);
724 if (status == IXGBE_SUCCESS) 724 if (status == IXGBE_SUCCESS)
725 DEBUGOUT("Flash update complete\n"); 725 DEBUGOUT("Flash update complete\n");
726 else 726 else
727 DEBUGOUT("Flash update time out\n"); 727 DEBUGOUT("Flash update time out\n");
728 } 728 }
729out: 729out:
730 return status; 730 return status;
731} 731}
732 732
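
Note: stripped of the X540 rev-0 second-sector quirk, the EEC handshake above
reduces to three steps: wait for any in-flight update (FLUDONE), request a new
one (FLUP), then wait for completion. A condensed restatement reusing the
driver's names (not itself part of the source):

	static s32
	flash_update_once(struct ixgbe_hw *hw)
	{
		u32 eec;
		s32 status;

		/* 1. wait out any update already in flight */
		status = ixgbe_poll_flash_update_done_X540(hw);
		if (status != IXGBE_SUCCESS)
			return status;
		/* 2. set FLUP to start copying shadow RAM to flash */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
		IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec | IXGBE_EEC_FLUP);
		/* 3. wait for this update to finish */
		return ixgbe_poll_flash_update_done_X540(hw);
	}
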
733/** 733/**
734 * ixgbe_poll_flash_update_done_X540 - Poll flash update status 734 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
735 * @hw: pointer to hardware structure 735 * @hw: pointer to hardware structure
736 * 736 *
737 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the 737 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
738 * flash update is done. 738 * flash update is done.
739 **/ 739 **/
740static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) 740static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
741{ 741{
742 u32 i; 742 u32 i;
743 u32 reg; 743 u32 reg;
744 s32 status = IXGBE_ERR_EEPROM; 744 s32 status = IXGBE_ERR_EEPROM;
745 745
746 DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); 746 DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
747 747
748 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { 748 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
749 reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); 749 reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
750 if (reg & IXGBE_EEC_FLUDONE) { 750 if (reg & IXGBE_EEC_FLUDONE) {
751 status = IXGBE_SUCCESS; 751 status = IXGBE_SUCCESS;
752 break; 752 break;
753 } 753 }
754 msec_delay(5); 754 msec_delay(5);
755 } 755 }
756 756
757 if (i == IXGBE_FLUDONE_ATTEMPTS) 757 if (i == IXGBE_FLUDONE_ATTEMPTS)
758 ERROR_REPORT1(IXGBE_ERROR_POLLING, 758 ERROR_REPORT1(IXGBE_ERROR_POLLING,
759 "Flash update status polling timed out"); 759 "Flash update status polling timed out");
760 760
761 return status; 761 return status;
762} 762}
763 763
764/** 764/**
765 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore 765 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
766 * @hw: pointer to hardware structure 766 * @hw: pointer to hardware structure
767 * @mask: Mask to specify which semaphore to acquire 767 * @mask: Mask to specify which semaphore to acquire
768 * 768 *
769 * Acquires the SWFW semaphore through the SW_FW_SYNC register for    769 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
770 * the specified function (CSR, PHY0, PHY1, NVM, Flash) 770 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
771 **/ 771 **/
772s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) 772s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
773{ 773{
774 u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; 774 u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
775 u32 fwmask = swmask << 5; 775 u32 fwmask = swmask << 5;
776 u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; 776 u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
777 u32 timeout = 200; 777 u32 timeout = 200;
778 u32 hwmask = 0; 778 u32 hwmask = 0;
779 u32 swfw_sync; 779 u32 swfw_sync;
780 u32 i; 780 u32 i;
781 781
782 DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); 782 DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
783 783
784 if (swmask & IXGBE_GSSR_EEP_SM) 784 if (swmask & IXGBE_GSSR_EEP_SM)
785 hwmask |= IXGBE_GSSR_FLASH_SM; 785 hwmask |= IXGBE_GSSR_FLASH_SM;
786 786
787 /* SW only mask doesn't have FW bit pair */ 787 /* SW only mask doesn't have FW bit pair */
788 if (mask & IXGBE_GSSR_SW_MNG_SM) 788 if (mask & IXGBE_GSSR_SW_MNG_SM)
789 swmask |= IXGBE_GSSR_SW_MNG_SM; 789 swmask |= IXGBE_GSSR_SW_MNG_SM;
790 790
791 swmask |= swi2c_mask; 791 swmask |= swi2c_mask;
792 fwmask |= swi2c_mask << 2; 792 fwmask |= swi2c_mask << 2;
793 if (hw->mac.type >= ixgbe_mac_X550) 793 if (hw->mac.type >= ixgbe_mac_X550)
794 timeout = 1000; 794 timeout = 1000;
795 795
796 for (i = 0; i < timeout; i++) { 796 for (i = 0; i < timeout; i++) {
797 /* SW NVM semaphore bit is used for access to all 797 /* SW NVM semaphore bit is used for access to all
798 * SW_FW_SYNC bits (not just NVM) 798 * SW_FW_SYNC bits (not just NVM)
799 */ 799 */
800 if (ixgbe_get_swfw_sync_semaphore(hw)) { 800 if (ixgbe_get_swfw_sync_semaphore(hw)) {
801 DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n"); 801 DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
802 return IXGBE_ERR_SWFW_SYNC; 802 return IXGBE_ERR_SWFW_SYNC;
803 } 803 }
804 804
805 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); 805 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
806 if (!(swfw_sync & (fwmask | swmask | hwmask))) { 806 if (!(swfw_sync & (fwmask | swmask | hwmask))) {
807 swfw_sync |= swmask; 807 swfw_sync |= swmask;
808 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), 808 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw),
809 swfw_sync); 809 swfw_sync);
810 ixgbe_release_swfw_sync_semaphore(hw); 810 ixgbe_release_swfw_sync_semaphore(hw);
811 return IXGBE_SUCCESS; 811 return IXGBE_SUCCESS;
812 } 812 }
813 /* Firmware currently using resource (fwmask), hardware 813 /* Firmware currently using resource (fwmask), hardware
814 * currently using resource (hwmask), or other software 814 * currently using resource (hwmask), or other software
815 * thread currently using resource (swmask) 815 * thread currently using resource (swmask)
816 */ 816 */
817 ixgbe_release_swfw_sync_semaphore(hw); 817 ixgbe_release_swfw_sync_semaphore(hw);
818 msec_delay(5); 818 msec_delay(5);
819 } 819 }
820 820
821 /* If the resource is not released by the FW/HW the SW can assume that 821 /* If the resource is not released by the FW/HW the SW can assume that
822 * the FW/HW malfunctions. In that case the SW should set the SW bit(s) 822 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
823 * of the requested resource(s) while ignoring the corresponding FW/HW 823 * of the requested resource(s) while ignoring the corresponding FW/HW
824 * bits in the SW_FW_SYNC register. 824 * bits in the SW_FW_SYNC register.
825 */ 825 */
826 if (ixgbe_get_swfw_sync_semaphore(hw)) { 826 if (ixgbe_get_swfw_sync_semaphore(hw)) {
827 DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); 827 DEBUGOUT("Failed to get NVM semaphore and register semaphore while forcefully ignoring FW semaphore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
828 return IXGBE_ERR_SWFW_SYNC; 828 return IXGBE_ERR_SWFW_SYNC;
829 } 829 }
830 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); 830 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
831 if (swfw_sync & (fwmask | hwmask)) { 831 if (swfw_sync & (fwmask | hwmask)) {
832 swfw_sync |= swmask; 832 swfw_sync |= swmask;
833 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); 833 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
834 ixgbe_release_swfw_sync_semaphore(hw); 834 ixgbe_release_swfw_sync_semaphore(hw);
835 msec_delay(5); 835 msec_delay(5);
836 return IXGBE_SUCCESS; 836 return IXGBE_SUCCESS;
837 } 837 }
838 /* If the resource is not released by other SW the SW can assume that 838 /* If the resource is not released by other SW the SW can assume that
839 * the other SW malfunctions. In that case the SW should clear all SW 839 * the other SW malfunctions. In that case the SW should clear all SW
840 * flags that it does not own and then repeat the whole process once 840 * flags that it does not own and then repeat the whole process once
841 * again. 841 * again.
842 */ 842 */
843 if (swfw_sync & swmask) { 843 if (swfw_sync & swmask) {
844 u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | 844 u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
845 IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | 845 IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
846 IXGBE_GSSR_SW_MNG_SM; 846 IXGBE_GSSR_SW_MNG_SM;
847 847
848 if (swi2c_mask) 848 if (swi2c_mask)
849 rmask |= IXGBE_GSSR_I2C_MASK; 849 rmask |= IXGBE_GSSR_I2C_MASK;
850 ixgbe_release_swfw_sync_X540(hw, rmask); 850 ixgbe_release_swfw_sync_X540(hw, rmask);
851 ixgbe_release_swfw_sync_semaphore(hw); 851 ixgbe_release_swfw_sync_semaphore(hw);
852 DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n"); 852 DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
853 return IXGBE_ERR_SWFW_SYNC; 853 return IXGBE_ERR_SWFW_SYNC;
854 } 854 }
855 ixgbe_release_swfw_sync_semaphore(hw); 855 ixgbe_release_swfw_sync_semaphore(hw);
856 DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n"); 856 DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n");
857 857
858 return IXGBE_ERR_SWFW_SYNC; 858 return IXGBE_ERR_SWFW_SYNC;
859} 859}
860 860
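
Note: the mask arithmetic above encodes the SW_FW_SYNC layout: each NVM/PHY
resource owns a software bit whose firmware counterpart sits 5 bits higher,
the I2C software bits have firmware counterparts 2 bits higher, and
IXGBE_GSSR_SW_MNG_SM is software-only with no firmware pair. An illustrative
helper (not in the driver) deriving the full contention mask:

	static u32
	swfw_contention_mask(u32 mask)
	{
		u32 sw = mask & IXGBE_GSSR_NVM_PHY_MASK;
		u32 fw = sw << 5;			/* FW pair, 5 bits up */

		sw |= mask & IXGBE_GSSR_SW_MNG_SM;	/* SW-only, no FW pair */
		sw |= mask & IXGBE_GSSR_I2C_MASK;
		fw |= (mask & IXGBE_GSSR_I2C_MASK) << 2; /* I2C FW, 2 bits up */
		return sw | fw;		/* any bit set => resource is busy */
	}
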
861/** 861/**
862 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore 862 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
863 * @hw: pointer to hardware structure 863 * @hw: pointer to hardware structure
864 * @mask: Mask to specify which semaphore to release 864 * @mask: Mask to specify which semaphore to release
865 * 865 *
866 * Releases the SWFW semaphore through the SW_FW_SYNC register 866 * Releases the SWFW semaphore through the SW_FW_SYNC register
867 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)    867 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
868 **/ 868 **/
869void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) 869void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
870{ 870{
871 u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); 871 u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
872 u32 swfw_sync; 872 u32 swfw_sync;
873 873
874 DEBUGFUNC("ixgbe_release_swfw_sync_X540"); 874 DEBUGFUNC("ixgbe_release_swfw_sync_X540");
875 875
876 if (mask & IXGBE_GSSR_I2C_MASK) 876 if (mask & IXGBE_GSSR_I2C_MASK)
877 swmask |= mask & IXGBE_GSSR_I2C_MASK; 877 swmask |= mask & IXGBE_GSSR_I2C_MASK;
878 ixgbe_get_swfw_sync_semaphore(hw); 878 ixgbe_get_swfw_sync_semaphore(hw);
879 879
880 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); 880 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
881 swfw_sync &= ~swmask; 881 swfw_sync &= ~swmask;
882 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); 882 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
883 883
884 ixgbe_release_swfw_sync_semaphore(hw); 884 ixgbe_release_swfw_sync_semaphore(hw);
885 msec_delay(2); 885 msec_delay(2);
886} 886}
887 887
888/** 888/**
889 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore 889 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
890 * @hw: pointer to hardware structure 890 * @hw: pointer to hardware structure
891 * 891 *
892 * Sets the hardware semaphores so SW/FW can gain control of shared resources 892 * Sets the hardware semaphores so SW/FW can gain control of shared resources
893 **/ 893 **/
894static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) 894static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
895{ 895{
896 s32 status = IXGBE_ERR_EEPROM; 896 s32 status = IXGBE_ERR_EEPROM;
897 u32 timeout = 2000; 897 u32 timeout = 2000;
898 u32 i; 898 u32 i;
899 u32 swsm; 899 u32 swsm;
900 900
901 DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); 901 DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
902 902
903 /* Get SMBI software semaphore between device drivers first */ 903 /* Get SMBI software semaphore between device drivers first */
904 for (i = 0; i < timeout; i++) { 904 for (i = 0; i < timeout; i++) {
905 /* 905 /*
906 * If the SMBI bit is 0 when we read it, then the bit will be 906 * If the SMBI bit is 0 when we read it, then the bit will be
907 * set and we have the semaphore 907 * set and we have the semaphore
908 */ 908 */
909 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 909 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
910 if (!(swsm & IXGBE_SWSM_SMBI)) { 910 if (!(swsm & IXGBE_SWSM_SMBI)) {
911 status = IXGBE_SUCCESS; 911 status = IXGBE_SUCCESS;
912 break; 912 break;
913 } 913 }
914 usec_delay(50); 914 usec_delay(50);
915 } 915 }
916 916
917 /* Now get the semaphore between SW/FW through the REGSMP bit */ 917 /* Now get the semaphore between SW/FW through the REGSMP bit */
918 if (status == IXGBE_SUCCESS) { 918 if (status == IXGBE_SUCCESS) {
919 for (i = 0; i < timeout; i++) { 919 for (i = 0; i < timeout; i++) {
920 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); 920 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
921 if (!(swsm & IXGBE_SWFW_REGSMP)) 921 if (!(swsm & IXGBE_SWFW_REGSMP))
922 break; 922 break;
923 923
924 usec_delay(50); 924 usec_delay(50);
925 } 925 }
926 926
927 /* 927 /*
928 * Release semaphores and return error if SW NVM semaphore 928 * Release semaphores and return error if SW NVM semaphore
929 * was not granted because we don't have access to the EEPROM 929 * was not granted because we don't have access to the EEPROM
930 */ 930 */
931 if (i >= timeout) { 931 if (i >= timeout) {
932 ERROR_REPORT1(IXGBE_ERROR_POLLING, 932 ERROR_REPORT1(IXGBE_ERROR_POLLING,
933 "REGSMP Software NVM semaphore not granted.\n"); 933 "REGSMP Software NVM semaphore not granted.\n");
934 ixgbe_release_swfw_sync_semaphore(hw); 934 ixgbe_release_swfw_sync_semaphore(hw);
935 status = IXGBE_ERR_EEPROM; 935 status = IXGBE_ERR_EEPROM;
936 } 936 }
937 } else { 937 } else {
938 ERROR_REPORT1(IXGBE_ERROR_POLLING, 938 ERROR_REPORT1(IXGBE_ERROR_POLLING,
939 "Software semaphore SMBI between device drivers " 939 "Software semaphore SMBI between device drivers "
940 "not granted.\n"); 940 "not granted.\n");
941 } 941 }
942 942
943 return status; 943 return status;
944} 944}
945 945
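
Note: acquisition is two-level: SWSM.SMBI first arbitrates between device
drivers, and only then is SW_FW_SYNC.REGSMP taken to arbitrate software
against firmware; ixgbe_release_swfw_sync_semaphore() below clears them in
the reverse order. A hypothetical wrapper, purely to make the pairing
explicit:

	static s32
	with_swfw_sync_registers(struct ixgbe_hw *hw,
	    void (*body)(struct ixgbe_hw *))
	{
		s32 status = ixgbe_get_swfw_sync_semaphore(hw);

		if (status != IXGBE_SUCCESS)	/* SMBI, then REGSMP taken */
			return status;
		body(hw);		/* safe to modify SW_FW_SYNC here */
		ixgbe_release_swfw_sync_semaphore(hw); /* REGSMP, then SMBI */
		return IXGBE_SUCCESS;
	}
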
946/** 946/**
947 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore 947 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
948 * @hw: pointer to hardware structure 948 * @hw: pointer to hardware structure
949 * 949 *
950 * This function clears hardware semaphore bits. 950 * This function clears hardware semaphore bits.
951 **/ 951 **/
952static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) 952static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
953{ 953{
954 u32 swsm; 954 u32 swsm;
955 955
956 DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); 956 DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
957 957
958 /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ 958 /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
959 959
960 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); 960 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
961 swsm &= ~IXGBE_SWFW_REGSMP; 961 swsm &= ~IXGBE_SWFW_REGSMP;
962 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); 962 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm);
963 963
964 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); 964 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
965 swsm &= ~IXGBE_SWSM_SMBI; 965 swsm &= ~IXGBE_SWSM_SMBI;
966 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); 966 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
967 967
968 IXGBE_WRITE_FLUSH(hw); 968 IXGBE_WRITE_FLUSH(hw);
969} 969}
970 970
971/** 971/**
972 * ixgbe_init_swfw_sync_X540 - Reset SWFW semaphore state    972 * ixgbe_init_swfw_sync_X540 - Reset SWFW semaphore state
973 * @hw: pointer to hardware structure 973 * @hw: pointer to hardware structure
974 * 974 *
975 * This function resets hardware semaphore bits for a semaphore that may    975 * This function resets hardware semaphore bits for a semaphore that may
976 * have been left locked due to a catastrophic failure.    976 * have been left locked due to a catastrophic failure.
977 **/ 977 **/
978void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) 978void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
979{ 979{
980 u32 rmask; 980 u32 rmask;
981 981
982 /* First try to grab the semaphore; there is no need to check    982 /* First try to grab the semaphore; there is no need to check
983 * whether we actually got the lock, because the action taken    983 * whether we actually got the lock, because the action taken
984 * is the same either way:    984 * is the same either way:
985 * We got the lock - we release it.    985 * We got the lock - we release it.
986 * We timed out trying to get the lock - we force its release.    986 * We timed out trying to get the lock - we force its release.
987 */ 987 */
988 ixgbe_get_swfw_sync_semaphore(hw); 988 ixgbe_get_swfw_sync_semaphore(hw);
989 ixgbe_release_swfw_sync_semaphore(hw); 989 ixgbe_release_swfw_sync_semaphore(hw);
990 990
991 /* Acquire and release all software resources. */ 991 /* Acquire and release all software resources. */
992 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | 992 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
993 IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | 993 IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
994 IXGBE_GSSR_SW_MNG_SM; 994 IXGBE_GSSR_SW_MNG_SM;
995 995
996 rmask |= IXGBE_GSSR_I2C_MASK; 996 rmask |= IXGBE_GSSR_I2C_MASK;
997 ixgbe_acquire_swfw_sync_X540(hw, rmask); 997 ixgbe_acquire_swfw_sync_X540(hw, rmask);
998 ixgbe_release_swfw_sync_X540(hw, rmask); 998 ixgbe_release_swfw_sync_X540(hw, rmask);
999} 999}
1000 1000
1001/** 1001/**
1002 * ixgbe_blink_led_start_X540 - Blink LED based on index. 1002 * ixgbe_blink_led_start_X540 - Blink LED based on index.
1003 * @hw: pointer to hardware structure 1003 * @hw: pointer to hardware structure
1004 * @index: led number to blink 1004 * @index: led number to blink
1005 * 1005 *
1006 * Devices that implement the version 2 interface: 1006 * Devices that implement the version 2 interface:
1007 * X540 1007 * X540
1008 **/ 1008 **/
1009s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) 1009s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
1010{ 1010{
1011 u32 macc_reg; 1011 u32 macc_reg;
1012 u32 ledctl_reg; 1012 u32 ledctl_reg;
1013 ixgbe_link_speed speed; 1013 ixgbe_link_speed speed;
1014 bool link_up; 1014 bool link_up;
1015 1015
1016 DEBUGFUNC("ixgbe_blink_led_start_X540"); 1016 DEBUGFUNC("ixgbe_blink_led_start_X540");
1017 1017
1018 if (index > 3) 1018 if (index > 3)
1019 return IXGBE_ERR_PARAM; 1019 return IXGBE_ERR_PARAM;
1020 1020
1021 /* 1021 /*
1022 * Link should be up in order for the blink bit in the LED control 1022 * Link should be up in order for the blink bit in the LED control
1023 * register to work. Force link and speed in the MAC if link is down. 1023 * register to work. Force link and speed in the MAC if link is down.
1024 * This will be reversed when we stop the blinking. 1024 * This will be reversed when we stop the blinking.
1025 */ 1025 */
1026 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); 1026 hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
1027 if (link_up == FALSE) { 1027 if (link_up == FALSE) {
1028 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); 1028 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
1029 macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; 1029 macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
1030 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); 1030 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
1031 } 1031 }
1032 /* Set the LED to LINK_UP + BLINK. */ 1032 /* Set the LED to LINK_UP + BLINK. */
1033 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1033 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1034 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); 1034 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
1035 ledctl_reg |= IXGBE_LED_BLINK(index); 1035 ledctl_reg |= IXGBE_LED_BLINK(index);
1036 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); 1036 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
1037 IXGBE_WRITE_FLUSH(hw); 1037 IXGBE_WRITE_FLUSH(hw);
1038 1038
1039 return IXGBE_SUCCESS; 1039 return IXGBE_SUCCESS;
1040} 1040}
1041 1041
1042/** 1042/**
1043 * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. 1043 * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
1044 * @hw: pointer to hardware structure 1044 * @hw: pointer to hardware structure
1045 * @index: led number to stop blinking 1045 * @index: led number to stop blinking
1046 * 1046 *
1047 * Devices that implement the version 2 interface: 1047 * Devices that implement the version 2 interface:
1048 * X540 1048 * X540
1049 **/ 1049 **/
1050s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) 1050s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
1051{ 1051{
1052 u32 macc_reg; 1052 u32 macc_reg;
1053 u32 ledctl_reg; 1053 u32 ledctl_reg;
1054 1054
1055 if (index > 3) 1055 if (index > 3)
1056 return IXGBE_ERR_PARAM; 1056 return IXGBE_ERR_PARAM;
1057 1057
1058 DEBUGFUNC("ixgbe_blink_led_stop_X540"); 1058 DEBUGFUNC("ixgbe_blink_led_stop_X540");
1059 1059
1060 /* Restore the LED to its default value. */ 1060 /* Restore the LED to its default value. */
1061 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 1061 ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1062 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); 1062 ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
1063 ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 1063 ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
1064 ledctl_reg &= ~IXGBE_LED_BLINK(index); 1064 ledctl_reg &= ~IXGBE_LED_BLINK(index);
1065 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); 1065 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
1066 1066
1067 /* Unforce link and speed in the MAC. */ 1067 /* Unforce link and speed in the MAC. */
1068 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); 1068 macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
1069 macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); 1069 macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
1070 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); 1070 IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
1071 IXGBE_WRITE_FLUSH(hw); 1071 IXGBE_WRITE_FLUSH(hw);
1072 1072
1073 return IXGBE_SUCCESS; 1073 return IXGBE_SUCCESS;
1074} 1074}

cvs diff -r1.125.2.10 -r1.125.2.11 src/sys/dev/pci/ixgbe/ixv.c (switch to unified diff)

--- src/sys/dev/pci/ixgbe/ixv.c 2020/07/10 11:35:51 1.125.2.10
+++ src/sys/dev/pci/ixgbe/ixv.c 2021/03/11 16:00:24 1.125.2.11
@@ -1,1499 +1,1502 @@ @@ -1,1499 +1,1502 @@
1/*$NetBSD: ixv.c,v 1.125.2.10 2020/07/10 11:35:51 martin Exp $*/ 1/*$NetBSD: ixv.c,v 1.125.2.11 2021/03/11 16:00:24 martin Exp $*/
2 2
3/****************************************************************************** 3/******************************************************************************
4 4
5 Copyright (c) 2001-2017, Intel Corporation 5 Copyright (c) 2001-2017, Intel Corporation
6 All rights reserved. 6 All rights reserved.
7 7
8 Redistribution and use in source and binary forms, with or without 8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met: 9 modification, are permitted provided that the following conditions are met:
10 10
11 1. Redistributions of source code must retain the above copyright notice, 11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer. 12 this list of conditions and the following disclaimer.
13 13
14 2. Redistributions in binary form must reproduce the above copyright 14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the 15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution. 16 documentation and/or other materials provided with the distribution.
17 17
18 3. Neither the name of the Intel Corporation nor the names of its 18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from 19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission. 20 this software without specific prior written permission.
21 21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE. 32 POSSIBILITY OF SUCH DAMAGE.
33 33
34******************************************************************************/ 34******************************************************************************/
35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/ 35/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
36 36
37#ifdef _KERNEL_OPT 37#ifdef _KERNEL_OPT
38#include "opt_inet.h" 38#include "opt_inet.h"
39#include "opt_inet6.h" 39#include "opt_inet6.h"
40#include "opt_net_mpsafe.h" 40#include "opt_net_mpsafe.h"
 41#include "opt_ixgbe.h"
41#endif 42#endif
42 43
43#include "ixgbe.h" 44#include "ixgbe.h"
44#include "vlan.h" 45#include "vlan.h"
45 46
46/************************************************************************ 47/************************************************************************
47 * Driver version 48 * Driver version
48 ************************************************************************/ 49 ************************************************************************/
49static const char ixv_driver_version[] = "2.0.1-k"; 50static const char ixv_driver_version[] = "2.0.1-k";
50/* XXX NetBSD: + 1.5.17 */ 51/* XXX NetBSD: + 1.5.17 */
51 52
52/************************************************************************ 53/************************************************************************
53 * PCI Device ID Table 54 * PCI Device ID Table
54 * 55 *
55 * Used by probe to select devices to load on 56 * Used by probe to select devices to load on
56 * Last field stores an index into ixv_strings 57 * Last field stores an index into ixv_strings
57 * Last entry must be all 0s 58 * Last entry must be all 0s
58 * 59 *
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
60 ************************************************************************/ 61 ************************************************************************/
61static const ixgbe_vendor_info_t ixv_vendor_info_array[] = 62static const ixgbe_vendor_info_t ixv_vendor_info_array[] =
62{ 63{
63 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, 64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
64 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, 65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
65 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0}, 66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
66 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0}, 67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
67 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0}, 68 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, 0, 0, 0},
68 /* required last entry */ 69 /* required last entry */
69 {0, 0, 0, 0, 0} 70 {0, 0, 0, 0, 0}
70}; 71};
71 72
72/************************************************************************ 73/************************************************************************
73 * Table of branding strings 74 * Table of branding strings
74 ************************************************************************/ 75 ************************************************************************/
75static const char *ixv_strings[] = { 76static const char *ixv_strings[] = {
76 "Intel(R) PRO/10GbE Virtual Function Network Driver" 77 "Intel(R) PRO/10GbE Virtual Function Network Driver"
77}; 78};
78 79
79/********************************************************************* 80/*********************************************************************
80 * Function prototypes 81 * Function prototypes
81 *********************************************************************/ 82 *********************************************************************/
82static int ixv_probe(device_t, cfdata_t, void *); 83static int ixv_probe(device_t, cfdata_t, void *);
83static void ixv_attach(device_t, device_t, void *); 84static void ixv_attach(device_t, device_t, void *);
84static int ixv_detach(device_t, int); 85static int ixv_detach(device_t, int);
85#if 0 86#if 0
86static int ixv_shutdown(device_t); 87static int ixv_shutdown(device_t);
87#endif 88#endif
88static int ixv_ifflags_cb(struct ethercom *); 89static int ixv_ifflags_cb(struct ethercom *);
89static int ixv_ioctl(struct ifnet *, u_long, void *); 90static int ixv_ioctl(struct ifnet *, u_long, void *);
90static int ixv_init(struct ifnet *); 91static int ixv_init(struct ifnet *);
91static void ixv_init_locked(struct adapter *); 92static void ixv_init_locked(struct adapter *);
92static void ixv_ifstop(struct ifnet *, int); 93static void ixv_ifstop(struct ifnet *, int);
93static void ixv_stop(void *); 94static void ixv_stop(void *);
94static void ixv_init_device_features(struct adapter *); 95static void ixv_init_device_features(struct adapter *);
95static void ixv_media_status(struct ifnet *, struct ifmediareq *); 96static void ixv_media_status(struct ifnet *, struct ifmediareq *);
96static int ixv_media_change(struct ifnet *); 97static int ixv_media_change(struct ifnet *);
97static int ixv_allocate_pci_resources(struct adapter *, 98static int ixv_allocate_pci_resources(struct adapter *,
98 const struct pci_attach_args *); 99 const struct pci_attach_args *);
99static int ixv_allocate_msix(struct adapter *, 100static int ixv_allocate_msix(struct adapter *,
100 const struct pci_attach_args *); 101 const struct pci_attach_args *);
101static int ixv_configure_interrupts(struct adapter *); 102static int ixv_configure_interrupts(struct adapter *);
102static void ixv_free_pci_resources(struct adapter *); 103static void ixv_free_pci_resources(struct adapter *);
103static void ixv_local_timer(void *); 104static void ixv_local_timer(void *);
104static void ixv_local_timer_locked(void *); 105static void ixv_local_timer_locked(void *);
105static int ixv_setup_interface(device_t, struct adapter *); 106static int ixv_setup_interface(device_t, struct adapter *);
106static int ixv_negotiate_api(struct adapter *); 107static int ixv_negotiate_api(struct adapter *);
107 108
108static void ixv_initialize_transmit_units(struct adapter *); 109static void ixv_initialize_transmit_units(struct adapter *);
109static void ixv_initialize_receive_units(struct adapter *); 110static void ixv_initialize_receive_units(struct adapter *);
110static void ixv_initialize_rss_mapping(struct adapter *); 111static void ixv_initialize_rss_mapping(struct adapter *);
111static s32 ixv_check_link(struct adapter *); 112static s32 ixv_check_link(struct adapter *);
112 113
113static void ixv_enable_intr(struct adapter *); 114static void ixv_enable_intr(struct adapter *);
114static void ixv_disable_intr(struct adapter *); 115static void ixv_disable_intr(struct adapter *);
115static int ixv_set_rxfilter(struct adapter *); 116static int ixv_set_rxfilter(struct adapter *);
116static void ixv_update_link_status(struct adapter *); 117static void ixv_update_link_status(struct adapter *);
117static int ixv_sysctl_debug(SYSCTLFN_PROTO); 118static int ixv_sysctl_debug(SYSCTLFN_PROTO);
118static void ixv_set_ivar(struct adapter *, u8, u8, s8); 119static void ixv_set_ivar(struct adapter *, u8, u8, s8);
119static void ixv_configure_ivars(struct adapter *); 120static void ixv_configure_ivars(struct adapter *);
120static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 121static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
121static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t); 122static void ixv_eitr_write(struct adapter *, uint32_t, uint32_t);
122 123
123static void ixv_setup_vlan_tagging(struct adapter *); 124static void ixv_setup_vlan_tagging(struct adapter *);
124static int ixv_setup_vlan_support(struct adapter *); 125static int ixv_setup_vlan_support(struct adapter *);
125static int ixv_vlan_cb(struct ethercom *, uint16_t, bool); 126static int ixv_vlan_cb(struct ethercom *, uint16_t, bool);
126static int ixv_register_vlan(struct adapter *, u16); 127static int ixv_register_vlan(struct adapter *, u16);
127static int ixv_unregister_vlan(struct adapter *, u16); 128static int ixv_unregister_vlan(struct adapter *, u16);
128 129
129static void ixv_add_device_sysctls(struct adapter *); 130static void ixv_add_device_sysctls(struct adapter *);
130static void ixv_save_stats(struct adapter *); 131static void ixv_save_stats(struct adapter *);
131static void ixv_init_stats(struct adapter *); 132static void ixv_init_stats(struct adapter *);
132static void ixv_update_stats(struct adapter *); 133static void ixv_update_stats(struct adapter *);
133static void ixv_add_stats_sysctls(struct adapter *); 134static void ixv_add_stats_sysctls(struct adapter *);
134static void ixv_clear_evcnt(struct adapter *); 135static void ixv_clear_evcnt(struct adapter *);
135 136
136/* Sysctl handlers */ 137/* Sysctl handlers */
137static void ixv_set_sysctl_value(struct adapter *, const char *, 138static void ixv_set_sysctl_value(struct adapter *, const char *,
138 const char *, int *, int); 139 const char *, int *, int);
139static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO); 140static int ixv_sysctl_interrupt_rate_handler(SYSCTLFN_PROTO);
140static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO); 141static int ixv_sysctl_next_to_check_handler(SYSCTLFN_PROTO);
141static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO); 142static int ixv_sysctl_rdh_handler(SYSCTLFN_PROTO);
142static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO); 143static int ixv_sysctl_rdt_handler(SYSCTLFN_PROTO);
143static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO); 144static int ixv_sysctl_tdt_handler(SYSCTLFN_PROTO);
144static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO); 145static int ixv_sysctl_tdh_handler(SYSCTLFN_PROTO);
145 146
146/* The MSI-X Interrupt handlers */ 147/* The MSI-X Interrupt handlers */
147static int ixv_msix_que(void *); 148static int ixv_msix_que(void *);
148static int ixv_msix_mbx(void *); 149static int ixv_msix_mbx(void *);
149 150
150/* Deferred interrupt tasklets */ 151/* Deferred interrupt tasklets */
151static void ixv_handle_que(void *); 152static void ixv_handle_que(void *);
152static void ixv_handle_link(void *); 153static void ixv_handle_link(void *);
153 154
154/* Workqueue handler for deferred work */ 155/* Workqueue handler for deferred work */
155static void ixv_handle_que_work(struct work *, void *); 156static void ixv_handle_que_work(struct work *, void *);
156 157
157const struct sysctlnode *ixv_sysctl_instance(struct adapter *); 158const struct sysctlnode *ixv_sysctl_instance(struct adapter *);
158static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *); 159static const ixgbe_vendor_info_t *ixv_lookup(const struct pci_attach_args *);
159 160
160/************************************************************************ 161/************************************************************************
161 * NetBSD Device Interface Entry Points 162 * NetBSD Device Interface Entry Points
162 ************************************************************************/ 163 ************************************************************************/
163CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter), 164CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter),
164 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL, 165 ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL,
165 DVF_DETACH_SHUTDOWN); 166 DVF_DETACH_SHUTDOWN);
166 167
167#if 0 168#if 0
168static driver_t ixv_driver = { 169static driver_t ixv_driver = {
169 "ixv", ixv_methods, sizeof(struct adapter), 170 "ixv", ixv_methods, sizeof(struct adapter),
170}; 171};
171 172
172devclass_t ixv_devclass; 173devclass_t ixv_devclass;
173DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); 174DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
174MODULE_DEPEND(ixv, pci, 1, 1, 1); 175MODULE_DEPEND(ixv, pci, 1, 1, 1);
175MODULE_DEPEND(ixv, ether, 1, 1, 1); 176MODULE_DEPEND(ixv, ether, 1, 1, 1);
176#endif 177#endif
177 178
178/* 179/*
179 * TUNEABLE PARAMETERS: 180 * TUNEABLE PARAMETERS:
180 */ 181 */
181 182
182/* Number of Queues - do not exceed MSI-X vectors - 1 */ 183/* Number of Queues - do not exceed MSI-X vectors - 1 */
183static int ixv_num_queues = 0; 184static int ixv_num_queues = 0;
184#define TUNABLE_INT(__x, __y) 185#define TUNABLE_INT(__x, __y)
185TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues); 186TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
186 187
187/* 188/*
188 * AIM: Adaptive Interrupt Moderation 189 * AIM: Adaptive Interrupt Moderation
189 * which means the interrupt rate is 190 * which means the interrupt rate is
190 * varied over time based on the traffic 191 * varied over time based on the traffic
191 * seen on that interrupt vector. 192 * seen on that interrupt vector.
192 */ 193 */
193static bool ixv_enable_aim = false; 194static bool ixv_enable_aim = false;
194TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); 195TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
195 196
196static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); 197static int ixv_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
197TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate); 198TUNABLE_INT("hw.ixv.max_interrupt_rate", &ixv_max_interrupt_rate);
198 199
199/* How many packets rxeof tries to clean at a time */ 200/* How many packets rxeof tries to clean at a time */
200static int ixv_rx_process_limit = 256; 201static int ixv_rx_process_limit = 256;
201TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); 202TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
202 203
203/* How many packets txeof tries to clean at a time */ 204/* How many packets txeof tries to clean at a time */
204static int ixv_tx_process_limit = 256; 205static int ixv_tx_process_limit = 256;
205TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit); 206TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
206 207
207/* Whether packet processing is done with a workqueue or a softint */ 208/* Whether packet processing is done with a workqueue or a softint */
208static bool ixv_txrx_workqueue = false; 209static bool ixv_txrx_workqueue = false;
209 210
210/* 211/*
211 * Number of TX descriptors per ring; 212 * Number of TX descriptors per ring;
212 * set higher than RX, as this seems to 213 * set higher than RX, as this seems to
213 * be the better-performing choice. 214 * be the better-performing choice.
214 */ 215 */
215static int ixv_txd = PERFORM_TXD; 216static int ixv_txd = PERFORM_TXD;
216TUNABLE_INT("hw.ixv.txd", &ixv_txd); 217TUNABLE_INT("hw.ixv.txd", &ixv_txd);
217 218
218/* Number of RX descriptors per ring */ 219/* Number of RX descriptors per ring */
219static int ixv_rxd = PERFORM_RXD; 220static int ixv_rxd = PERFORM_RXD;
220TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); 221TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
221 222
222/* Legacy Transmit (single queue) */ 223/* Legacy Transmit (single queue) */
223static int ixv_enable_legacy_tx = 0; 224static int ixv_enable_legacy_tx = 0;
224TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx); 225TUNABLE_INT("hw.ixv.enable_legacy_tx", &ixv_enable_legacy_tx);
225 226
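/*
 * A hedged sketch, not part of the change: TUNABLE_INT is defined as
 * an empty macro above, so every TUNABLE_INT() line expands to nothing
 * and the initializers are compile-time defaults only.  The run-time
 * equivalents are the per-device sysctl nodes the driver registers
 * later, e.g. (assuming device unit 0):
 *
 *	sysctl -w hw.ixv0.rx_processing_limit=512
 */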
226#ifdef NET_MPSAFE 227#ifdef NET_MPSAFE
227#define IXGBE_MPSAFE 1 228#define IXGBE_MPSAFE 1
228#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE 229#define IXGBE_CALLOUT_FLAGS CALLOUT_MPSAFE
229#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE 230#define IXGBE_SOFTINT_FLAGS SOFTINT_MPSAFE
230#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE 231#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU | WQ_MPSAFE
231#else 232#else
232#define IXGBE_CALLOUT_FLAGS 0 233#define IXGBE_CALLOUT_FLAGS 0
233#define IXGBE_SOFTINT_FLAGS 0 234#define IXGBE_SOFTINT_FLAGS 0
234#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU 235#define IXGBE_WORKQUEUE_FLAGS WQ_PERCPU
235#endif 236#endif
236#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET 237#define IXGBE_WORKQUEUE_PRI PRI_SOFTNET
237 238
238#if 0 239#if 0
239static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *); 240static int (*ixv_start_locked)(struct ifnet *, struct tx_ring *);
240static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *); 241static int (*ixv_ring_empty)(struct ifnet *, struct buf_ring *);
241#endif 242#endif
242 243
243/************************************************************************ 244/************************************************************************
244 * ixv_probe - Device identification routine 245 * ixv_probe - Device identification routine
245 * 246 *
246 * Determines whether the driver should be loaded on 247 * Determines whether the driver should be loaded on
247 * the adapter, based on its PCI vendor/device ID. 248 * the adapter, based on its PCI vendor/device ID.
248 * 249 *
249 * return 1 if the device matched, 0 otherwise 250 * return 1 if the device matched, 0 otherwise
250 ************************************************************************/ 251 ************************************************************************/
251static int 252static int
252ixv_probe(device_t dev, cfdata_t cf, void *aux) 253ixv_probe(device_t dev, cfdata_t cf, void *aux)
253{ 254{
254#ifdef __HAVE_PCI_MSI_MSIX 255#ifdef __HAVE_PCI_MSI_MSIX
255 const struct pci_attach_args *pa = aux; 256 const struct pci_attach_args *pa = aux;
256 257
257 return (ixv_lookup(pa) != NULL) ? 1 : 0; 258 return (ixv_lookup(pa) != NULL) ? 1 : 0;
258#else 259#else
259 return 0; 260 return 0;
260#endif 261#endif
261} /* ixv_probe */ 262} /* ixv_probe */
262 263
263static const ixgbe_vendor_info_t * 264static const ixgbe_vendor_info_t *
264ixv_lookup(const struct pci_attach_args *pa) 265ixv_lookup(const struct pci_attach_args *pa)
265{ 266{
266 const ixgbe_vendor_info_t *ent; 267 const ixgbe_vendor_info_t *ent;
267 pcireg_t subid; 268 pcireg_t subid;
268 269
269 INIT_DEBUGOUT("ixv_lookup: begin"); 270 INIT_DEBUGOUT("ixv_lookup: begin");
270 271
271 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) 272 if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID)
272 return NULL; 273 return NULL;
273 274
274 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 275 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
275 276
276 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { 277 for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) {
277 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && 278 if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) &&
278 (PCI_PRODUCT(pa->pa_id) == ent->device_id) && 279 (PCI_PRODUCT(pa->pa_id) == ent->device_id) &&
279 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || 280 ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) ||
280 (ent->subvendor_id == 0)) && 281 (ent->subvendor_id == 0)) &&
281 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || 282 ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) ||
282 (ent->subdevice_id == 0))) { 283 (ent->subdevice_id == 0))) {
283 return ent; 284 return ent;
284 } 285 }
285 } 286 }
286 287
287 return NULL; 288 return NULL;
288} 289}
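/*
 * An illustration (hedged; the entry below is hypothetical, not quoted
 * from ixv_vendor_info_array): subvendor_id == 0 and subdevice_id == 0
 * act as wildcards in the loop above, so a table entry such as
 *
 *	{ IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0 },
 *
 * matches every 82599 VF regardless of the board's subsystem IDs.
 */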
289 290
290/************************************************************************ 291/************************************************************************
291 * ixv_attach - Device initialization routine 292 * ixv_attach - Device initialization routine
292 * 293 *
293 * Called when the driver is being loaded. 294 * Called when the driver is being loaded.
294 * Identifies the type of hardware, allocates all resources 295 * Identifies the type of hardware, allocates all resources
295 * and initializes the hardware. 296 * and initializes the hardware.
296 * 297 *
297 * (Returns nothing; on failure the acquired resources are freed.) 298 * (Returns nothing; on failure the acquired resources are freed.)
298 ************************************************************************/ 299 ************************************************************************/
299static void 300static void
300ixv_attach(device_t parent, device_t dev, void *aux) 301ixv_attach(device_t parent, device_t dev, void *aux)
301{ 302{
302 struct adapter *adapter; 303 struct adapter *adapter;
303 struct ixgbe_hw *hw; 304 struct ixgbe_hw *hw;
304 int error = 0; 305 int error = 0;
305 pcireg_t id, subid; 306 pcireg_t id, subid;
306 const ixgbe_vendor_info_t *ent; 307 const ixgbe_vendor_info_t *ent;
307 const struct pci_attach_args *pa = aux; 308 const struct pci_attach_args *pa = aux;
308 const char *apivstr; 309 const char *apivstr;
309 const char *str; 310 const char *str;
310 char buf[256]; 311 char buf[256];
311 312
312 INIT_DEBUGOUT("ixv_attach: begin"); 313 INIT_DEBUGOUT("ixv_attach: begin");
313 314
314 /* 315 /*
315 * Make sure BUSMASTER is set; on a VM under 316 * Make sure BUSMASTER is set; on a VM under
316 * KVM it may not be, and that breaks things. 317 * KVM it may not be, and that breaks things.
317 */ 318 */
318 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag); 319 ixgbe_pci_enable_busmaster(pa->pa_pc, pa->pa_tag);
319 320
320 /* Allocate, clear, and link in our adapter structure */ 321 /* Allocate, clear, and link in our adapter structure */
321 adapter = device_private(dev); 322 adapter = device_private(dev);
322 adapter->hw.back = adapter; 323 adapter->hw.back = adapter;
323 adapter->dev = dev; 324 adapter->dev = dev;
324 hw = &adapter->hw; 325 hw = &adapter->hw;
325 326
326 adapter->init_locked = ixv_init_locked; 327 adapter->init_locked = ixv_init_locked;
327 adapter->stop_locked = ixv_stop; 328 adapter->stop_locked = ixv_stop;
328 329
329 adapter->osdep.pc = pa->pa_pc; 330 adapter->osdep.pc = pa->pa_pc;
330 adapter->osdep.tag = pa->pa_tag; 331 adapter->osdep.tag = pa->pa_tag;
331 if (pci_dma64_available(pa)) 332 if (pci_dma64_available(pa))
332 adapter->osdep.dmat = pa->pa_dmat64; 333 adapter->osdep.dmat = pa->pa_dmat64;
333 else 334 else
334 adapter->osdep.dmat = pa->pa_dmat; 335 adapter->osdep.dmat = pa->pa_dmat;
335 adapter->osdep.attached = false; 336 adapter->osdep.attached = false;
336 337
337 ent = ixv_lookup(pa); 338 ent = ixv_lookup(pa);
338 339
339 KASSERT(ent != NULL); 340 KASSERT(ent != NULL);
340 341
341 aprint_normal(": %s, Version - %s\n", 342 aprint_normal(": %s, Version - %s\n",
342 ixv_strings[ent->index], ixv_driver_version); 343 ixv_strings[ent->index], ixv_driver_version);
343 344
344 /* Core Lock Init */ 345 /* Core Lock Init */
345 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev)); 346 IXGBE_CORE_LOCK_INIT(adapter, device_xname(dev));
346 347
347 /* Do base PCI setup - map BAR0 */ 348 /* Do base PCI setup - map BAR0 */
348 if (ixv_allocate_pci_resources(adapter, pa)) { 349 if (ixv_allocate_pci_resources(adapter, pa)) {
349 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n"); 350 aprint_error_dev(dev, "ixv_allocate_pci_resources() failed!\n");
350 error = ENXIO; 351 error = ENXIO;
351 goto err_out; 352 goto err_out;
352 } 353 }
353 354
354 /* SYSCTL APIs */ 355 /* SYSCTL APIs */
355 ixv_add_device_sysctls(adapter); 356 ixv_add_device_sysctls(adapter);
356 357
357 /* Set up the timer callout */ 358 /* Set up the timer callout */
358 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS); 359 callout_init(&adapter->timer, IXGBE_CALLOUT_FLAGS);
359 360
360 /* Save off the information about this board */ 361 /* Save off the information about this board */
361 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); 362 id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);
362 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 363 subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
363 hw->vendor_id = PCI_VENDOR(id); 364 hw->vendor_id = PCI_VENDOR(id);
364 hw->device_id = PCI_PRODUCT(id); 365 hw->device_id = PCI_PRODUCT(id);
365 hw->revision_id = 366 hw->revision_id =
366 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG)); 367 PCI_REVISION(pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG));
367 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); 368 hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid);
368 hw->subsystem_device_id = PCI_SUBSYS_ID(subid); 369 hw->subsystem_device_id = PCI_SUBSYS_ID(subid);
369 370
370 /* A subset of set_mac_type */ 371 /* A subset of set_mac_type */
371 switch (hw->device_id) { 372 switch (hw->device_id) {
372 case IXGBE_DEV_ID_82599_VF: 373 case IXGBE_DEV_ID_82599_VF:
373 hw->mac.type = ixgbe_mac_82599_vf; 374 hw->mac.type = ixgbe_mac_82599_vf;
374 str = "82599 VF"; 375 str = "82599 VF";
375 break; 376 break;
376 case IXGBE_DEV_ID_X540_VF: 377 case IXGBE_DEV_ID_X540_VF:
377 hw->mac.type = ixgbe_mac_X540_vf; 378 hw->mac.type = ixgbe_mac_X540_vf;
378 str = "X540 VF"; 379 str = "X540 VF";
379 break; 380 break;
380 case IXGBE_DEV_ID_X550_VF: 381 case IXGBE_DEV_ID_X550_VF:
381 hw->mac.type = ixgbe_mac_X550_vf; 382 hw->mac.type = ixgbe_mac_X550_vf;
382 str = "X550 VF"; 383 str = "X550 VF";
383 break; 384 break;
384 case IXGBE_DEV_ID_X550EM_X_VF: 385 case IXGBE_DEV_ID_X550EM_X_VF:
385 hw->mac.type = ixgbe_mac_X550EM_x_vf; 386 hw->mac.type = ixgbe_mac_X550EM_x_vf;
386 str = "X550EM X VF"; 387 str = "X550EM X VF";
387 break; 388 break;
388 case IXGBE_DEV_ID_X550EM_A_VF: 389 case IXGBE_DEV_ID_X550EM_A_VF:
389 hw->mac.type = ixgbe_mac_X550EM_a_vf; 390 hw->mac.type = ixgbe_mac_X550EM_a_vf;
390 str = "X550EM A VF"; 391 str = "X550EM A VF";
391 break; 392 break;
392 default: 393 default:
393 /* Shouldn't get here since probe succeeded */ 394 /* Shouldn't get here since probe succeeded */
394 aprint_error_dev(dev, "Unknown device ID!\n"); 395 aprint_error_dev(dev, "Unknown device ID!\n");
395 error = ENXIO; 396 error = ENXIO;
396 goto err_out; 397 goto err_out;
397 break; 398 break;
398 } 399 }
399 aprint_normal_dev(dev, "device %s\n", str); 400 aprint_normal_dev(dev, "device %s\n", str);
400 401
401 ixv_init_device_features(adapter); 402 ixv_init_device_features(adapter);
402 403
403 /* Initialize the shared code */ 404 /* Initialize the shared code */
404 error = ixgbe_init_ops_vf(hw); 405 error = ixgbe_init_ops_vf(hw);
405 if (error) { 406 if (error) {
406 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n"); 407 aprint_error_dev(dev, "ixgbe_init_ops_vf() failed!\n");
407 error = EIO; 408 error = EIO;
408 goto err_out; 409 goto err_out;
409 } 410 }
410 411
411 /* Setup the mailbox */ 412 /* Setup the mailbox */
412 ixgbe_init_mbx_params_vf(hw); 413 ixgbe_init_mbx_params_vf(hw);
413 414
414 /* Set the right number of segments */ 415 /* Set the right number of segments */
415 adapter->num_segs = IXGBE_82599_SCATTER; 416 adapter->num_segs = IXGBE_82599_SCATTER;
416 417
417 /* Reset mbox api to 1.0 */ 418 /* Reset mbox api to 1.0 */
418 error = hw->mac.ops.reset_hw(hw); 419 error = hw->mac.ops.reset_hw(hw);
419 if (error == IXGBE_ERR_RESET_FAILED) 420 if (error == IXGBE_ERR_RESET_FAILED)
420 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n"); 421 aprint_error_dev(dev, "...reset_hw() failure: Reset Failed!\n");
421 else if (error) 422 else if (error)
422 aprint_error_dev(dev, "...reset_hw() failed with error %d\n", 423 aprint_error_dev(dev, "...reset_hw() failed with error %d\n",
423 error); 424 error);
424 if (error) { 425 if (error) {
425 error = EIO; 426 error = EIO;
426 goto err_out; 427 goto err_out;
427 } 428 }
428 429
429 error = hw->mac.ops.init_hw(hw); 430 error = hw->mac.ops.init_hw(hw);
430 if (error) { 431 if (error) {
431 aprint_error_dev(dev, "...init_hw() failed!\n"); 432 aprint_error_dev(dev, "...init_hw() failed!\n");
432 error = EIO; 433 error = EIO;
433 goto err_out; 434 goto err_out;
434 } 435 }
435 436
436 /* Negotiate mailbox API version */ 437 /* Negotiate mailbox API version */
437 error = ixv_negotiate_api(adapter); 438 error = ixv_negotiate_api(adapter);
438 if (error) 439 if (error)
439 aprint_normal_dev(dev, 440 aprint_normal_dev(dev,
440 "MBX API negotiation failed during attach!\n"); 441 "MBX API negotiation failed during attach!\n");
441 switch (hw->api_version) { 442 switch (hw->api_version) {
442 case ixgbe_mbox_api_10: 443 case ixgbe_mbox_api_10:
443 apivstr = "1.0"; 444 apivstr = "1.0";
444 break; 445 break;
445 case ixgbe_mbox_api_20: 446 case ixgbe_mbox_api_20:
446 apivstr = "2.0"; 447 apivstr = "2.0";
447 break; 448 break;
448 case ixgbe_mbox_api_11: 449 case ixgbe_mbox_api_11:
449 apivstr = "1.1"; 450 apivstr = "1.1";
450 break; 451 break;
451 case ixgbe_mbox_api_12: 452 case ixgbe_mbox_api_12:
452 apivstr = "1.2"; 453 apivstr = "1.2";
453 break; 454 break;
454 case ixgbe_mbox_api_13: 455 case ixgbe_mbox_api_13:
455 apivstr = "1.3"; 456 apivstr = "1.3";
456 break; 457 break;
457 default: 458 default:
458 apivstr = "unknown"; 459 apivstr = "unknown";
459 break; 460 break;
460 } 461 }
461 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr); 462 aprint_normal_dev(dev, "Mailbox API %s\n", apivstr);
462 463
463 /* If no mac address was assigned, make a random one */ 464 /* If no mac address was assigned, make a random one */
464 if (!ixv_check_ether_addr(hw->mac.addr)) { 465 if (!ixv_check_ether_addr(hw->mac.addr)) {
465 u8 addr[ETHER_ADDR_LEN]; 466 u8 addr[ETHER_ADDR_LEN];
466 uint64_t rndval = cprng_strong64(); 467 uint64_t rndval = cprng_strong64();
467 468
468 memcpy(addr, &rndval, sizeof(addr)); 469 memcpy(addr, &rndval, sizeof(addr));
469 addr[0] &= 0xFE; 470 addr[0] &= 0xFE;
470 addr[0] |= 0x02; 471 addr[0] |= 0x02;
471 bcopy(addr, hw->mac.addr, sizeof(addr)); 472 bcopy(addr, hw->mac.addr, sizeof(addr));
472 } 473 }
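/*
 * A note on the two bit operations above: "addr[0] &= 0xFE" clears the
 * I/G bit so the address stays unicast, and "addr[0] |= 0x02" sets the
 * U/L bit, marking it locally administered.  The random value thus
 * always becomes a valid locally administered unicast MAC address.
 */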
473 474
474 /* Register for VLAN events */ 475 /* Register for VLAN events */
475 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb); 476 ether_set_vlan_cb(&adapter->osdep.ec, ixv_vlan_cb);
476 477
477 /* Sysctls for limiting the amount of work done in the taskqueues */ 478 /* Sysctls for limiting the amount of work done in the taskqueues */
478 ixv_set_sysctl_value(adapter, "rx_processing_limit", 479 ixv_set_sysctl_value(adapter, "rx_processing_limit",
479 "max number of rx packets to process", 480 "max number of rx packets to process",
480 &adapter->rx_process_limit, ixv_rx_process_limit); 481 &adapter->rx_process_limit, ixv_rx_process_limit);
481 482
482 ixv_set_sysctl_value(adapter, "tx_processing_limit", 483 ixv_set_sysctl_value(adapter, "tx_processing_limit",
483 "max number of tx packets to process", 484 "max number of tx packets to process",
484 &adapter->tx_process_limit, ixv_tx_process_limit); 485 &adapter->tx_process_limit, ixv_tx_process_limit);
485 486
486 /* Do descriptor calc and sanity checks */ 487 /* Do descriptor calc and sanity checks */
487 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || 488 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
488 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { 489 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
489 aprint_error_dev(dev, "TXD config issue, using default!\n"); 490 aprint_error_dev(dev, "TXD config issue, using default!\n");
490 adapter->num_tx_desc = DEFAULT_TXD; 491 adapter->num_tx_desc = DEFAULT_TXD;
491 } else 492 } else
492 adapter->num_tx_desc = ixv_txd; 493 adapter->num_tx_desc = ixv_txd;
493 494
494 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || 495 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
495 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) { 496 ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
496 aprint_error_dev(dev, "RXD config issue, using default!\n"); 497 aprint_error_dev(dev, "RXD config issue, using default!\n");
497 adapter->num_rx_desc = DEFAULT_RXD; 498 adapter->num_rx_desc = DEFAULT_RXD;
498 } else 499 } else
499 adapter->num_rx_desc = ixv_rxd; 500 adapter->num_rx_desc = ixv_rxd;
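/*
 * A worked example (assuming DBA_ALIGN is 128 and the advanced
 * descriptors are 16 bytes each, per the ixgbe headers): the ring byte
 * size must be a multiple of DBA_ALIGN, so the descriptor count must
 * be a multiple of 128 / 16 = 8.  For instance ixv_txd = 2048 passes,
 * while ixv_txd = 2052 fails (2052 * 16 % 128 == 64) and falls back
 * to DEFAULT_TXD.
 */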
500 501
 502 adapter->num_jcl = adapter->num_rx_desc * IXGBE_JCLNUM_MULTI;
 503
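/*
 * A note on the addition above: num_jcl sizes the driver's private
 * jumbo cluster pool per RX ring as a multiple of the descriptor
 * count, so spare clusters remain available for ring refill while
 * earlier receive buffers are still held elsewhere.  A sketch,
 * assuming IXGBE_JCLNUM_MULTI is 3 (see ixgbe_netbsd.h for the real
 * value):
 *
 *	num_rx_desc = 1024  =>  num_jcl = 3072 clusters per ring
 */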
501 /* Setup MSI-X */ 504 /* Setup MSI-X */
502 error = ixv_configure_interrupts(adapter); 505 error = ixv_configure_interrupts(adapter);
503 if (error) 506 if (error)
504 goto err_out; 507 goto err_out;
505 508
506 /* Allocate our TX/RX Queues */ 509 /* Allocate our TX/RX Queues */
507 if (ixgbe_allocate_queues(adapter)) { 510 if (ixgbe_allocate_queues(adapter)) {
508 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n"); 511 aprint_error_dev(dev, "ixgbe_allocate_queues() failed!\n");
509 error = ENOMEM; 512 error = ENOMEM;
510 goto err_out; 513 goto err_out;
511 } 514 }
512 515
513 /* hw.ix defaults init */ 516 /* hw.ix defaults init */
514 adapter->enable_aim = ixv_enable_aim; 517 adapter->enable_aim = ixv_enable_aim;
515 518
516 adapter->txrx_use_workqueue = ixv_txrx_workqueue; 519 adapter->txrx_use_workqueue = ixv_txrx_workqueue;
517 520
518 error = ixv_allocate_msix(adapter, pa); 521 error = ixv_allocate_msix(adapter, pa);
519 if (error) { 522 if (error) {
520 aprint_error_dev(dev, "ixv_allocate_msix() failed!\n"); 523 aprint_error_dev(dev, "ixv_allocate_msix() failed!\n");
521 goto err_late; 524 goto err_late;
522 } 525 }
523 526
524 /* Setup OS specific network interface */ 527 /* Setup OS specific network interface */
525 error = ixv_setup_interface(dev, adapter); 528 error = ixv_setup_interface(dev, adapter);
526 if (error != 0) { 529 if (error != 0) {
527 aprint_error_dev(dev, "ixv_setup_interface() failed!\n"); 530 aprint_error_dev(dev, "ixv_setup_interface() failed!\n");
528 goto err_late; 531 goto err_late;
529 } 532 }
530 533
531 /* Do the stats setup */ 534 /* Do the stats setup */
532 ixv_save_stats(adapter); 535 ixv_save_stats(adapter);
533 ixv_init_stats(adapter); 536 ixv_init_stats(adapter);
534 ixv_add_stats_sysctls(adapter); 537 ixv_add_stats_sysctls(adapter);
535 538
536 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 539 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
537 ixgbe_netmap_attach(adapter); 540 ixgbe_netmap_attach(adapter);
538 541
539 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap); 542 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_cap);
540 aprint_verbose_dev(dev, "feature cap %s\n", buf); 543 aprint_verbose_dev(dev, "feature cap %s\n", buf);
541 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en); 544 snprintb(buf, sizeof(buf), IXGBE_FEATURE_FLAGS, adapter->feat_en);
542 aprint_verbose_dev(dev, "feature ena %s\n", buf); 545 aprint_verbose_dev(dev, "feature ena %s\n", buf);
543 546
544 INIT_DEBUGOUT("ixv_attach: end"); 547 INIT_DEBUGOUT("ixv_attach: end");
545 adapter->osdep.attached = true; 548 adapter->osdep.attached = true;
546 549
547 return; 550 return;
548 551
549err_late: 552err_late:
550 ixgbe_free_queues(adapter); 553 ixgbe_free_queues(adapter);
551err_out: 554err_out:
552 ixv_free_pci_resources(adapter); 555 ixv_free_pci_resources(adapter);
553 IXGBE_CORE_LOCK_DESTROY(adapter); 556 IXGBE_CORE_LOCK_DESTROY(adapter);
554 557
555 return; 558 return;
556} /* ixv_attach */ 559} /* ixv_attach */
557 560
558/************************************************************************ 561/************************************************************************
559 * ixv_detach - Device removal routine 562 * ixv_detach - Device removal routine
560 * 563 *
561 * Called when the driver is being removed. 564 * Called when the driver is being removed.
562 * Stops the adapter and deallocates all the resources 565 * Stops the adapter and deallocates all the resources
563 * that were allocated for driver operation. 566 * that were allocated for driver operation.
564 * 567 *
565 * return 0 on success, positive on failure 568 * return 0 on success, positive on failure
566 ************************************************************************/ 569 ************************************************************************/
567static int 570static int
568ixv_detach(device_t dev, int flags) 571ixv_detach(device_t dev, int flags)
569{ 572{
570 struct adapter *adapter = device_private(dev); 573 struct adapter *adapter = device_private(dev);
571 struct ixgbe_hw *hw = &adapter->hw; 574 struct ixgbe_hw *hw = &adapter->hw;
572 struct ix_queue *que = adapter->queues; 575 struct ix_queue *que = adapter->queues;
573 struct tx_ring *txr = adapter->tx_rings; 576 struct tx_ring *txr = adapter->tx_rings;
574 struct rx_ring *rxr = adapter->rx_rings; 577 struct rx_ring *rxr = adapter->rx_rings;
575 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 578 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
576 579
577 INIT_DEBUGOUT("ixv_detach: begin"); 580 INIT_DEBUGOUT("ixv_detach: begin");
578 if (adapter->osdep.attached == false) 581 if (adapter->osdep.attached == false)
579 return 0; 582 return 0;
580 583
581 /* Stop the interface. Callouts are stopped in it. */ 584 /* Stop the interface. Callouts are stopped in it. */
582 ixv_ifstop(adapter->ifp, 1); 585 ixv_ifstop(adapter->ifp, 1);
583 586
584#if NVLAN > 0 587#if NVLAN > 0
585 /* Make sure VLANs are not using driver */ 588 /* Make sure VLANs are not using driver */
586 if (!VLAN_ATTACHED(&adapter->osdep.ec)) 589 if (!VLAN_ATTACHED(&adapter->osdep.ec))
587 ; /* nothing to do: no VLANs */ 590 ; /* nothing to do: no VLANs */
588 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0) 591 else if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) != 0)
589 vlan_ifdetach(adapter->ifp); 592 vlan_ifdetach(adapter->ifp);
590 else { 593 else {
591 aprint_error_dev(dev, "VLANs in use, detach first\n"); 594 aprint_error_dev(dev, "VLANs in use, detach first\n");
592 return EBUSY; 595 return EBUSY;
593 } 596 }
594#endif 597#endif
595 598
596 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { 599 for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
597 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 600 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
598 softint_disestablish(txr->txr_si); 601 softint_disestablish(txr->txr_si);
599 softint_disestablish(que->que_si); 602 softint_disestablish(que->que_si);
600 } 603 }
601 if (adapter->txr_wq != NULL) 604 if (adapter->txr_wq != NULL)
602 workqueue_destroy(adapter->txr_wq); 605 workqueue_destroy(adapter->txr_wq);
603 if (adapter->txr_wq_enqueued != NULL) 606 if (adapter->txr_wq_enqueued != NULL)
604 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int)); 607 percpu_free(adapter->txr_wq_enqueued, sizeof(u_int));
605 if (adapter->que_wq != NULL) 608 if (adapter->que_wq != NULL)
606 workqueue_destroy(adapter->que_wq); 609 workqueue_destroy(adapter->que_wq);
607 610
608 /* Drain the Mailbox (link) queue */ 611 /* Drain the Mailbox (link) queue */
609 softint_disestablish(adapter->link_si); 612 softint_disestablish(adapter->link_si);
610 613
611 ether_ifdetach(adapter->ifp); 614 ether_ifdetach(adapter->ifp);
612 callout_halt(&adapter->timer, NULL); 615 callout_halt(&adapter->timer, NULL);
613 616
614 if (adapter->feat_en & IXGBE_FEATURE_NETMAP) 617 if (adapter->feat_en & IXGBE_FEATURE_NETMAP)
615 netmap_detach(adapter->ifp); 618 netmap_detach(adapter->ifp);
616 619
617 ixv_free_pci_resources(adapter); 620 ixv_free_pci_resources(adapter);
618#if 0 /* XXX the NetBSD port is probably missing something here */ 621#if 0 /* XXX the NetBSD port is probably missing something here */
619 bus_generic_detach(dev); 622 bus_generic_detach(dev);
620#endif 623#endif
621 if_detach(adapter->ifp); 624 if_detach(adapter->ifp);
622 if_percpuq_destroy(adapter->ipq); 625 if_percpuq_destroy(adapter->ipq);
623 626
624 sysctl_teardown(&adapter->sysctllog); 627 sysctl_teardown(&adapter->sysctllog);
625 evcnt_detach(&adapter->efbig_tx_dma_setup); 628 evcnt_detach(&adapter->efbig_tx_dma_setup);
626 evcnt_detach(&adapter->mbuf_defrag_failed); 629 evcnt_detach(&adapter->mbuf_defrag_failed);
627 evcnt_detach(&adapter->efbig2_tx_dma_setup); 630 evcnt_detach(&adapter->efbig2_tx_dma_setup);
628 evcnt_detach(&adapter->einval_tx_dma_setup); 631 evcnt_detach(&adapter->einval_tx_dma_setup);
629 evcnt_detach(&adapter->other_tx_dma_setup); 632 evcnt_detach(&adapter->other_tx_dma_setup);
630 evcnt_detach(&adapter->eagain_tx_dma_setup); 633 evcnt_detach(&adapter->eagain_tx_dma_setup);
631 evcnt_detach(&adapter->enomem_tx_dma_setup); 634 evcnt_detach(&adapter->enomem_tx_dma_setup);
632 evcnt_detach(&adapter->watchdog_events); 635 evcnt_detach(&adapter->watchdog_events);
633 evcnt_detach(&adapter->tso_err); 636 evcnt_detach(&adapter->tso_err);
634 evcnt_detach(&adapter->link_irq); 637 evcnt_detach(&adapter->link_irq);
635 638
636 txr = adapter->tx_rings; 639 txr = adapter->tx_rings;
637 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 640 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
638 evcnt_detach(&adapter->queues[i].irqs); 641 evcnt_detach(&adapter->queues[i].irqs);
639 evcnt_detach(&adapter->queues[i].handleq); 642 evcnt_detach(&adapter->queues[i].handleq);
640 evcnt_detach(&adapter->queues[i].req); 643 evcnt_detach(&adapter->queues[i].req);
641 evcnt_detach(&txr->no_desc_avail); 644 evcnt_detach(&txr->no_desc_avail);
642 evcnt_detach(&txr->total_packets); 645 evcnt_detach(&txr->total_packets);
643 evcnt_detach(&txr->tso_tx); 646 evcnt_detach(&txr->tso_tx);
644#ifndef IXGBE_LEGACY_TX 647#ifndef IXGBE_LEGACY_TX
645 evcnt_detach(&txr->pcq_drops); 648 evcnt_detach(&txr->pcq_drops);
646#endif 649#endif
647 650
648 evcnt_detach(&rxr->rx_packets); 651 evcnt_detach(&rxr->rx_packets);
649 evcnt_detach(&rxr->rx_bytes); 652 evcnt_detach(&rxr->rx_bytes);
650 evcnt_detach(&rxr->rx_copies); 653 evcnt_detach(&rxr->rx_copies);
651 evcnt_detach(&rxr->no_jmbuf); 654 evcnt_detach(&rxr->no_jmbuf);
652 evcnt_detach(&rxr->rx_discarded); 655 evcnt_detach(&rxr->rx_discarded);
653 } 656 }
654 evcnt_detach(&stats->ipcs); 657 evcnt_detach(&stats->ipcs);
655 evcnt_detach(&stats->l4cs); 658 evcnt_detach(&stats->l4cs);
656 evcnt_detach(&stats->ipcs_bad); 659 evcnt_detach(&stats->ipcs_bad);
657 evcnt_detach(&stats->l4cs_bad); 660 evcnt_detach(&stats->l4cs_bad);
658 661
659 /* Packet Reception Stats */ 662 /* Packet Reception Stats */
660 evcnt_detach(&stats->vfgorc); 663 evcnt_detach(&stats->vfgorc);
661 evcnt_detach(&stats->vfgprc); 664 evcnt_detach(&stats->vfgprc);
662 evcnt_detach(&stats->vfmprc); 665 evcnt_detach(&stats->vfmprc);
663 666
664 /* Packet Transmission Stats */ 667 /* Packet Transmission Stats */
665 evcnt_detach(&stats->vfgotc); 668 evcnt_detach(&stats->vfgotc);
666 evcnt_detach(&stats->vfgptc); 669 evcnt_detach(&stats->vfgptc);
667 670
668 /* Mailbox Stats */ 671 /* Mailbox Stats */
669 evcnt_detach(&hw->mbx.stats.msgs_tx); 672 evcnt_detach(&hw->mbx.stats.msgs_tx);
670 evcnt_detach(&hw->mbx.stats.msgs_rx); 673 evcnt_detach(&hw->mbx.stats.msgs_rx);
671 evcnt_detach(&hw->mbx.stats.acks); 674 evcnt_detach(&hw->mbx.stats.acks);
672 evcnt_detach(&hw->mbx.stats.reqs); 675 evcnt_detach(&hw->mbx.stats.reqs);
673 evcnt_detach(&hw->mbx.stats.rsts); 676 evcnt_detach(&hw->mbx.stats.rsts);
674 677
675 ixgbe_free_queues(adapter); 678 ixgbe_free_queues(adapter);
676 679
677 IXGBE_CORE_LOCK_DESTROY(adapter); 680 IXGBE_CORE_LOCK_DESTROY(adapter);
678 681
679 return (0); 682 return (0);
680} /* ixv_detach */ 683} /* ixv_detach */
681 684
682/************************************************************************ 685/************************************************************************
683 * ixv_init_locked - Init entry point 686 * ixv_init_locked - Init entry point
684 * 687 *
685 * Used in two ways: by the stack, as the init entry 688 * Used in two ways: by the stack, as the init entry
686 * point in the network interface structure, and by the 689 * point in the network interface structure, and by the
687 * driver, as a hw/sw initialization routine used to get 690 * driver, as a hw/sw initialization routine used to get
688 * to a consistent state. 691 * to a consistent state.
689 * 692 *
690 * (Returns nothing; on failure the adapter is stopped.) 693 * (Returns nothing; on failure the adapter is stopped.)
691 ************************************************************************/ 694 ************************************************************************/
692static void 695static void
693ixv_init_locked(struct adapter *adapter) 696ixv_init_locked(struct adapter *adapter)
694{ 697{
695 struct ifnet *ifp = adapter->ifp; 698 struct ifnet *ifp = adapter->ifp;
696 device_t dev = adapter->dev; 699 device_t dev = adapter->dev;
697 struct ixgbe_hw *hw = &adapter->hw; 700 struct ixgbe_hw *hw = &adapter->hw;
698 struct ix_queue *que; 701 struct ix_queue *que;
699 int error = 0; 702 int error = 0;
700 uint32_t mask; 703 uint32_t mask;
701 int i; 704 int i;
702 705
703 INIT_DEBUGOUT("ixv_init_locked: begin"); 706 INIT_DEBUGOUT("ixv_init_locked: begin");
704 KASSERT(mutex_owned(&adapter->core_mtx)); 707 KASSERT(mutex_owned(&adapter->core_mtx));
705 hw->adapter_stopped = FALSE; 708 hw->adapter_stopped = FALSE;
706 hw->mac.ops.stop_adapter(hw); 709 hw->mac.ops.stop_adapter(hw);
707 callout_stop(&adapter->timer); 710 callout_stop(&adapter->timer);
708 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 711 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
709 que->disabled_count = 0; 712 que->disabled_count = 0;
710 713
711 adapter->max_frame_size = 714 adapter->max_frame_size =
712 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 715 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
713 716
714 /* reprogram the RAR[0] in case user changed it. */ 717 /* reprogram the RAR[0] in case user changed it. */
715 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 718 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
716 719
717 /* Get the latest MAC address; the user may have set an LAA */ 720 /* Get the latest MAC address; the user may have set an LAA */
718 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl), 721 memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
719 IXGBE_ETH_LENGTH_OF_ADDRESS); 722 IXGBE_ETH_LENGTH_OF_ADDRESS);
720 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); 723 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
721 724
722 /* Prepare transmit descriptors and buffers */ 725 /* Prepare transmit descriptors and buffers */
723 if (ixgbe_setup_transmit_structures(adapter)) { 726 if (ixgbe_setup_transmit_structures(adapter)) {
724 aprint_error_dev(dev, "Could not setup transmit structures\n"); 727 aprint_error_dev(dev, "Could not setup transmit structures\n");
725 ixv_stop(adapter); 728 ixv_stop(adapter);
726 return; 729 return;
727 } 730 }
728 731
729 /* Reset VF and renegotiate mailbox API version */ 732 /* Reset VF and renegotiate mailbox API version */
730 hw->mac.ops.reset_hw(hw); 733 hw->mac.ops.reset_hw(hw);
731 hw->mac.ops.start_hw(hw); 734 hw->mac.ops.start_hw(hw);
732 error = ixv_negotiate_api(adapter); 735 error = ixv_negotiate_api(adapter);
733 if (error) 736 if (error)
734 device_printf(dev, 737 device_printf(dev,
735 "Mailbox API negotiation failed in init_locked!\n"); 738 "Mailbox API negotiation failed in init_locked!\n");
736 739
737 ixv_initialize_transmit_units(adapter); 740 ixv_initialize_transmit_units(adapter);
738 741
739 /* Setup Multicast table */ 742 /* Setup Multicast table */
740 ixv_set_rxfilter(adapter); 743 ixv_set_rxfilter(adapter);
741 744
742 /* 745 /*
743 * Determine the correct mbuf pool 746 * Determine the correct mbuf pool
744 * for doing jumbo/headersplit 747 * for doing jumbo/headersplit
745 */ 748 */
746 if (adapter->max_frame_size <= MCLBYTES) 749 if (adapter->max_frame_size <= MCLBYTES)
747 adapter->rx_mbuf_sz = MCLBYTES; 750 adapter->rx_mbuf_sz = MCLBYTES;
748 else 751 else
749 adapter->rx_mbuf_sz = MJUMPAGESIZE; 752 adapter->rx_mbuf_sz = MJUMPAGESIZE;
750 753
751 /* Prepare receive descriptors and buffers */ 754 /* Prepare receive descriptors and buffers */
752 if (ixgbe_setup_receive_structures(adapter)) { 755 if (ixgbe_setup_receive_structures(adapter)) {
753 device_printf(dev, "Could not setup receive structures\n"); 756 device_printf(dev, "Could not setup receive structures\n");
754 ixv_stop(adapter); 757 ixv_stop(adapter);
755 return; 758 return;
756 } 759 }
757 760
758 /* Configure RX settings */ 761 /* Configure RX settings */
759 ixv_initialize_receive_units(adapter); 762 ixv_initialize_receive_units(adapter);
760 763
761 /* Set up VLAN offload and filter */ 764 /* Set up VLAN offload and filter */
762 ixv_setup_vlan_support(adapter); 765 ixv_setup_vlan_support(adapter);
763 766
764 /* Set up MSI-X routing */ 767 /* Set up MSI-X routing */
765 ixv_configure_ivars(adapter); 768 ixv_configure_ivars(adapter);
766 769
767 /* Set up auto-mask */ 770 /* Set up auto-mask */
768 mask = (1 << adapter->vector); 771 mask = (1 << adapter->vector);
769 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++) 772 for (i = 0, que = adapter->queues; i < adapter->num_queues; i++, que++)
770 mask |= (1 << que->msix); 773 mask |= (1 << que->msix);
771 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask); 774 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
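	/*
	 * An example for the loop above: with two queue vectors (0 and
	 * 1) and the mailbox on vector 2, mask ends up as
	 * (1 << 2) | (1 << 0) | (1 << 1) = 0x7, so all three causes
	 * are auto-masked via VTEIAM.
	 */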
772 775
773 /* Set moderation on the Link interrupt */ 776 /* Set moderation on the Link interrupt */
774 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR); 777 ixv_eitr_write(adapter, adapter->vector, IXGBE_LINK_ITR);
775 778
776 /* Stats init */ 779 /* Stats init */
777 ixv_init_stats(adapter); 780 ixv_init_stats(adapter);
778 781
779 /* Config/Enable Link */ 782 /* Config/Enable Link */
780 hw->mac.get_link_status = TRUE; 783 hw->mac.get_link_status = TRUE;
781 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, 784 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
782 FALSE); 785 FALSE);
783 786
784 /* Start watchdog */ 787 /* Start watchdog */
785 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); 788 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
786 789
787 /* And now turn on interrupts */ 790 /* And now turn on interrupts */
788 ixv_enable_intr(adapter); 791 ixv_enable_intr(adapter);
789 792
790 /* Update saved flags. See ixgbe_ifflags_cb() */ 793 /* Update saved flags. See ixgbe_ifflags_cb() */
791 adapter->if_flags = ifp->if_flags; 794 adapter->if_flags = ifp->if_flags;
792 adapter->ec_capenable = adapter->osdep.ec.ec_capenable; 795 adapter->ec_capenable = adapter->osdep.ec.ec_capenable;
793 796
794 /* Now inform the stack we're ready */ 797 /* Now inform the stack we're ready */
795 ifp->if_flags |= IFF_RUNNING; 798 ifp->if_flags |= IFF_RUNNING;
796 ifp->if_flags &= ~IFF_OACTIVE; 799 ifp->if_flags &= ~IFF_OACTIVE;
797 800
798 return; 801 return;
799} /* ixv_init_locked */ 802} /* ixv_init_locked */
800 803
801/************************************************************************ 804/************************************************************************
802 * ixv_enable_queue 805 * ixv_enable_queue
803 ************************************************************************/ 806 ************************************************************************/
804static inline void 807static inline void
805ixv_enable_queue(struct adapter *adapter, u32 vector) 808ixv_enable_queue(struct adapter *adapter, u32 vector)
806{ 809{
807 struct ixgbe_hw *hw = &adapter->hw; 810 struct ixgbe_hw *hw = &adapter->hw;
808 struct ix_queue *que = &adapter->queues[vector]; 811 struct ix_queue *que = &adapter->queues[vector];
809 u32 queue = 1UL << vector; 812 u32 queue = 1UL << vector;
810 u32 mask; 813 u32 mask;
811 814
812 mutex_enter(&que->dc_mtx); 815 mutex_enter(&que->dc_mtx);
813 if (que->disabled_count > 0 && --que->disabled_count > 0) 816 if (que->disabled_count > 0 && --que->disabled_count > 0)
814 goto out; 817 goto out;
815 818
816 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 819 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
817 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 820 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
818out: 821out:
819 mutex_exit(&que->dc_mtx); 822 mutex_exit(&que->dc_mtx);
820} /* ixv_enable_queue */ 823} /* ixv_enable_queue */
821 824
822/************************************************************************ 825/************************************************************************
823 * ixv_disable_queue 826 * ixv_disable_queue
824 ************************************************************************/ 827 ************************************************************************/
825static inline void 828static inline void
826ixv_disable_queue(struct adapter *adapter, u32 vector) 829ixv_disable_queue(struct adapter *adapter, u32 vector)
827{ 830{
828 struct ixgbe_hw *hw = &adapter->hw; 831 struct ixgbe_hw *hw = &adapter->hw;
829 struct ix_queue *que = &adapter->queues[vector]; 832 struct ix_queue *que = &adapter->queues[vector];
830 u32 queue = 1UL << vector; 833 u32 queue = 1UL << vector;
831 u32 mask; 834 u32 mask;
832 835
833 mutex_enter(&que->dc_mtx); 836 mutex_enter(&que->dc_mtx);
834 if (que->disabled_count++ > 0) 837 if (que->disabled_count++ > 0)
835 goto out; 838 goto out;
836 839
837 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 840 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
838 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); 841 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
839out: 842out:
840 mutex_exit(&que->dc_mtx); 843 mutex_exit(&que->dc_mtx);
841} /* ixv_disable_queue */ 844} /* ixv_disable_queue */
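/*
 * An illustration of the disabled_count nesting in the two helpers
 * above (a sketch; "adapter" and vector "v" are assumed): disable and
 * enable calls may nest, and only the outermost ones touch the
 * interrupt mask registers.
 */
#if 0
	ixv_disable_queue(adapter, v);	/* count 0 -> 1, writes VTEIMC */
	ixv_disable_queue(adapter, v);	/* count 1 -> 2, no write */
	ixv_enable_queue(adapter, v);	/* count 2 -> 1, no write */
	ixv_enable_queue(adapter, v);	/* count 1 -> 0, writes VTEIMS */
#endif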
842 845
843#if 0 846#if 0
844static inline void 847static inline void
845ixv_rearm_queues(struct adapter *adapter, u64 queues) 848ixv_rearm_queues(struct adapter *adapter, u64 queues)
846{ 849{
847 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues); 850 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
848 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask); 851 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
849} /* ixv_rearm_queues */ 852} /* ixv_rearm_queues */
850#endif 853#endif
851 854
852 855
853/************************************************************************ 856/************************************************************************
854 * ixv_msix_que - MSI-X Queue Interrupt Service routine 857 * ixv_msix_que - MSI-X Queue Interrupt Service routine
855 ************************************************************************/ 858 ************************************************************************/
856static int 859static int
857ixv_msix_que(void *arg) 860ixv_msix_que(void *arg)
858{ 861{
859 struct ix_queue *que = arg; 862 struct ix_queue *que = arg;
860 struct adapter *adapter = que->adapter; 863 struct adapter *adapter = que->adapter;
861 struct tx_ring *txr = que->txr; 864 struct tx_ring *txr = que->txr;
862 struct rx_ring *rxr = que->rxr; 865 struct rx_ring *rxr = que->rxr;
863 bool more; 866 bool more;
864 u32 newitr = 0; 867 u32 newitr = 0;
865 868
866 ixv_disable_queue(adapter, que->msix); 869 ixv_disable_queue(adapter, que->msix);
867 ++que->irqs.ev_count; 870 ++que->irqs.ev_count;
868 871
869#ifdef __NetBSD__ 872#ifdef __NetBSD__
870 /* Don't run ixgbe_rxeof in interrupt context */ 873 /* Don't run ixgbe_rxeof in interrupt context */
871 more = true; 874 more = true;
872#else 875#else
873 more = ixgbe_rxeof(que); 876 more = ixgbe_rxeof(que);
874#endif 877#endif
875 878
876 IXGBE_TX_LOCK(txr); 879 IXGBE_TX_LOCK(txr);
877 ixgbe_txeof(txr); 880 ixgbe_txeof(txr);
878 IXGBE_TX_UNLOCK(txr); 881 IXGBE_TX_UNLOCK(txr);
879 882
880 /* Do AIM now? */ 883 /* Do AIM now? */
881 884
882 if (adapter->enable_aim == false) 885 if (adapter->enable_aim == false)
883 goto no_calc; 886 goto no_calc;
884 /* 887 /*
885 * Do Adaptive Interrupt Moderation: 888 * Do Adaptive Interrupt Moderation:
886 * - Write out last calculated setting 889 * - Write out last calculated setting
887 * - Calculate based on average size over 890 * - Calculate based on average size over
888 * the last interval. 891 * the last interval.
889 */ 892 */
890 if (que->eitr_setting) 893 if (que->eitr_setting)
891 ixv_eitr_write(adapter, que->msix, que->eitr_setting); 894 ixv_eitr_write(adapter, que->msix, que->eitr_setting);
892 895
893 que->eitr_setting = 0; 896 que->eitr_setting = 0;
894 897
895 /* Idle, do nothing */ 898 /* Idle, do nothing */
896 if ((txr->bytes == 0) && (rxr->bytes == 0)) 899 if ((txr->bytes == 0) && (rxr->bytes == 0))
897 goto no_calc; 900 goto no_calc;
898 901
899 if ((txr->bytes) && (txr->packets)) 902 if ((txr->bytes) && (txr->packets))
900 newitr = txr->bytes/txr->packets; 903 newitr = txr->bytes/txr->packets;
901 if ((rxr->bytes) && (rxr->packets)) 904 if ((rxr->bytes) && (rxr->packets))
902 newitr = uimax(newitr, (rxr->bytes / rxr->packets)); 905 newitr = uimax(newitr, (rxr->bytes / rxr->packets));
903 newitr += 24; /* account for hardware frame, crc */ 906 newitr += 24; /* account for hardware frame, crc */
904 907
905 /* set an upper boundary */ 908 /* set an upper boundary */
906 newitr = uimin(newitr, 3000); 909 newitr = uimin(newitr, 3000);
907 910
908 /* Be nice to the mid range */ 911 /* Be nice to the mid range */
909 if ((newitr > 300) && (newitr < 1200)) 912 if ((newitr > 300) && (newitr < 1200))
910 newitr = (newitr / 3); 913 newitr = (newitr / 3);
911 else 914 else
912 newitr = (newitr / 2); 915 newitr = (newitr / 2);
913 916
914 /* 917 /*
915 * When RSC is used, ITR interval must be larger than RSC_DELAY. 918 * When RSC is used, ITR interval must be larger than RSC_DELAY.
916 * Currently, we use 2us for RSC_DELAY. The minimum value is always 919 * Currently, we use 2us for RSC_DELAY. The minimum value is always
917 * greater than 2us on 100M (and perhaps 10M; not documented), but not 920 * greater than 2us on 100M (and perhaps 10M; not documented), but not
918 * on 1G and higher. 921 * on 1G and higher.
919 */ 922 */
920 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 923 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
921 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 924 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
922 if (newitr < IXGBE_MIN_RSC_EITR_10G1G) 925 if (newitr < IXGBE_MIN_RSC_EITR_10G1G)
923 newitr = IXGBE_MIN_RSC_EITR_10G1G; 926 newitr = IXGBE_MIN_RSC_EITR_10G1G;
924 } 927 }
925 928
926 /* save for next interrupt */ 929 /* save for next interrupt */
927 que->eitr_setting = newitr; 930 que->eitr_setting = newitr;
928 931
929 /* Reset state */ 932 /* Reset state */
930 txr->bytes = 0; 933 txr->bytes = 0;
931 txr->packets = 0; 934 txr->packets = 0;
932 rxr->bytes = 0; 935 rxr->bytes = 0;
933 rxr->packets = 0; 936 rxr->packets = 0;
934 937
935no_calc: 938no_calc:
936 if (more) 939 if (more)
937 softint_schedule(que->que_si); 940 softint_schedule(que->que_si);
938 else /* Re-enable this interrupt */ 941 else /* Re-enable this interrupt */
939 ixv_enable_queue(adapter, que->msix); 942 ixv_enable_queue(adapter, que->msix);
940 943
941 return 1; 944 return 1;
942} /* ixv_msix_que */ 945} /* ixv_msix_que */
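/*
 * A worked AIM example for the routine above: with an average frame of
 * 1500 bytes, newitr = 1500 + 24 = 1524 (the 24 covering hardware
 * framing and CRC); that is under the 3000 cap and outside the
 * 300..1200 mid range, so the final value is 1524 / 2 = 762.  It is
 * saved in que->eitr_setting and written to VTEITR at the start of the
 * next interrupt.
 */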
943 946
944/************************************************************************ 947/************************************************************************
945 * ixv_msix_mbx 948 * ixv_msix_mbx
946 ************************************************************************/ 949 ************************************************************************/
947static int 950static int
948ixv_msix_mbx(void *arg) 951ixv_msix_mbx(void *arg)
949{ 952{
950 struct adapter *adapter = arg; 953 struct adapter *adapter = arg;
951 struct ixgbe_hw *hw = &adapter->hw; 954 struct ixgbe_hw *hw = &adapter->hw;
952 955
953 ++adapter->link_irq.ev_count; 956 ++adapter->link_irq.ev_count;
954 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */ 957 /* NetBSD: We use auto-clear, so it's not required to write VTEICR */
955 958
956 /* Link status change */ 959 /* Link status change */
957 hw->mac.get_link_status = TRUE; 960 hw->mac.get_link_status = TRUE;
958 softint_schedule(adapter->link_si); 961 softint_schedule(adapter->link_si);
959 962
960 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); 963 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
961 964
962 return 1; 965 return 1;
963} /* ixv_msix_mbx */ 966} /* ixv_msix_mbx */
964 967
965static void 968static void
966ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr) 969ixv_eitr_write(struct adapter *adapter, uint32_t index, uint32_t itr)
967{ 970{
968 971
969 /* 972 /*
970 * Only devices newer than the 82598 have a VF function, so this 973 * Only devices newer than the 82598 have a VF function, so this
971 * function can stay simple. 974 * function can stay simple.
972 */ 975 */
973 itr |= IXGBE_EITR_CNT_WDIS; 976 itr |= IXGBE_EITR_CNT_WDIS;
974 977
975 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr); 978 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(index), itr);
976} 979}
977 980
978 981
979/************************************************************************ 982/************************************************************************
980 * ixv_media_status - Media Ioctl callback 983 * ixv_media_status - Media Ioctl callback
981 * 984 *
982 * Called whenever the user queries the status of 985 * Called whenever the user queries the status of
983 * the interface using ifconfig. 986 * the interface using ifconfig.
984 ************************************************************************/ 987 ************************************************************************/
985static void 988static void
986ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 989ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
987{ 990{
988 struct adapter *adapter = ifp->if_softc; 991 struct adapter *adapter = ifp->if_softc;
989 992
990 INIT_DEBUGOUT("ixv_media_status: begin"); 993 INIT_DEBUGOUT("ixv_media_status: begin");
991 IXGBE_CORE_LOCK(adapter); 994 IXGBE_CORE_LOCK(adapter);
992 ixv_update_link_status(adapter); 995 ixv_update_link_status(adapter);
993 996
994 ifmr->ifm_status = IFM_AVALID; 997 ifmr->ifm_status = IFM_AVALID;
995 ifmr->ifm_active = IFM_ETHER; 998 ifmr->ifm_active = IFM_ETHER;
996 999
997 if (adapter->link_active != LINK_STATE_UP) { 1000 if (adapter->link_active != LINK_STATE_UP) {
998 ifmr->ifm_active |= IFM_NONE; 1001 ifmr->ifm_active |= IFM_NONE;
999 IXGBE_CORE_UNLOCK(adapter); 1002 IXGBE_CORE_UNLOCK(adapter);
1000 return; 1003 return;
1001 } 1004 }
1002 1005
1003 ifmr->ifm_status |= IFM_ACTIVE; 1006 ifmr->ifm_status |= IFM_ACTIVE;
1004 1007
1005 switch (adapter->link_speed) { 1008 switch (adapter->link_speed) {
1006 case IXGBE_LINK_SPEED_10GB_FULL: 1009 case IXGBE_LINK_SPEED_10GB_FULL:
1007 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 1010 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1008 break; 1011 break;
1009 case IXGBE_LINK_SPEED_5GB_FULL: 1012 case IXGBE_LINK_SPEED_5GB_FULL:
1010 ifmr->ifm_active |= IFM_5000_T | IFM_FDX; 1013 ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
1011 break; 1014 break;
1012 case IXGBE_LINK_SPEED_2_5GB_FULL: 1015 case IXGBE_LINK_SPEED_2_5GB_FULL:
1013 ifmr->ifm_active |= IFM_2500_T | IFM_FDX; 1016 ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
1014 break; 1017 break;
1015 case IXGBE_LINK_SPEED_1GB_FULL: 1018 case IXGBE_LINK_SPEED_1GB_FULL:
1016 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 1019 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1017 break; 1020 break;
1018 case IXGBE_LINK_SPEED_100_FULL: 1021 case IXGBE_LINK_SPEED_100_FULL:
1019 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 1022 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1020 break; 1023 break;
1021 case IXGBE_LINK_SPEED_10_FULL: 1024 case IXGBE_LINK_SPEED_10_FULL:
1022 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 1025 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1023 break; 1026 break;
1024 } 1027 }
1025 1028
1026 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); 1029 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
1027 1030
1028 IXGBE_CORE_UNLOCK(adapter); 1031 IXGBE_CORE_UNLOCK(adapter);
1029} /* ixv_media_status */ 1032} /* ixv_media_status */
1030 1033
1031/************************************************************************ 1034/************************************************************************
1032 * ixv_media_change - Media Ioctl callback 1035 * ixv_media_change - Media Ioctl callback
1033 * 1036 *
1034 * Called when the user changes speed/duplex using 1037 * Called when the user changes speed/duplex using
1035 * the media/mediaopt options of ifconfig. 1038 * the media/mediaopt options of ifconfig.
1036 ************************************************************************/ 1039 ************************************************************************/
1037static int 1040static int
1038ixv_media_change(struct ifnet *ifp) 1041ixv_media_change(struct ifnet *ifp)
1039{ 1042{
1040 struct adapter *adapter = ifp->if_softc; 1043 struct adapter *adapter = ifp->if_softc;
1041 struct ifmedia *ifm = &adapter->media; 1044 struct ifmedia *ifm = &adapter->media;
1042 1045
1043 INIT_DEBUGOUT("ixv_media_change: begin"); 1046 INIT_DEBUGOUT("ixv_media_change: begin");
1044 1047
1045 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1048 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1046 return (EINVAL); 1049 return (EINVAL);
1047 1050
1048 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1051 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1049 case IFM_AUTO: 1052 case IFM_AUTO:
1050 break; 1053 break;
1051 default: 1054 default:
1052 device_printf(adapter->dev, "Only auto media type\n"); 1055 device_printf(adapter->dev, "Only auto media type\n");
1053 return (EINVAL); 1056 return (EINVAL);
1054 } 1057 }
1055 1058
1056 return (0); 1059 return (0);
1057} /* ixv_media_change */ 1060} /* ixv_media_change */
1058 1061
1059/************************************************************************ 1062/************************************************************************
1060 * ixv_negotiate_api 1063 * ixv_negotiate_api
1061 * 1064 *
1062 * Negotiate the Mailbox API with the PF; 1065 * Negotiate the Mailbox API with the PF;
1063 * starting with the most-featured API. 1066 * starting with the most-featured API.
1064 ************************************************************************/ 1067 ************************************************************************/
1065static int 1068static int
1066ixv_negotiate_api(struct adapter *adapter) 1069ixv_negotiate_api(struct adapter *adapter)
1067{ 1070{
1068 struct ixgbe_hw *hw = &adapter->hw; 1071 struct ixgbe_hw *hw = &adapter->hw;
1069 int mbx_api[] = { ixgbe_mbox_api_13, 1072 int mbx_api[] = { ixgbe_mbox_api_13,
1070 ixgbe_mbox_api_12, 1073 ixgbe_mbox_api_12,
1071 ixgbe_mbox_api_11, 1074 ixgbe_mbox_api_11,
1072 ixgbe_mbox_api_10, 1075 ixgbe_mbox_api_10,
1073 ixgbe_mbox_api_unknown }; 1076 ixgbe_mbox_api_unknown };
1074 int i = 0; 1077 int i = 0;
1075 1078
1076 while (mbx_api[i] != ixgbe_mbox_api_unknown) { 1079 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
1077 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) 1080 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
1078 return (0); 1081 return (0);
1079 i++; 1082 i++;
1080 } 1083 }
1081 1084
1082 return (EINVAL); 1085 return (EINVAL);
1083} /* ixv_negotiate_api */ 1086} /* ixv_negotiate_api */
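/*
 * A note on the routine above: the mbx_api[] list is ordered
 * newest-first, so the negotiated version is the highest one both
 * sides accept; against a PF that only speaks 1.1, for example, the
 * 1.3 and 1.2 attempts fail and the loop settles on ixgbe_mbox_api_11.
 */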
1084 1087
1085 1088
1086/************************************************************************ 1089/************************************************************************
1087 * ixv_set_rxfilter - Multicast Update 1090 * ixv_set_rxfilter - Multicast Update
1088 * 1091 *
1089 * Called whenever multicast address list is updated. 1092 * Called whenever multicast address list is updated.
1090 ************************************************************************/ 1093 ************************************************************************/
1091static int 1094static int
1092ixv_set_rxfilter(struct adapter *adapter) 1095ixv_set_rxfilter(struct adapter *adapter)
1093{ 1096{
1094 u8 mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS]; 1097 u8 mta[IXGBE_MAX_VF_MC * IXGBE_ETH_LENGTH_OF_ADDRESS];
1095 struct ifnet *ifp = adapter->ifp; 1098 struct ifnet *ifp = adapter->ifp;
1096 struct ixgbe_hw *hw = &adapter->hw; 1099 struct ixgbe_hw *hw = &adapter->hw;
1097 u8 *update_ptr; 1100 u8 *update_ptr;
1098 int mcnt = 0; 1101 int mcnt = 0;
1099 struct ethercom *ec = &adapter->osdep.ec; 1102 struct ethercom *ec = &adapter->osdep.ec;
1100 struct ether_multi *enm; 1103 struct ether_multi *enm;
1101 struct ether_multistep step; 1104 struct ether_multistep step;
1102 bool overflow = false; 1105 bool overflow = false;
1103 int error, rc = 0; 1106 int error, rc = 0;
1104 1107
1105 KASSERT(mutex_owned(&adapter->core_mtx)); 1108 KASSERT(mutex_owned(&adapter->core_mtx));
1106 IOCTL_DEBUGOUT("ixv_set_rxfilter: begin"); 1109 IOCTL_DEBUGOUT("ixv_set_rxfilter: begin");
1107 1110
1108 /* 1: For PROMISC */ 1111 /* 1: For PROMISC */
1109 if (ifp->if_flags & IFF_PROMISC) { 1112 if (ifp->if_flags & IFF_PROMISC) {
1110 error = hw->mac.ops.update_xcast_mode(hw, 1113 error = hw->mac.ops.update_xcast_mode(hw,
1111 IXGBEVF_XCAST_MODE_PROMISC); 1114 IXGBEVF_XCAST_MODE_PROMISC);
1112 if (error == IXGBE_ERR_NOT_TRUSTED) { 1115 if (error == IXGBE_ERR_NOT_TRUSTED) {
1113 device_printf(adapter->dev, 1116 device_printf(adapter->dev,
1114 "this interface is not trusted\n"); 1117 "this interface is not trusted\n");
1115 error = EPERM; 1118 error = EPERM;
1116 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) { 1119 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
1117 device_printf(adapter->dev, 1120 device_printf(adapter->dev,
1118 "the PF doesn't support promisc mode\n"); 1121 "the PF doesn't support promisc mode\n");
1119 error = EOPNOTSUPP; 1122 error = EOPNOTSUPP;
1120 } else if (error == IXGBE_ERR_NOT_IN_PROMISC) { 1123 } else if (error == IXGBE_ERR_NOT_IN_PROMISC) {
1121 device_printf(adapter->dev, 1124 device_printf(adapter->dev,
1122 "the PF may not in promisc mode\n"); 1125 "the PF may not in promisc mode\n");
1123 error = EINVAL; 1126 error = EINVAL;
1124 } else if (error) { 1127 } else if (error) {
1125 device_printf(adapter->dev, 1128 device_printf(adapter->dev,
1126 "failed to set promisc mode. error = %d\n", 1129 "failed to set promisc mode. error = %d\n",
1127 error); 1130 error);
1128 error = EIO; 1131 error = EIO;
1129 } else 1132 } else
1130 return 0; 1133 return 0;
1131 rc = error; 1134 rc = error;
1132 } 1135 }
1133 1136
1134 /* 2: For ALLMULTI or normal */ 1137 /* 2: For ALLMULTI or normal */
1135 ETHER_LOCK(ec); 1138 ETHER_LOCK(ec);
1136 ETHER_FIRST_MULTI(step, ec, enm); 1139 ETHER_FIRST_MULTI(step, ec, enm);
1137 while (enm != NULL) { 1140 while (enm != NULL) {
1138 if ((mcnt >= IXGBE_MAX_VF_MC) || 1141 if ((mcnt >= IXGBE_MAX_VF_MC) ||
1139 (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1142 (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1140 ETHER_ADDR_LEN) != 0)) { 1143 ETHER_ADDR_LEN) != 0)) {
1141 overflow = true; 1144 overflow = true;
1142 break; 1145 break;
1143 } 1146 }
1144 bcopy(enm->enm_addrlo, 1147 bcopy(enm->enm_addrlo,
1145 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], 1148 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1146 IXGBE_ETH_LENGTH_OF_ADDRESS); 1149 IXGBE_ETH_LENGTH_OF_ADDRESS);
1147 mcnt++; 1150 mcnt++;
1148 ETHER_NEXT_MULTI(step, enm); 1151 ETHER_NEXT_MULTI(step, enm);
1149 } 1152 }
1150 ETHER_UNLOCK(ec); 1153 ETHER_UNLOCK(ec);
1151 1154
1152 /* 3: For ALLMULTI */ 1155 /* 3: For ALLMULTI */
1153 if (overflow) { 1156 if (overflow) {
1154 error = hw->mac.ops.update_xcast_mode(hw, 1157 error = hw->mac.ops.update_xcast_mode(hw,
1155 IXGBEVF_XCAST_MODE_ALLMULTI); 1158 IXGBEVF_XCAST_MODE_ALLMULTI);
1156 if (error == IXGBE_ERR_NOT_TRUSTED) { 1159 if (error == IXGBE_ERR_NOT_TRUSTED) {
1157 device_printf(adapter->dev, 1160 device_printf(adapter->dev,
1158 "this interface is not trusted\n"); 1161 "this interface is not trusted\n");
1159 error = EPERM; 1162 error = EPERM;
1160 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) { 1163 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
1161 device_printf(adapter->dev, 1164 device_printf(adapter->dev,
1162 "the PF doesn't support allmulti mode\n"); 1165 "the PF doesn't support allmulti mode\n");
1163 error = EOPNOTSUPP; 1166 error = EOPNOTSUPP;
1164 } else if (error) { 1167 } else if (error) {
1165 device_printf(adapter->dev, 1168 device_printf(adapter->dev,
1166 "number of Ethernet multicast addresses " 1169 "number of Ethernet multicast addresses "
1167 "exceeds the limit (%d). error = %d\n", 1170 "exceeds the limit (%d). error = %d\n",
1168 IXGBE_MAX_VF_MC, error); 1171 IXGBE_MAX_VF_MC, error);
1169 error = ENOSPC; 1172 error = ENOSPC;
1170 } else { 1173 } else {
1171 ETHER_LOCK(ec); 1174 ETHER_LOCK(ec);
1172 ec->ec_flags |= ETHER_F_ALLMULTI; 1175 ec->ec_flags |= ETHER_F_ALLMULTI;
1173 ETHER_UNLOCK(ec); 1176 ETHER_UNLOCK(ec);
1174 return rc; /* Promisc might have failed */ 1177 return rc; /* Promisc might have failed */
1175 } 1178 }
1176 1179
1177 if (rc == 0) 1180 if (rc == 0)
1178 rc = error; 1181 rc = error;
1179 1182
1180 /* Continue updating the multicast table with as many entries as we can */ 1183 /* Continue updating the multicast table with as many entries as we can */
1181 } 1184 }
1182 1185
1183 /* 4: For normal operation */ 1186 /* 4: For normal operation */
1184 error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI); 1187 error = hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
1185 if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) { 1188 if ((error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) || (error == 0)) {
1186 /* Normal operation */ 1189 /* Normal operation */
1187 ETHER_LOCK(ec); 1190 ETHER_LOCK(ec);
1188 ec->ec_flags &= ~ETHER_F_ALLMULTI; 1191 ec->ec_flags &= ~ETHER_F_ALLMULTI;
1189 ETHER_UNLOCK(ec); 1192 ETHER_UNLOCK(ec);
1190 error = 0; 1193 error = 0;
1191 } else if (error) { 1194 } else if (error) {
1192 device_printf(adapter->dev, 1195 device_printf(adapter->dev,
1193 "failed to set Ethernet multicast address " 1196 "failed to set Ethernet multicast address "
1194 "operation to normal. error = %d\n", error); 1197 "operation to normal. error = %d\n", error);
1195 } 1198 }
1196 1199
1197 update_ptr = mta; 1200 update_ptr = mta;
1198 1201
1199 error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, 1202 error = adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw,
1200 update_ptr, mcnt, ixv_mc_array_itr, TRUE); 1203 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1201 if (rc == 0) 1204 if (rc == 0)
1202 rc = error; 1205 rc = error;
1203 1206
1204 return rc; 1207 return rc;
1205} /* ixv_set_rxfilter */ 1208} /* ixv_set_rxfilter */
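
The filter programming above degrades through three tiers: promiscuous when IFF_PROMISC is set and the PF permits it, allmulti when the group list overflows IXGBE_MAX_VF_MC or contains a range, and otherwise a per-address table pushed via update_mc_addr_list(). A minimal userland sketch of that decision order (the set_xcast() stub is hypothetical and stands in for the mailbox op, which the PF may refuse):

#include <stdbool.h>
#include <stdio.h>

enum xcast_mode { XCAST_PROMISC, XCAST_ALLMULTI, XCAST_MULTI };

/* Pretend the PF only allows normal multicast filtering. */
static bool
set_xcast(enum xcast_mode mode)
{
	return mode == XCAST_MULTI;
}

static enum xcast_mode
pick_rxfilter(bool promisc, bool overflow)
{
	if (promisc && set_xcast(XCAST_PROMISC))
		return XCAST_PROMISC;	/* trusted VF: done */
	if (overflow && set_xcast(XCAST_ALLMULTI))
		return XCAST_ALLMULTI;	/* too many groups: take all multicast */
	set_xcast(XCAST_MULTI);		/* normal per-address filtering */
	return XCAST_MULTI;
}

int
main(void)
{
	printf("mode = %d\n", (int)pick_rxfilter(true, true));
	return 0;
}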
1206 1209
1207/************************************************************************ 1210/************************************************************************
1208 * ixv_mc_array_itr 1211 * ixv_mc_array_itr
1209 * 1212 *
1210 * An iterator function needed by the multicast shared code. 1213 * An iterator function needed by the multicast shared code.
1211 * It feeds the shared code routine the addresses in the 1214 * It feeds the shared code routine the addresses in the
1212 * array of ixv_set_rxfilter() one by one. 1215 * array of ixv_set_rxfilter() one by one.
1213 ************************************************************************/ 1216 ************************************************************************/
1214static u8 * 1217static u8 *
1215ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 1218ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1216{ 1219{
1217 u8 *addr = *update_ptr; 1220 u8 *addr = *update_ptr;
1218 u8 *newptr; 1221 u8 *newptr;
1219 1222
1220 *vmdq = 0; 1223 *vmdq = 0;
1221 1224
1222 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 1225 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1223 *update_ptr = newptr; 1226 *update_ptr = newptr;
1224 1227
1225 return addr; 1228 return addr;
1226} /* ixv_mc_array_itr */ 1229} /* ixv_mc_array_itr */
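
The iterator contract is easy to model outside the kernel: each call returns the current six-byte address and advances *update_ptr past it, so the shared code never needs to know the array layout. A standalone sketch (the multicast addresses below are made up):

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6	/* stands in for IXGBE_ETH_LENGTH_OF_ADDRESS */

static uint8_t *
next_addr(uint8_t **update_ptr)
{
	uint8_t *addr = *update_ptr;

	*update_ptr = addr + ETH_ALEN;	/* advance to the next entry */
	return addr;
}

int
main(void)
{
	uint8_t mta[2 * ETH_ALEN] = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
		0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb,
	};
	uint8_t *p = mta;
	int i;

	for (i = 0; i < 2; i++) {
		uint8_t *a = next_addr(&p);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		    a[0], a[1], a[2], a[3], a[4], a[5]);
	}
	return 0;
}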
1227 1230
1228/************************************************************************ 1231/************************************************************************
1229 * ixv_local_timer - Timer routine 1232 * ixv_local_timer - Timer routine
1230 * 1233 *
1231 * Checks for link status, updates statistics, 1234 * Checks for link status, updates statistics,
1232 * and runs the watchdog check. 1235 * and runs the watchdog check.
1233 ************************************************************************/ 1236 ************************************************************************/
1234static void 1237static void
1235ixv_local_timer(void *arg) 1238ixv_local_timer(void *arg)
1236{ 1239{
1237 struct adapter *adapter = arg; 1240 struct adapter *adapter = arg;
1238 1241
1239 IXGBE_CORE_LOCK(adapter); 1242 IXGBE_CORE_LOCK(adapter);
1240 ixv_local_timer_locked(adapter); 1243 ixv_local_timer_locked(adapter);
1241 IXGBE_CORE_UNLOCK(adapter); 1244 IXGBE_CORE_UNLOCK(adapter);
1242} 1245}
1243 1246
1244static void 1247static void
1245ixv_local_timer_locked(void *arg) 1248ixv_local_timer_locked(void *arg)
1246{ 1249{
1247 struct adapter *adapter = arg; 1250 struct adapter *adapter = arg;
1248 device_t dev = adapter->dev; 1251 device_t dev = adapter->dev;
1249 struct ix_queue *que = adapter->queues; 1252 struct ix_queue *que = adapter->queues;
1250 u64 queues = 0; 1253 u64 queues = 0;
1251 u64 v0, v1, v2, v3, v4, v5, v6, v7; 1254 u64 v0, v1, v2, v3, v4, v5, v6, v7;
1252 int hung = 0; 1255 int hung = 0;
1253 int i; 1256 int i;
1254 1257
1255 KASSERT(mutex_owned(&adapter->core_mtx)); 1258 KASSERT(mutex_owned(&adapter->core_mtx));
1256 1259
1257 if (ixv_check_link(adapter)) { 1260 if (ixv_check_link(adapter)) {
1258 ixv_init_locked(adapter); 1261 ixv_init_locked(adapter);
1259 return; 1262 return;
1260 } 1263 }
1261 1264
1262 /* Stats Update */ 1265 /* Stats Update */
1263 ixv_update_stats(adapter); 1266 ixv_update_stats(adapter);
1264 1267
1265 /* Update some event counters */ 1268 /* Update some event counters */
1266 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0; 1269 v0 = v1 = v2 = v3 = v4 = v5 = v6 = v7 = 0;
1267 que = adapter->queues; 1270 que = adapter->queues;
1268 for (i = 0; i < adapter->num_queues; i++, que++) { 1271 for (i = 0; i < adapter->num_queues; i++, que++) {
1269 struct tx_ring *txr = que->txr; 1272 struct tx_ring *txr = que->txr;
1270 1273
1271 v0 += txr->q_efbig_tx_dma_setup; 1274 v0 += txr->q_efbig_tx_dma_setup;
1272 v1 += txr->q_mbuf_defrag_failed; 1275 v1 += txr->q_mbuf_defrag_failed;
1273 v2 += txr->q_efbig2_tx_dma_setup; 1276 v2 += txr->q_efbig2_tx_dma_setup;
1274 v3 += txr->q_einval_tx_dma_setup; 1277 v3 += txr->q_einval_tx_dma_setup;
1275 v4 += txr->q_other_tx_dma_setup; 1278 v4 += txr->q_other_tx_dma_setup;
1276 v5 += txr->q_eagain_tx_dma_setup; 1279 v5 += txr->q_eagain_tx_dma_setup;
1277 v6 += txr->q_enomem_tx_dma_setup; 1280 v6 += txr->q_enomem_tx_dma_setup;
1278 v7 += txr->q_tso_err; 1281 v7 += txr->q_tso_err;
1279 } 1282 }
1280 adapter->efbig_tx_dma_setup.ev_count = v0; 1283 adapter->efbig_tx_dma_setup.ev_count = v0;
1281 adapter->mbuf_defrag_failed.ev_count = v1; 1284 adapter->mbuf_defrag_failed.ev_count = v1;
1282 adapter->efbig2_tx_dma_setup.ev_count = v2; 1285 adapter->efbig2_tx_dma_setup.ev_count = v2;
1283 adapter->einval_tx_dma_setup.ev_count = v3; 1286 adapter->einval_tx_dma_setup.ev_count = v3;
1284 adapter->other_tx_dma_setup.ev_count = v4; 1287 adapter->other_tx_dma_setup.ev_count = v4;
1285 adapter->eagain_tx_dma_setup.ev_count = v5; 1288 adapter->eagain_tx_dma_setup.ev_count = v5;
1286 adapter->enomem_tx_dma_setup.ev_count = v6; 1289 adapter->enomem_tx_dma_setup.ev_count = v6;
1287 adapter->tso_err.ev_count = v7; 1290 adapter->tso_err.ev_count = v7;
1288 1291
1289 /* 1292 /*
1290 * Check the TX queues status 1293 * Check the TX queues status
1291 * - mark hung queues so we don't schedule on them 1294 * - mark hung queues so we don't schedule on them
1292 * - watchdog only if all queues show hung 1295 * - watchdog only if all queues show hung
1293 */ 1296 */
1294 que = adapter->queues; 1297 que = adapter->queues;
1295 for (i = 0; i < adapter->num_queues; i++, que++) { 1298 for (i = 0; i < adapter->num_queues; i++, que++) {
1296 /* Keep track of queues with work for soft irq */ 1299 /* Keep track of queues with work for soft irq */
1297 if (que->txr->busy) 1300 if (que->txr->busy)
1298 queues |= ((u64)1 << que->me); 1301 queues |= ((u64)1 << que->me);
1299 /* 1302 /*
1300 * Each time txeof runs without cleaning while there 1303 * Each time txeof runs without cleaning while there
1301 * are uncleaned descriptors, it increments busy. If 1304 * are uncleaned descriptors, it increments busy. If
1302 * we get to the MAX we declare it hung. 1305 * we get to the MAX we declare it hung.
1303 */ 1306 */
1304 if (que->busy == IXGBE_QUEUE_HUNG) { 1307 if (que->busy == IXGBE_QUEUE_HUNG) {
1305 ++hung; 1308 ++hung;
1306 /* Mark the queue as inactive */ 1309 /* Mark the queue as inactive */
1307 adapter->active_queues &= ~((u64)1 << que->me); 1310 adapter->active_queues &= ~((u64)1 << que->me);
1308 continue; 1311 continue;
1309 } else { 1312 } else {
1310 /* Check if we've come back from hung */ 1313 /* Check if we've come back from hung */
1311 if ((adapter->active_queues & ((u64)1 << que->me)) == 0) 1314 if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
1312 adapter->active_queues |= ((u64)1 << que->me); 1315 adapter->active_queues |= ((u64)1 << que->me);
1313 } 1316 }
1314 if (que->busy >= IXGBE_MAX_TX_BUSY) { 1317 if (que->busy >= IXGBE_MAX_TX_BUSY) {
1315 device_printf(dev, 1318 device_printf(dev,
1316 "Warning queue %d appears to be hung!\n", i); 1319 "Warning queue %d appears to be hung!\n", i);
1317 que->txr->busy = IXGBE_QUEUE_HUNG; 1320 que->txr->busy = IXGBE_QUEUE_HUNG;
1318 ++hung; 1321 ++hung;
1319 } 1322 }
1320 } 1323 }
1321 1324
1322 /* Only truly watchdog if all queues show hung */ 1325 /* Only truly watchdog if all queues show hung */
1323 if (hung == adapter->num_queues) 1326 if (hung == adapter->num_queues)
1324 goto watchdog; 1327 goto watchdog;
1325#if 0 1328#if 0
1326 else if (queues != 0) { /* Force an IRQ on queues with work */ 1329 else if (queues != 0) { /* Force an IRQ on queues with work */
1327 ixv_rearm_queues(adapter, queues); 1330 ixv_rearm_queues(adapter, queues);
1328 } 1331 }
1329#endif 1332#endif
1330 1333
1331 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); 1334 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1332 1335
1333 return; 1336 return;
1334 1337
1335watchdog: 1338watchdog:
1336 1339
1337 device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); 1340 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1338 adapter->ifp->if_flags &= ~IFF_RUNNING; 1341 adapter->ifp->if_flags &= ~IFF_RUNNING;
1339 adapter->watchdog_events.ev_count++; 1342 adapter->watchdog_events.ev_count++;
1340 ixv_init_locked(adapter); 1343 ixv_init_locked(adapter);
1341} /* ixv_local_timer */ 1344} /* ixv_local_timer */
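
The hung-queue bookkeeping above is plain bit arithmetic on a 64-bit mask: each queue owns bit que->me in active_queues, which is cleared while the queue is hung and set again once it recovers. A minimal sketch of those three operations (the queue index is hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t active_queues = 0;
	int me = 3;	/* hypothetical queue index */

	active_queues |= (uint64_t)1 << me;	/* queue healthy */
	printf("active  = 0x%016" PRIx64 "\n", active_queues);

	active_queues &= ~((uint64_t)1 << me);	/* mark hung/inactive */
	printf("hung    = 0x%016" PRIx64 "\n", active_queues);

	if ((active_queues & ((uint64_t)1 << me)) == 0)
		active_queues |= (uint64_t)1 << me;	/* back from hung */
	printf("revived = 0x%016" PRIx64 "\n", active_queues);
	return 0;
}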
1342 1345
1343/************************************************************************ 1346/************************************************************************
1344 * ixv_update_link_status - Update OS on link state 1347 * ixv_update_link_status - Update OS on link state
1345 * 1348 *
1346 * Note: Only updates the OS on the cached link state. 1349 * Note: Only updates the OS on the cached link state.
1347 * The real check of the hardware only happens with 1350 * The real check of the hardware only happens with
1348 * a link interrupt. 1351 * a link interrupt.
1349 ************************************************************************/ 1352 ************************************************************************/
1350static void 1353static void
1351ixv_update_link_status(struct adapter *adapter) 1354ixv_update_link_status(struct adapter *adapter)
1352{ 1355{
1353 struct ifnet *ifp = adapter->ifp; 1356 struct ifnet *ifp = adapter->ifp;
1354 device_t dev = adapter->dev; 1357 device_t dev = adapter->dev;
1355 1358
1356 KASSERT(mutex_owned(&adapter->core_mtx)); 1359 KASSERT(mutex_owned(&adapter->core_mtx));
1357 1360
1358 if (adapter->link_up) { 1361 if (adapter->link_up) {
1359 if (adapter->link_active != LINK_STATE_UP) { 1362 if (adapter->link_active != LINK_STATE_UP) {
1360 if (bootverbose) { 1363 if (bootverbose) {
1361 const char *bpsmsg; 1364 const char *bpsmsg;
1362 1365
1363 switch (adapter->link_speed) { 1366 switch (adapter->link_speed) {
1364 case IXGBE_LINK_SPEED_10GB_FULL: 1367 case IXGBE_LINK_SPEED_10GB_FULL:
1365 bpsmsg = "10 Gbps"; 1368 bpsmsg = "10 Gbps";
1366 break; 1369 break;
1367 case IXGBE_LINK_SPEED_5GB_FULL: 1370 case IXGBE_LINK_SPEED_5GB_FULL:
1368 bpsmsg = "5 Gbps"; 1371 bpsmsg = "5 Gbps";
1369 break; 1372 break;
1370 case IXGBE_LINK_SPEED_2_5GB_FULL: 1373 case IXGBE_LINK_SPEED_2_5GB_FULL:
1371 bpsmsg = "2.5 Gbps"; 1374 bpsmsg = "2.5 Gbps";
1372 break; 1375 break;
1373 case IXGBE_LINK_SPEED_1GB_FULL: 1376 case IXGBE_LINK_SPEED_1GB_FULL:
1374 bpsmsg = "1 Gbps"; 1377 bpsmsg = "1 Gbps";
1375 break; 1378 break;
1376 case IXGBE_LINK_SPEED_100_FULL: 1379 case IXGBE_LINK_SPEED_100_FULL:
1377 bpsmsg = "100 Mbps"; 1380 bpsmsg = "100 Mbps";
1378 break; 1381 break;
1379 case IXGBE_LINK_SPEED_10_FULL: 1382 case IXGBE_LINK_SPEED_10_FULL:
1380 bpsmsg = "10 Mbps"; 1383 bpsmsg = "10 Mbps";
1381 break; 1384 break;
1382 default: 1385 default:
1383 bpsmsg = "unknown speed"; 1386 bpsmsg = "unknown speed";
1384 break; 1387 break;
1385 } 1388 }
1386 device_printf(dev, "Link is up %s %s \n", 1389 device_printf(dev, "Link is up %s %s \n",
1387 bpsmsg, "Full Duplex"); 1390 bpsmsg, "Full Duplex");
1388 } 1391 }
1389 adapter->link_active = LINK_STATE_UP; 1392 adapter->link_active = LINK_STATE_UP;
1390 if_link_state_change(ifp, LINK_STATE_UP); 1393 if_link_state_change(ifp, LINK_STATE_UP);
1391 } 1394 }
1392 } else { 1395 } else {
1393 /* 1396 /*
1394 * Do it when link_active changes to DOWN, i.e. 1397 * Do it when link_active changes to DOWN, i.e.
1395 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN 1398 * a) LINK_STATE_UNKNOWN -> LINK_STATE_DOWN
1396 * b) LINK_STATE_UP -> LINK_STATE_DOWN 1399 * b) LINK_STATE_UP -> LINK_STATE_DOWN
1397 */ 1400 */
1398 if (adapter->link_active != LINK_STATE_DOWN) { 1401 if (adapter->link_active != LINK_STATE_DOWN) {
1399 if (bootverbose) 1402 if (bootverbose)
1400 device_printf(dev, "Link is Down\n"); 1403 device_printf(dev, "Link is Down\n");
1401 if_link_state_change(ifp, LINK_STATE_DOWN); 1404 if_link_state_change(ifp, LINK_STATE_DOWN);
1402 adapter->link_active = LINK_STATE_DOWN; 1405 adapter->link_active = LINK_STATE_DOWN;
1403 } 1406 }
1404 } 1407 }
1405} /* ixv_update_link_status */ 1408} /* ixv_update_link_status */
1406 1409
1407 1410
1408/************************************************************************ 1411/************************************************************************
1409 * ixv_stop - Stop the hardware 1412 * ixv_stop - Stop the hardware
1410 * 1413 *
1411 * Disables all traffic on the adapter by issuing a 1414 * Disables all traffic on the adapter by issuing a
1412 * global reset on the MAC and deallocates TX/RX buffers. 1415 * global reset on the MAC and deallocates TX/RX buffers.
1413 ************************************************************************/ 1416 ************************************************************************/
1414static void 1417static void
1415ixv_ifstop(struct ifnet *ifp, int disable) 1418ixv_ifstop(struct ifnet *ifp, int disable)
1416{ 1419{
1417 struct adapter *adapter = ifp->if_softc; 1420 struct adapter *adapter = ifp->if_softc;
1418 1421
1419 IXGBE_CORE_LOCK(adapter); 1422 IXGBE_CORE_LOCK(adapter);
1420 ixv_stop(adapter); 1423 ixv_stop(adapter);
1421 IXGBE_CORE_UNLOCK(adapter); 1424 IXGBE_CORE_UNLOCK(adapter);
1422} 1425}
1423 1426
1424static void 1427static void
1425ixv_stop(void *arg) 1428ixv_stop(void *arg)
1426{ 1429{
1427 struct ifnet *ifp; 1430 struct ifnet *ifp;
1428 struct adapter *adapter = arg; 1431 struct adapter *adapter = arg;
1429 struct ixgbe_hw *hw = &adapter->hw; 1432 struct ixgbe_hw *hw = &adapter->hw;
1430 1433
1431 ifp = adapter->ifp; 1434 ifp = adapter->ifp;
1432 1435
1433 KASSERT(mutex_owned(&adapter->core_mtx)); 1436 KASSERT(mutex_owned(&adapter->core_mtx));
1434 1437
1435 INIT_DEBUGOUT("ixv_stop: begin\n"); 1438 INIT_DEBUGOUT("ixv_stop: begin\n");
1436 ixv_disable_intr(adapter); 1439 ixv_disable_intr(adapter);
1437 1440
1438 /* Tell the stack that the interface is no longer active */ 1441 /* Tell the stack that the interface is no longer active */
1439 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1442 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1440 1443
1441 hw->mac.ops.reset_hw(hw); 1444 hw->mac.ops.reset_hw(hw);
1442 adapter->hw.adapter_stopped = FALSE; 1445 adapter->hw.adapter_stopped = FALSE;
1443 hw->mac.ops.stop_adapter(hw); 1446 hw->mac.ops.stop_adapter(hw);
1444 callout_stop(&adapter->timer); 1447 callout_stop(&adapter->timer);
1445 1448
1446 /* Reprogram RAR[0] in case the user changed it. */ 1449 /* Reprogram RAR[0] in case the user changed it. */
1447 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1450 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1448 1451
1449 return; 1452 return;
1450} /* ixv_stop */ 1453} /* ixv_stop */
1451 1454
1452 1455
1453/************************************************************************ 1456/************************************************************************
1454 * ixv_allocate_pci_resources 1457 * ixv_allocate_pci_resources
1455 ************************************************************************/ 1458 ************************************************************************/
1456static int 1459static int
1457ixv_allocate_pci_resources(struct adapter *adapter, 1460ixv_allocate_pci_resources(struct adapter *adapter,
1458 const struct pci_attach_args *pa) 1461 const struct pci_attach_args *pa)
1459{ 1462{
1460 pcireg_t memtype, csr; 1463 pcireg_t memtype, csr;
1461 device_t dev = adapter->dev; 1464 device_t dev = adapter->dev;
1462 bus_addr_t addr; 1465 bus_addr_t addr;
1463 int flags; 1466 int flags;
1464 1467
1465 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); 1468 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0));
1466 switch (memtype) { 1469 switch (memtype) {
1467 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1470 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1468 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1471 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1469 adapter->osdep.mem_bus_space_tag = pa->pa_memt; 1472 adapter->osdep.mem_bus_space_tag = pa->pa_memt;
1470 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0), 1473 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0),
1471 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0) 1474 memtype, &addr, &adapter->osdep.mem_size, &flags) != 0)
1472 goto map_err; 1475 goto map_err;
1473 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) { 1476 if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) {
1474 aprint_normal_dev(dev, "clearing prefetchable bit\n"); 1477 aprint_normal_dev(dev, "clearing prefetchable bit\n");
1475 flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 1478 flags &= ~BUS_SPACE_MAP_PREFETCHABLE;
1476 } 1479 }
1477 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr, 1480 if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr,
1478 adapter->osdep.mem_size, flags, 1481 adapter->osdep.mem_size, flags,
1479 &adapter->osdep.mem_bus_space_handle) != 0) { 1482 &adapter->osdep.mem_bus_space_handle) != 0) {
1480map_err: 1483map_err:
1481 adapter->osdep.mem_size = 0; 1484 adapter->osdep.mem_size = 0;
1482 aprint_error_dev(dev, "unable to map BAR0\n"); 1485 aprint_error_dev(dev, "unable to map BAR0\n");
1483 return ENXIO; 1486 return ENXIO;
1484 } 1487 }
1485 /* 1488 /*
1486 * Enable address decoding for memory range in case it's not 1489 * Enable address decoding for memory range in case it's not
1487 * set. 1490 * set.
1488 */ 1491 */
1489 csr = pci_conf_read(pa->pa_pc, pa->pa_tag, 1492 csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
1490 PCI_COMMAND_STATUS_REG); 1493 PCI_COMMAND_STATUS_REG);
1491 csr |= PCI_COMMAND_MEM_ENABLE; 1494 csr |= PCI_COMMAND_MEM_ENABLE;
1492 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 1495 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1493 csr); 1496 csr);
1494 break; 1497 break;
1495 default: 1498 default:
1496 aprint_error_dev(dev, "unexpected type on BAR0\n"); 1499 aprint_error_dev(dev, "unexpected type on BAR0\n");
1497 return ENXIO; 1500 return ENXIO;
1498 } 1501 }
1499 1502
@@ -1503,1929 +1506,1936 @@ map_err: @@ -1503,1929 +1506,1936 @@ map_err:
1503 return (0); 1506 return (0);
1504} /* ixv_allocate_pci_resources */ 1507} /* ixv_allocate_pci_resources */
1505 1508
1506/************************************************************************ 1509/************************************************************************
1507 * ixv_free_pci_resources 1510 * ixv_free_pci_resources
1508 ************************************************************************/ 1511 ************************************************************************/
1509static void 1512static void
1510ixv_free_pci_resources(struct adapter * adapter) 1513ixv_free_pci_resources(struct adapter * adapter)
1511{ 1514{
1512 struct ix_queue *que = adapter->queues; 1515 struct ix_queue *que = adapter->queues;
1513 int rid; 1516 int rid;
1514 1517
1515 /* 1518 /*
1516 * Release all MSI-X queue resources. 1519 * Release all MSI-X queue resources.
1517 */ 1520 */
1518 for (int i = 0; i < adapter->num_queues; i++, que++) { 1521 for (int i = 0; i < adapter->num_queues; i++, que++) {
1519 if (que->res != NULL) 1522 if (que->res != NULL)
1520 pci_intr_disestablish(adapter->osdep.pc, 1523 pci_intr_disestablish(adapter->osdep.pc,
1521 adapter->osdep.ihs[i]); 1524 adapter->osdep.ihs[i]);
1522 } 1525 }
1523 1526
1524 1527
1525 /* Clean the Mailbox interrupt last */ 1528 /* Clean the Mailbox interrupt last */
1526 rid = adapter->vector; 1529 rid = adapter->vector;
1527 1530
1528 if (adapter->osdep.ihs[rid] != NULL) { 1531 if (adapter->osdep.ihs[rid] != NULL) {
1529 pci_intr_disestablish(adapter->osdep.pc, 1532 pci_intr_disestablish(adapter->osdep.pc,
1530 adapter->osdep.ihs[rid]); 1533 adapter->osdep.ihs[rid]);
1531 adapter->osdep.ihs[rid] = NULL; 1534 adapter->osdep.ihs[rid] = NULL;
1532 } 1535 }
1533 1536
1534 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs, 1537 pci_intr_release(adapter->osdep.pc, adapter->osdep.intrs,
1535 adapter->osdep.nintrs); 1538 adapter->osdep.nintrs);
1536 1539
1537 if (adapter->osdep.mem_size != 0) { 1540 if (adapter->osdep.mem_size != 0) {
1538 bus_space_unmap(adapter->osdep.mem_bus_space_tag, 1541 bus_space_unmap(adapter->osdep.mem_bus_space_tag,
1539 adapter->osdep.mem_bus_space_handle, 1542 adapter->osdep.mem_bus_space_handle,
1540 adapter->osdep.mem_size); 1543 adapter->osdep.mem_size);
1541 } 1544 }
1542 1545
1543 return; 1546 return;
1544} /* ixv_free_pci_resources */ 1547} /* ixv_free_pci_resources */
1545 1548
1546/************************************************************************ 1549/************************************************************************
1547 * ixv_setup_interface 1550 * ixv_setup_interface
1548 * 1551 *
1549 * Setup networking device structure and register an interface. 1552 * Setup networking device structure and register an interface.
1550 ************************************************************************/ 1553 ************************************************************************/
1551static int 1554static int
1552ixv_setup_interface(device_t dev, struct adapter *adapter) 1555ixv_setup_interface(device_t dev, struct adapter *adapter)
1553{ 1556{
1554 struct ethercom *ec = &adapter->osdep.ec; 1557 struct ethercom *ec = &adapter->osdep.ec;
1555 struct ifnet *ifp; 1558 struct ifnet *ifp;
1556 int rv; 1559 int rv;
1557 1560
1558 INIT_DEBUGOUT("ixv_setup_interface: begin"); 1561 INIT_DEBUGOUT("ixv_setup_interface: begin");
1559 1562
1560 ifp = adapter->ifp = &ec->ec_if; 1563 ifp = adapter->ifp = &ec->ec_if;
1561 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); 1564 strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ);
1562 ifp->if_baudrate = IF_Gbps(10); 1565 ifp->if_baudrate = IF_Gbps(10);
1563 ifp->if_init = ixv_init; 1566 ifp->if_init = ixv_init;
1564 ifp->if_stop = ixv_ifstop; 1567 ifp->if_stop = ixv_ifstop;
1565 ifp->if_softc = adapter; 1568 ifp->if_softc = adapter;
1566 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1569 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1567#ifdef IXGBE_MPSAFE 1570#ifdef IXGBE_MPSAFE
1568 ifp->if_extflags = IFEF_MPSAFE; 1571 ifp->if_extflags = IFEF_MPSAFE;
1569#endif 1572#endif
1570 ifp->if_ioctl = ixv_ioctl; 1573 ifp->if_ioctl = ixv_ioctl;
1571 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) { 1574 if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
1572#if 0 1575#if 0
1573 ixv_start_locked = ixgbe_legacy_start_locked; 1576 ixv_start_locked = ixgbe_legacy_start_locked;
1574#endif 1577#endif
1575 } else { 1578 } else {
1576 ifp->if_transmit = ixgbe_mq_start; 1579 ifp->if_transmit = ixgbe_mq_start;
1577#if 0 1580#if 0
1578 ixv_start_locked = ixgbe_mq_start_locked; 1581 ixv_start_locked = ixgbe_mq_start_locked;
1579#endif 1582#endif
1580 } 1583 }
1581 ifp->if_start = ixgbe_legacy_start; 1584 ifp->if_start = ixgbe_legacy_start;
1582 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2); 1585 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
1583 IFQ_SET_READY(&ifp->if_snd); 1586 IFQ_SET_READY(&ifp->if_snd);
1584 1587
1585 rv = if_initialize(ifp); 1588 rv = if_initialize(ifp);
1586 if (rv != 0) { 1589 if (rv != 0) {
1587 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv); 1590 aprint_error_dev(dev, "if_initialize failed(%d)\n", rv);
1588 return rv; 1591 return rv;
1589 } 1592 }
1590 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if); 1593 adapter->ipq = if_percpuq_create(&adapter->osdep.ec.ec_if);
1591 ether_ifattach(ifp, adapter->hw.mac.addr); 1594 ether_ifattach(ifp, adapter->hw.mac.addr);
1592 aprint_normal_dev(dev, "Ethernet address %s\n", 1595 aprint_normal_dev(dev, "Ethernet address %s\n",
1593 ether_sprintf(adapter->hw.mac.addr)); 1596 ether_sprintf(adapter->hw.mac.addr));
1594 /* 1597 /*
1595 * We use a per-TX-queue softint, so if_deferred_start_init() isn't 1598 * We use a per-TX-queue softint, so if_deferred_start_init() isn't
1596 * used. 1599 * used.
1597 */ 1600 */
1598 ether_set_ifflags_cb(ec, ixv_ifflags_cb); 1601 ether_set_ifflags_cb(ec, ixv_ifflags_cb);
1599 1602
1600 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; 1603 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1601 1604
1602 /* 1605 /*
1603 * Tell the upper layer(s) we support long frames. 1606 * Tell the upper layer(s) we support long frames.
1604 */ 1607 */
1605 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1608 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1606 1609
1607 /* Set capability flags */ 1610 /* Set capability flags */
1608 ifp->if_capabilities |= IFCAP_HWCSUM 1611 ifp->if_capabilities |= IFCAP_HWCSUM
1609 | IFCAP_TSOv4 1612 | IFCAP_TSOv4
1610 | IFCAP_TSOv6; 1613 | IFCAP_TSOv6;
1611 ifp->if_capenable = 0; 1614 ifp->if_capenable = 0;
1612 1615
1613 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER 1616 ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER
1614 | ETHERCAP_VLAN_HWTAGGING 1617 | ETHERCAP_VLAN_HWTAGGING
1615 | ETHERCAP_VLAN_HWCSUM 1618 | ETHERCAP_VLAN_HWCSUM
1616 | ETHERCAP_JUMBO_MTU 1619 | ETHERCAP_JUMBO_MTU
1617 | ETHERCAP_VLAN_MTU; 1620 | ETHERCAP_VLAN_MTU;
1618 1621
1619 /* Enable the above capabilities by default */ 1622 /* Enable the above capabilities by default */
1620 ec->ec_capenable = ec->ec_capabilities; 1623 ec->ec_capenable = ec->ec_capabilities;
1621 1624
1622 /* Don't enable LRO by default */ 1625 /* Don't enable LRO by default */
1623#if 0 1626#if 0
1624 /* NetBSD doesn't support LRO yet */ 1627 /* NetBSD doesn't support LRO yet */
1625 ifp->if_capabilities |= IFCAP_LRO; 1628 ifp->if_capabilities |= IFCAP_LRO;
1626#endif 1629#endif
1627 1630
1628 /* 1631 /*
1629 * Specify the media types supported by this adapter and register 1632 * Specify the media types supported by this adapter and register
1630 * callbacks to update media and link information 1633 * callbacks to update media and link information
1631 */ 1634 */
1632 ec->ec_ifmedia = &adapter->media; 1635 ec->ec_ifmedia = &adapter->media;
1633 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, 1636 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1634 ixv_media_status); 1637 ixv_media_status);
1635 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1638 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1636 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 1639 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1637 1640
1638 if_register(ifp); 1641 if_register(ifp);
1639 1642
1640 return 0; 1643 return 0;
1641} /* ixv_setup_interface */ 1644} /* ixv_setup_interface */
1642 1645
1643 1646
1644/************************************************************************ 1647/************************************************************************
1645 * ixv_initialize_transmit_units - Enable transmit unit. 1648 * ixv_initialize_transmit_units - Enable transmit unit.
1646 ************************************************************************/ 1649 ************************************************************************/
1647static void 1650static void
1648ixv_initialize_transmit_units(struct adapter *adapter) 1651ixv_initialize_transmit_units(struct adapter *adapter)
1649{ 1652{
1650 struct tx_ring *txr = adapter->tx_rings; 1653 struct tx_ring *txr = adapter->tx_rings;
1651 struct ixgbe_hw *hw = &adapter->hw; 1654 struct ixgbe_hw *hw = &adapter->hw;
1652 int i; 1655 int i;
1653 1656
1654 for (i = 0; i < adapter->num_queues; i++, txr++) { 1657 for (i = 0; i < adapter->num_queues; i++, txr++) {
1655 u64 tdba = txr->txdma.dma_paddr; 1658 u64 tdba = txr->txdma.dma_paddr;
1656 u32 txctrl, txdctl; 1659 u32 txctrl, txdctl;
1657 int j = txr->me; 1660 int j = txr->me;
1658 1661
1659 /* Set WTHRESH to 8, burst writeback */ 1662 /* Set WTHRESH to 8, burst writeback */
1660 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1663 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1661 txdctl |= (8 << 16); 1664 txdctl |= (8 << 16);
1662 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1665 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1663 1666
1664 /* Set the HW Tx Head and Tail indices */ 1667 /* Set the HW Tx Head and Tail indices */
1665 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0); 1668 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1666 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0); 1669 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1667 1670
1668 /* Set Tx Tail register */ 1671 /* Set Tx Tail register */
1669 txr->tail = IXGBE_VFTDT(j); 1672 txr->tail = IXGBE_VFTDT(j);
1670 1673
1671 txr->txr_no_space = false; 1674 txr->txr_no_space = false;
1672 1675
1673 /* Set Ring parameters */ 1676 /* Set Ring parameters */
1674 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1677 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1675 (tdba & 0x00000000ffffffffULL)); 1678 (tdba & 0x00000000ffffffffULL));
1676 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1679 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1677 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), 1680 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1678 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc)); 1681 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1679 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1682 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1680 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 1683 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1681 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1684 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1682 1685
1683 /* Now enable */ 1686 /* Now enable */
1684 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1687 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1685 txdctl |= IXGBE_TXDCTL_ENABLE; 1688 txdctl |= IXGBE_TXDCTL_ENABLE;
1686 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1689 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1687 } 1690 }
1688 1691
1689 return; 1692 return;
1690} /* ixv_initialize_transmit_units */ 1693} /* ixv_initialize_transmit_units */
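
Two details above are worth spelling out: the 64-bit ring base address is split into 32-bit halves for VFTDBAL/VFTDBAH, and the WTHRESH writeback threshold starts at bit 16 of TXDCTL, hence the 8 << 16. A standalone sketch of the arithmetic (the base address is a made-up value):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t tdba = 0x0000000123456000ULL;	/* hypothetical DMA base */
	uint32_t tdbal = (uint32_t)(tdba & 0x00000000ffffffffULL);
	uint32_t tdbah = (uint32_t)(tdba >> 32);
	uint32_t txdctl = 0;

	txdctl |= 8 << 16;	/* WTHRESH = 8: burst descriptor writeback */

	printf("TDBAL=0x%08x TDBAH=0x%08x TXDCTL=0x%08x\n",
	    tdbal, tdbah, txdctl);
	return 0;
}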
1691 1694
1692 1695
1693/************************************************************************ 1696/************************************************************************
1694 * ixv_initialize_rss_mapping 1697 * ixv_initialize_rss_mapping
1695 ************************************************************************/ 1698 ************************************************************************/
1696static void 1699static void
1697ixv_initialize_rss_mapping(struct adapter *adapter) 1700ixv_initialize_rss_mapping(struct adapter *adapter)
1698{ 1701{
1699 struct ixgbe_hw *hw = &adapter->hw; 1702 struct ixgbe_hw *hw = &adapter->hw;
1700 u32 reta = 0, mrqc, rss_key[10]; 1703 u32 reta = 0, mrqc, rss_key[10];
1701 int queue_id; 1704 int queue_id;
1702 int i, j; 1705 int i, j;
1703 u32 rss_hash_config; 1706 u32 rss_hash_config;
1704 1707
1705 /* Force use of the default RSS key. */ 1708 /* Force use of the default RSS key. */
1706#ifdef __NetBSD__ 1709#ifdef __NetBSD__
1707 rss_getkey((uint8_t *) &rss_key); 1710 rss_getkey((uint8_t *) &rss_key);
1708#else 1711#else
1709 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 1712 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1710 /* Fetch the configured RSS key */ 1713 /* Fetch the configured RSS key */
1711 rss_getkey((uint8_t *)&rss_key); 1714 rss_getkey((uint8_t *)&rss_key);
1712 } else { 1715 } else {
1713 /* set up random bits */ 1716 /* set up random bits */
1714 cprng_fast(&rss_key, sizeof(rss_key)); 1717 cprng_fast(&rss_key, sizeof(rss_key));
1715 } 1718 }
1716#endif 1719#endif
1717 1720
1718 /* Now fill out hash function seeds */ 1721 /* Now fill out hash function seeds */
1719 for (i = 0; i < 10; i++) 1722 for (i = 0; i < 10; i++)
1720 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); 1723 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1721 1724
1722 /* Set up the redirection table */ 1725 /* Set up the redirection table */
1723 for (i = 0, j = 0; i < 64; i++, j++) { 1726 for (i = 0, j = 0; i < 64; i++, j++) {
1724 if (j == adapter->num_queues) 1727 if (j == adapter->num_queues)
1725 j = 0; 1728 j = 0;
1726 1729
1727 if (adapter->feat_en & IXGBE_FEATURE_RSS) { 1730 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1728 /* 1731 /*
1729 * Fetch the RSS bucket id for the given indirection 1732 * Fetch the RSS bucket id for the given indirection
1730 * entry. Cap it at the number of configured buckets 1733 * entry. Cap it at the number of configured buckets
1731 * (which is num_queues.) 1734 * (which is num_queues.)
1732 */ 1735 */
1733 queue_id = rss_get_indirection_to_bucket(i); 1736 queue_id = rss_get_indirection_to_bucket(i);
1734 queue_id = queue_id % adapter->num_queues; 1737 queue_id = queue_id % adapter->num_queues;
1735 } else 1738 } else
1736 queue_id = j; 1739 queue_id = j;
1737 1740
1738 /* 1741 /*
1739 * The low 8 bits are for hash value (n+0); 1742 * The low 8 bits are for hash value (n+0);
1740 * The next 8 bits are for hash value (n+1), etc. 1743 * The next 8 bits are for hash value (n+1), etc.
1741 */ 1744 */
1742 reta >>= 8; 1745 reta >>= 8;
1743 reta |= ((uint32_t)queue_id) << 24; 1746 reta |= ((uint32_t)queue_id) << 24;
1744 if ((i & 3) == 3) { 1747 if ((i & 3) == 3) {
1745 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta); 1748 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1746 reta = 0; 1749 reta = 0;
1747 } 1750 }
1748 } 1751 }
1749 1752
1750 /* Perform hash on these packet types */ 1753 /* Perform hash on these packet types */
1751 if (adapter->feat_en & IXGBE_FEATURE_RSS) 1754 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1752 rss_hash_config = rss_gethashconfig(); 1755 rss_hash_config = rss_gethashconfig();
1753 else { 1756 else {
1754 /* 1757 /*
1755 * Disable UDP - IP fragments aren't currently being handled 1758 * Disable UDP - IP fragments aren't currently being handled
1756 * and so we end up with a mix of 2-tuple and 4-tuple 1759 * and so we end up with a mix of 2-tuple and 4-tuple
1757 * traffic. 1760 * traffic.
1758 */ 1761 */
1759 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 1762 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1760 | RSS_HASHTYPE_RSS_TCP_IPV4 1763 | RSS_HASHTYPE_RSS_TCP_IPV4
1761 | RSS_HASHTYPE_RSS_IPV6 1764 | RSS_HASHTYPE_RSS_IPV6
1762 | RSS_HASHTYPE_RSS_TCP_IPV6; 1765 | RSS_HASHTYPE_RSS_TCP_IPV6;
1763 } 1766 }
1764 1767
1765 mrqc = IXGBE_MRQC_RSSEN; 1768 mrqc = IXGBE_MRQC_RSSEN;
1766 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 1769 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1767 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 1770 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1768 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 1771 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1769 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 1772 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1770 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 1773 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1771 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 1774 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1772 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 1775 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1773 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 1776 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1774 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 1777 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1775 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n", 1778 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1776 __func__); 1779 __func__);
1777 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 1780 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1778 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n", 1781 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1779 __func__); 1782 __func__);
1780 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 1783 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1781 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 1784 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1782 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 1785 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1783 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 1786 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1784 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 1787 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1785 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n", 1788 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1786 __func__); 1789 __func__);
1787 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc); 1790 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1788} /* ixv_initialize_rss_mapping */ 1791} /* ixv_initialize_rss_mapping */
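
The redirection-table packing above is the subtle part: four 8-bit queue ids accumulate into one 32-bit VFRETA register, and after the four right-shifts entry n ends up in the low byte, exactly as the inline comment says. A userland sketch that prints the register images for a hypothetical two-queue setup:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t reta = 0;
	int num_queues = 2;	/* hypothetical queue count */
	int i;

	for (i = 0; i < 64; i++) {
		uint32_t queue_id = (uint32_t)(i % num_queues);

		reta >>= 8;		/* make room in the top byte */
		reta |= queue_id << 24;
		if ((i & 3) == 3) {	/* one register filled every 4 entries */
			printf("VFRETA[%d] = 0x%08x\n", i >> 2, reta);
			reta = 0;
		}
	}
	return 0;
}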
1789 1792
1790 1793
1791/************************************************************************ 1794/************************************************************************
1792 * ixv_initialize_receive_units - Setup receive registers and features. 1795 * ixv_initialize_receive_units - Setup receive registers and features.
1793 ************************************************************************/ 1796 ************************************************************************/
1794static void 1797static void
1795ixv_initialize_receive_units(struct adapter *adapter) 1798ixv_initialize_receive_units(struct adapter *adapter)
1796{ 1799{
1797 struct rx_ring *rxr = adapter->rx_rings; 1800 struct rx_ring *rxr = adapter->rx_rings;
1798 struct ixgbe_hw *hw = &adapter->hw; 1801 struct ixgbe_hw *hw = &adapter->hw;
1799 struct ifnet *ifp = adapter->ifp; 1802 struct ifnet *ifp = adapter->ifp;
1800 u32 bufsz, psrtype; 1803 u32 bufsz, psrtype;
1801 1804
1802 if (ifp->if_mtu > ETHERMTU) 1805 if (ifp->if_mtu > ETHERMTU)
1803 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1806 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1804 else 1807 else
1805 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1808 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1806 1809
1807 psrtype = IXGBE_PSRTYPE_TCPHDR 1810 psrtype = IXGBE_PSRTYPE_TCPHDR
1808 | IXGBE_PSRTYPE_UDPHDR 1811 | IXGBE_PSRTYPE_UDPHDR
1809 | IXGBE_PSRTYPE_IPV4HDR 1812 | IXGBE_PSRTYPE_IPV4HDR
1810 | IXGBE_PSRTYPE_IPV6HDR 1813 | IXGBE_PSRTYPE_IPV6HDR
1811 | IXGBE_PSRTYPE_L2HDR; 1814 | IXGBE_PSRTYPE_L2HDR;
1812 1815
1813 if (adapter->num_queues > 1) 1816 if (adapter->num_queues > 1)
1814 psrtype |= 1 << 29; 1817 psrtype |= 1 << 29;
1815 1818
1816 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1819 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1817 1820
1818 /* Tell PF our max_frame size */ 1821 /* Tell PF our max_frame size */
1819 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) { 1822 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1820 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n"); 1823 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1821 } 1824 }
1822 1825
1823 for (int i = 0; i < adapter->num_queues; i++, rxr++) { 1826 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
1824 u64 rdba = rxr->rxdma.dma_paddr; 1827 u64 rdba = rxr->rxdma.dma_paddr;
1825 u32 reg, rxdctl; 1828 u32 reg, rxdctl;
1826 int j = rxr->me; 1829 int j = rxr->me;
1827 1830
1828 /* Disable the queue */ 1831 /* Disable the queue */
1829 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1832 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1830 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1833 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1831 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1834 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1832 for (int k = 0; k < 10; k++) { 1835 for (int k = 0; k < 10; k++) {
1833 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1836 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1834 IXGBE_RXDCTL_ENABLE) 1837 IXGBE_RXDCTL_ENABLE)
1835 msec_delay(1); 1838 msec_delay(1);
1836 else 1839 else
1837 break; 1840 break;
1838 } 1841 }
1839 IXGBE_WRITE_BARRIER(hw); 1842 IXGBE_WRITE_BARRIER(hw);
1840 /* Setup the Base and Length of the Rx Descriptor Ring */ 1843 /* Setup the Base and Length of the Rx Descriptor Ring */
1841 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1844 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1842 (rdba & 0x00000000ffffffffULL)); 1845 (rdba & 0x00000000ffffffffULL));
1843 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1846 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1844 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), 1847 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1845 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); 1848 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
1846 1849
1847 /* Reset the ring indices */ 1850 /* Reset the ring indices */
1848 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); 1851 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1849 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); 1852 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1850 1853
1851 /* Set up the SRRCTL register */ 1854 /* Set up the SRRCTL register */
1852 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j)); 1855 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1853 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1856 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1854 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 1857 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1855 reg |= bufsz; 1858 reg |= bufsz;
1856 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1859 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1857 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg); 1860 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1858 1861
1859 /* Capture Rx Tail index */ 1862 /* Capture Rx Tail index */
1860 rxr->tail = IXGBE_VFRDT(rxr->me); 1863 rxr->tail = IXGBE_VFRDT(rxr->me);
1861 1864
1862 /* Do the queue enabling last */ 1865 /* Do the queue enabling last */
1863 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1866 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1864 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1867 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1865 for (int k = 0; k < 10; k++) { 1868 for (int k = 0; k < 10; k++) {
1866 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1869 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1867 IXGBE_RXDCTL_ENABLE) 1870 IXGBE_RXDCTL_ENABLE)
1868 break; 1871 break;
1869 msec_delay(1); 1872 msec_delay(1);
1870 } 1873 }
1871 IXGBE_WRITE_BARRIER(hw); 1874 IXGBE_WRITE_BARRIER(hw);
1872 1875
1873 /* Set the Tail Pointer */ 1876 /* Set the Tail Pointer */
1874#ifdef DEV_NETMAP 1877#ifdef DEV_NETMAP
1875 /* 1878 /*
1876 * In netmap mode, we must preserve the buffers made 1879 * In netmap mode, we must preserve the buffers made
1877 * available to userspace before the if_init() 1880 * available to userspace before the if_init()
1878 * (this is true by default on the TX side, because 1881 * (this is true by default on the TX side, because
1879 * init makes all buffers available to userspace). 1882 * init makes all buffers available to userspace).
1880 * 1883 *
1881 * netmap_reset() and the device specific routines 1884 * netmap_reset() and the device specific routines
1882 * (e.g. ixgbe_setup_receive_rings()) map these 1885 * (e.g. ixgbe_setup_receive_rings()) map these
1883 * buffers at the end of the NIC ring, so here we 1886 * buffers at the end of the NIC ring, so here we
1884 * must set the RDT (tail) register to make sure 1887 * must set the RDT (tail) register to make sure
1885 * they are not overwritten. 1888 * they are not overwritten.
1886 * 1889 *
1887 * In this driver the NIC ring starts at RDH = 0, 1890 * In this driver the NIC ring starts at RDH = 0,
1888 * RDT points to the last slot available for reception (?), 1891 * RDT points to the last slot available for reception (?),
1889 * so RDT = num_rx_desc - 1 means the whole ring is available. 1892 * so RDT = num_rx_desc - 1 means the whole ring is available.
1890 */ 1893 */
1891 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) && 1894 if ((adapter->feat_en & IXGBE_FEATURE_NETMAP) &&
1892 (ifp->if_capenable & IFCAP_NETMAP)) { 1895 (ifp->if_capenable & IFCAP_NETMAP)) {
1893 struct netmap_adapter *na = NA(adapter->ifp); 1896 struct netmap_adapter *na = NA(adapter->ifp);
1894 struct netmap_kring *kring = na->rx_rings[i]; 1897 struct netmap_kring *kring = na->rx_rings[i];
1895 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 1898 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1896 1899
1897 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t); 1900 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1898 } else 1901 } else
1899#endif /* DEV_NETMAP */ 1902#endif /* DEV_NETMAP */
1900 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 1903 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1901 adapter->num_rx_desc - 1); 1904 adapter->num_rx_desc - 1);
1902 } 1905 }
1903 1906
1904 if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) 1907 if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
1905 ixv_initialize_rss_mapping(adapter); 1908 ixv_initialize_rss_mapping(adapter);
1906} /* ixv_initialize_receive_units */ 1909} /* ixv_initialize_receive_units */
1907 1910
1908/************************************************************************ 1911/************************************************************************
1909 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function 1912 * ixv_sysctl_tdh_handler - Transmit Descriptor Head handler function
1910 * 1913 *
1911 * Retrieves the TDH value from the hardware 1914 * Retrieves the TDH value from the hardware
1912 ************************************************************************/ 1915 ************************************************************************/
1913static int 1916static int
1914ixv_sysctl_tdh_handler(SYSCTLFN_ARGS) 1917ixv_sysctl_tdh_handler(SYSCTLFN_ARGS)
1915{ 1918{
1916 struct sysctlnode node = *rnode; 1919 struct sysctlnode node = *rnode;
1917 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 1920 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1918 uint32_t val; 1921 uint32_t val;
1919 1922
1920 if (!txr) 1923 if (!txr)
1921 return (0); 1924 return (0);
1922 1925
1923 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me)); 1926 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDH(txr->me));
1924 node.sysctl_data = &val; 1927 node.sysctl_data = &val;
1925 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1928 return sysctl_lookup(SYSCTLFN_CALL(&node));
1926} /* ixv_sysctl_tdh_handler */ 1929} /* ixv_sysctl_tdh_handler */
1927 1930
1928/************************************************************************ 1931/************************************************************************
1929 * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function 1932 * ixv_sysctl_tdt_handler - Transmit Descriptor Tail handler function
1930 * 1933 *
1931 * Retrieves the TDT value from the hardware 1934 * Retrieves the TDT value from the hardware
1932 ************************************************************************/ 1935 ************************************************************************/
1933static int 1936static int
1934ixv_sysctl_tdt_handler(SYSCTLFN_ARGS) 1937ixv_sysctl_tdt_handler(SYSCTLFN_ARGS)
1935{ 1938{
1936 struct sysctlnode node = *rnode; 1939 struct sysctlnode node = *rnode;
1937 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data; 1940 struct tx_ring *txr = (struct tx_ring *)node.sysctl_data;
1938 uint32_t val; 1941 uint32_t val;
1939 1942
1940 if (!txr) 1943 if (!txr)
1941 return (0); 1944 return (0);
1942 1945
1943 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me)); 1946 val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_VFTDT(txr->me));
1944 node.sysctl_data = &val; 1947 node.sysctl_data = &val;
1945 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1948 return sysctl_lookup(SYSCTLFN_CALL(&node));
1946} /* ixv_sysctl_tdt_handler */ 1949} /* ixv_sysctl_tdt_handler */
1947 1950
1948/************************************************************************ 1951/************************************************************************
1949 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check 1952 * ixv_sysctl_next_to_check_handler - Receive Descriptor next to check
1950 * handler function 1953 * handler function
1951 * 1954 *
1952 * Retrieves the next_to_check value 1955 * Retrieves the next_to_check value
1953 ************************************************************************/ 1956 ************************************************************************/
1954static int 1957static int
1955ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS) 1958ixv_sysctl_next_to_check_handler(SYSCTLFN_ARGS)
1956{ 1959{
1957 struct sysctlnode node = *rnode; 1960 struct sysctlnode node = *rnode;
1958 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1961 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1959 uint32_t val; 1962 uint32_t val;
1960 1963
1961 if (!rxr) 1964 if (!rxr)
1962 return (0); 1965 return (0);
1963 1966
1964 val = rxr->next_to_check; 1967 val = rxr->next_to_check;
1965 node.sysctl_data = &val; 1968 node.sysctl_data = &val;
1966 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1969 return sysctl_lookup(SYSCTLFN_CALL(&node));
1967} /* ixv_sysctl_next_to_check_handler */ 1970} /* ixv_sysctl_next_to_check_handler */
1968 1971
1969/************************************************************************ 1972/************************************************************************
1970 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function 1973 * ixv_sysctl_rdh_handler - Receive Descriptor Head handler function
1971 * 1974 *
1972 * Retrieves the RDH value from the hardware 1975 * Retrieves the RDH value from the hardware
1973 ************************************************************************/ 1976 ************************************************************************/
1974static int 1977static int
1975ixv_sysctl_rdh_handler(SYSCTLFN_ARGS) 1978ixv_sysctl_rdh_handler(SYSCTLFN_ARGS)
1976{ 1979{
1977 struct sysctlnode node = *rnode; 1980 struct sysctlnode node = *rnode;
1978 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 1981 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1979 uint32_t val; 1982 uint32_t val;
1980 1983
1981 if (!rxr) 1984 if (!rxr)
1982 return (0); 1985 return (0);
1983 1986
1984 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me)); 1987 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDH(rxr->me));
1985 node.sysctl_data = &val; 1988 node.sysctl_data = &val;
1986 return sysctl_lookup(SYSCTLFN_CALL(&node)); 1989 return sysctl_lookup(SYSCTLFN_CALL(&node));
1987} /* ixv_sysctl_rdh_handler */ 1990} /* ixv_sysctl_rdh_handler */
1988 1991
1989/************************************************************************ 1992/************************************************************************
1990 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function 1993 * ixv_sysctl_rdt_handler - Receive Descriptor Tail handler function
1991 * 1994 *
1992 * Retrieves the RDT value from the hardware 1995 * Retrieves the RDT value from the hardware
1993 ************************************************************************/ 1996 ************************************************************************/
1994static int 1997static int
1995ixv_sysctl_rdt_handler(SYSCTLFN_ARGS) 1998ixv_sysctl_rdt_handler(SYSCTLFN_ARGS)
1996{ 1999{
1997 struct sysctlnode node = *rnode; 2000 struct sysctlnode node = *rnode;
1998 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data; 2001 struct rx_ring *rxr = (struct rx_ring *)node.sysctl_data;
1999 uint32_t val; 2002 uint32_t val;
2000 2003
2001 if (!rxr) 2004 if (!rxr)
2002 return (0); 2005 return (0);
2003 2006
2004 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me)); 2007 val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_VFRDT(rxr->me));
2005 node.sysctl_data = &val; 2008 node.sysctl_data = &val;
2006 return sysctl_lookup(SYSCTLFN_CALL(&node)); 2009 return sysctl_lookup(SYSCTLFN_CALL(&node));
2007} /* ixv_sysctl_rdt_handler */ 2010} /* ixv_sysctl_rdt_handler */
2008 2011
2009static void 2012static void
2010ixv_setup_vlan_tagging(struct adapter *adapter) 2013ixv_setup_vlan_tagging(struct adapter *adapter)
2011{ 2014{
2012 struct ethercom *ec = &adapter->osdep.ec; 2015 struct ethercom *ec = &adapter->osdep.ec;
2013 struct ixgbe_hw *hw = &adapter->hw; 2016 struct ixgbe_hw *hw = &adapter->hw;
2014 struct rx_ring *rxr; 2017 struct rx_ring *rxr;
2015 u32 ctrl; 2018 u32 ctrl;
2016 int i; 2019 int i;
2017 bool hwtagging; 2020 bool hwtagging;
2018 2021
2019 /* Enable HW tagging only if any vlan is attached */ 2022 /* Enable HW tagging only if any vlan is attached */
2020 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) 2023 hwtagging = (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2021 && VLAN_ATTACHED(ec); 2024 && VLAN_ATTACHED(ec);
2022 2025
2023 /* Set the VLAN-stripping (VME) bit on each queue */ 2026 /* Set the VLAN-stripping (VME) bit on each queue */
2024 for (i = 0; i < adapter->num_queues; i++) { 2027 for (i = 0; i < adapter->num_queues; i++) {
2025 rxr = &adapter->rx_rings[i]; 2028 rxr = &adapter->rx_rings[i];
2026 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me)); 2029 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(rxr->me));
2027 if (hwtagging) 2030 if (hwtagging)
2028 ctrl |= IXGBE_RXDCTL_VME; 2031 ctrl |= IXGBE_RXDCTL_VME;
2029 else 2032 else
2030 ctrl &= ~IXGBE_RXDCTL_VME; 2033 ctrl &= ~IXGBE_RXDCTL_VME;
2031 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl); 2034 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(rxr->me), ctrl);
2032 /* 2035 /*
2033 * Let Rx path know that it needs to store VLAN tag 2036 * Let Rx path know that it needs to store VLAN tag
2034 * as part of extra mbuf info. 2037 * as part of extra mbuf info.
2035 */ 2038 */
2036 rxr->vtag_strip = hwtagging ? TRUE : FALSE; 2039 rxr->vtag_strip = hwtagging ? TRUE : FALSE;
2037 } 2040 }
2038} /* ixv_setup_vlan_tagging */ 2041} /* ixv_setup_vlan_tagging */
2039 2042
2040/************************************************************************ 2043/************************************************************************
2041 * ixv_setup_vlan_support 2044 * ixv_setup_vlan_support
2042 ************************************************************************/ 2045 ************************************************************************/
2043static int 2046static int
2044ixv_setup_vlan_support(struct adapter *adapter) 2047ixv_setup_vlan_support(struct adapter *adapter)
2045{ 2048{
2046 struct ethercom *ec = &adapter->osdep.ec; 2049 struct ethercom *ec = &adapter->osdep.ec;
2047 struct ixgbe_hw *hw = &adapter->hw; 2050 struct ixgbe_hw *hw = &adapter->hw;
2048 u32 vid, vfta, retry; 2051 u32 vid, vfta, retry;
2049 struct vlanid_list *vlanidp; 2052 struct vlanid_list *vlanidp;
2050 int rv, error = 0; 2053 int rv, error = 0;
2051 2054
2052 /* 2055 /*
2053 * This function is called from both if_init and ifflags_cb() 2056 * This function is called from both if_init and ifflags_cb()
2054 * on NetBSD. 2057 * on NetBSD.
2055 */ 2058 */
2056 2059
2057 /* 2060 /*
2058 * Part 1: 2061 * Part 1:
2059 * Setup VLAN HW tagging 2062 * Setup VLAN HW tagging
2060 */ 2063 */
2061 ixv_setup_vlan_tagging(adapter); 2064 ixv_setup_vlan_tagging(adapter);
2062 2065
2063 if (!VLAN_ATTACHED(ec)) 2066 if (!VLAN_ATTACHED(ec))
2064 return 0; 2067 return 0;
2065 2068
2066 /* 2069 /*
2067 * Part 2: 2070 * Part 2:
2068 * Setup VLAN HW filter 2071 * Setup VLAN HW filter
2069 */ 2072 */
2070 /* Cleanup shadow_vfta */ 2073 /* Cleanup shadow_vfta */
2071 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) 2074 for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
2072 adapter->shadow_vfta[i] = 0; 2075 adapter->shadow_vfta[i] = 0;
2073 /* Generate shadow_vfta from ec_vids */ 2076 /* Generate shadow_vfta from ec_vids */
2074 ETHER_LOCK(ec); 2077 ETHER_LOCK(ec);
2075 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) { 2078 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
2076 uint32_t idx; 2079 uint32_t idx;
2077 2080
2078 idx = vlanidp->vid / 32; 2081 idx = vlanidp->vid / 32;
2079 KASSERT(idx < IXGBE_VFTA_SIZE); 2082 KASSERT(idx < IXGBE_VFTA_SIZE);
2080 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32); 2083 adapter->shadow_vfta[idx] |= (u32)1 << (vlanidp->vid % 32);
2081 } 2084 }
2082 ETHER_UNLOCK(ec); 2085 ETHER_UNLOCK(ec);
2083 2086
2084 /* 2087 /*
2085 * A soft reset zeroes out the VFTA, so 2088 * A soft reset zeroes out the VFTA, so
2086 * we need to repopulate it now. 2089 * we need to repopulate it now.
2087 */ 2090 */
2088 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { 2091 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
2089 if (adapter->shadow_vfta[i] == 0) 2092 if (adapter->shadow_vfta[i] == 0)
2090 continue; 2093 continue;
2091 vfta = adapter->shadow_vfta[i]; 2094 vfta = adapter->shadow_vfta[i];
2092 /* 2095 /*
2093 * Reconstruct the VLAN IDs 2096 * Reconstruct the VLAN IDs
2094 * based on the bits set in each 2097 * based on the bits set in each
2095 * of the array ints. 2098 * of the array ints.
2096 */ 2099 */
2097 for (int j = 0; j < 32; j++) { 2100 for (int j = 0; j < 32; j++) {
2098 retry = 0; 2101 retry = 0;
2099 if ((vfta & ((u32)1 << j)) == 0) 2102 if ((vfta & ((u32)1 << j)) == 0)
2100 continue; 2103 continue;
2101 vid = (i * 32) + j; 2104 vid = (i * 32) + j;
2102 2105
2103 /* Call the shared code mailbox routine */ 2106 /* Call the shared code mailbox routine */
2104 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE, 2107 while ((rv = hw->mac.ops.set_vfta(hw, vid, 0, TRUE,
2105 FALSE)) != 0) { 2108 FALSE)) != 0) {
2106 if (++retry > 5) { 2109 if (++retry > 5) {
2107 device_printf(adapter->dev, 2110 device_printf(adapter->dev,
2108 "%s: max retry exceeded\n", 2111 "%s: max retry exceeded\n",
2109 __func__); 2112 __func__);
2110 break; 2113 break;
2111 } 2114 }
2112 } 2115 }
2113 if (rv != 0) { 2116 if (rv != 0) {
2114 device_printf(adapter->dev, 2117 device_printf(adapter->dev,
2115 "failed to set vlan %d\n", vid); 2118 "failed to set vlan %d\n", vid);
2116 error = EACCES; 2119 error = EACCES;
2117 } 2120 }
2118 } 2121 }
2119 } 2122 }
2120 return error; 2123 return error;
2121} /* ixv_setup_vlan_support */ 2124} /* ixv_setup_vlan_support */
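
For reference, a minimal standalone sketch of the shadow-VFTA math used above: a 12-bit VLAN ID maps to one bit in an array of 4096/32 = 128 32-bit words, and the repopulation loop recovers each ID as i*32 + j. The (vtag >> 5) & 0x7F / vtag & 0x1F form in ixv_register_vlan below is the same mapping. Names here are illustrative, not driver code; only the table size (128, as IXGBE_VFTA_SIZE) is taken from the driver.

	#include <stdint.h>
	#include <stdio.h>

	#define VFTA_SIZE	128	/* 4096 VLAN IDs / 32 bits per word */

	static uint32_t shadow_vfta[VFTA_SIZE];

	static void
	vfta_set(uint16_t vid)
	{
		shadow_vfta[vid / 32] |= (uint32_t)1 << (vid % 32);
	}

	int
	main(void)
	{
		vfta_set(5);
		vfta_set(1000);

		/* Walk the bitmap and reconstruct the IDs, as the loop above does. */
		for (int i = 0; i < VFTA_SIZE; i++) {
			for (int j = 0; j < 32; j++) {
				if (shadow_vfta[i] & ((uint32_t)1 << j))
					printf("vid %d\n", i * 32 + j);
			}
		}
		return 0;
	}
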
2122 2125
2123static int 2126static int
2124ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set) 2127ixv_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
2125{ 2128{
2126 struct ifnet *ifp = &ec->ec_if; 2129 struct ifnet *ifp = &ec->ec_if;
2127 struct adapter *adapter = ifp->if_softc; 2130 struct adapter *adapter = ifp->if_softc;
2128 int rv; 2131 int rv;
2129 2132
2130 if (set) 2133 if (set)
2131 rv = ixv_register_vlan(adapter, vid); 2134 rv = ixv_register_vlan(adapter, vid);
2132 else 2135 else
2133 rv = ixv_unregister_vlan(adapter, vid); 2136 rv = ixv_unregister_vlan(adapter, vid);
2134 2137
2135 if (rv != 0) 2138 if (rv != 0)
2136 return rv; 2139 return rv;
2137 2140
2138 /* 2141 /*
2139 * Control VLAN HW tagging when ec_nvlans is changed from 1 to 0 2142 * Control VLAN HW tagging when ec_nvlans is changed from 1 to 0
2140 * or 0 to 1. 2143 * or 0 to 1.
2141 */ 2144 */
2142 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0))) 2145 if ((set && (ec->ec_nvlans == 1)) || (!set && (ec->ec_nvlans == 0)))
2143 ixv_setup_vlan_tagging(adapter); 2146 ixv_setup_vlan_tagging(adapter);
2144 2147
2145 return rv; 2148 return rv;
2146} 2149}
2147 2150
2148/************************************************************************ 2151/************************************************************************
2149 * ixv_register_vlan 2152 * ixv_register_vlan
2150 * 2153 *
2151 * Run via a vlan config EVENT; it enables us to use the 2154 * Run via a vlan config EVENT; it enables us to use the
2152 * HW Filter table since we can get the vlan id. This just 2155 * HW Filter table since we can get the vlan id. This just
2153 * creates the entry in the soft version of the VFTA; init 2156 * creates the entry in the soft version of the VFTA; init
2154 * will repopulate the real table. 2157 * will repopulate the real table.
2155 ************************************************************************/ 2158 ************************************************************************/
2156static int 2159static int
2157ixv_register_vlan(struct adapter *adapter, u16 vtag) 2160ixv_register_vlan(struct adapter *adapter, u16 vtag)
2158{ 2161{
2159 struct ixgbe_hw *hw = &adapter->hw; 2162 struct ixgbe_hw *hw = &adapter->hw;
2160 u16 index, bit; 2163 u16 index, bit;
2161 int error; 2164 int error;
2162 2165
2163 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2166 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2164 return EINVAL; 2167 return EINVAL;
2165 IXGBE_CORE_LOCK(adapter); 2168 IXGBE_CORE_LOCK(adapter);
2166 index = (vtag >> 5) & 0x7F; 2169 index = (vtag >> 5) & 0x7F;
2167 bit = vtag & 0x1F; 2170 bit = vtag & 0x1F;
2168 adapter->shadow_vfta[index] |= ((u32)1 << bit); 2171 adapter->shadow_vfta[index] |= ((u32)1 << bit);
2169 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false); 2172 error = hw->mac.ops.set_vfta(hw, vtag, 0, true, false);
2170 IXGBE_CORE_UNLOCK(adapter); 2173 IXGBE_CORE_UNLOCK(adapter);
2171 2174
2172 if (error != 0) { 2175 if (error != 0) {
2173 device_printf(adapter->dev, "failed to register vlan %hu\n", 2176 device_printf(adapter->dev, "failed to register vlan %hu\n",
2174 vtag); 2177 vtag);
2175 error = EACCES; 2178 error = EACCES;
2176 } 2179 }
2177 return error; 2180 return error;
2178} /* ixv_register_vlan */ 2181} /* ixv_register_vlan */
2179 2182
2180/************************************************************************ 2183/************************************************************************
2181 * ixv_unregister_vlan 2184 * ixv_unregister_vlan
2182 * 2185 *
2183 * Run via a vlan unconfig EVENT; remove our entry 2186 * Run via a vlan unconfig EVENT; remove our entry
2184 * in the soft vfta. 2187 * in the soft vfta.
2185 ************************************************************************/ 2188 ************************************************************************/
2186static int 2189static int
2187ixv_unregister_vlan(struct adapter *adapter, u16 vtag) 2190ixv_unregister_vlan(struct adapter *adapter, u16 vtag)
2188{ 2191{
2189 struct ixgbe_hw *hw = &adapter->hw; 2192 struct ixgbe_hw *hw = &adapter->hw;
2190 u16 index, bit; 2193 u16 index, bit;
2191 int error; 2194 int error;
2192 2195
2193 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 2196 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
2194 return EINVAL; 2197 return EINVAL;
2195 2198
2196 IXGBE_CORE_LOCK(adapter); 2199 IXGBE_CORE_LOCK(adapter);
2197 index = (vtag >> 5) & 0x7F; 2200 index = (vtag >> 5) & 0x7F;
2198 bit = vtag & 0x1F; 2201 bit = vtag & 0x1F;
2199 adapter->shadow_vfta[index] &= ~((u32)1 << bit); 2202 adapter->shadow_vfta[index] &= ~((u32)1 << bit);
2200 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false); 2203 error = hw->mac.ops.set_vfta(hw, vtag, 0, false, false);
2201 IXGBE_CORE_UNLOCK(adapter); 2204 IXGBE_CORE_UNLOCK(adapter);
2202 2205
2203 if (error != 0) { 2206 if (error != 0) {
2204 device_printf(adapter->dev, "failed to unregister vlan %hu\n", 2207 device_printf(adapter->dev, "failed to unregister vlan %hu\n",
2205 vtag); 2208 vtag);
2206 error = EIO; 2209 error = EIO;
2207 } 2210 }
2208 return error; 2211 return error;
2209} /* ixv_unregister_vlan */ 2212} /* ixv_unregister_vlan */
2210 2213
2211/************************************************************************ 2214/************************************************************************
2212 * ixv_enable_intr 2215 * ixv_enable_intr
2213 ************************************************************************/ 2216 ************************************************************************/
2214static void 2217static void
2215ixv_enable_intr(struct adapter *adapter) 2218ixv_enable_intr(struct adapter *adapter)
2216{ 2219{
2217 struct ixgbe_hw *hw = &adapter->hw; 2220 struct ixgbe_hw *hw = &adapter->hw;
2218 struct ix_queue *que = adapter->queues; 2221 struct ix_queue *que = adapter->queues;
2219 u32 mask; 2222 u32 mask;
2220 int i; 2223 int i;
2221 2224
2222 /* For VTEIAC */ 2225 /* For VTEIAC */
2223 mask = (1 << adapter->vector); 2226 mask = (1 << adapter->vector);
2224 for (i = 0; i < adapter->num_queues; i++, que++) 2227 for (i = 0; i < adapter->num_queues; i++, que++)
2225 mask |= (1 << que->msix); 2228 mask |= (1 << que->msix);
2226 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); 2229 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
2227 2230
2228 /* For VTEIMS */ 2231 /* For VTEIMS */
2229 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector)); 2232 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << adapter->vector));
2230 que = adapter->queues; 2233 que = adapter->queues;
2231 for (i = 0; i < adapter->num_queues; i++, que++) 2234 for (i = 0; i < adapter->num_queues; i++, que++)
2232 ixv_enable_queue(adapter, que->msix); 2235 ixv_enable_queue(adapter, que->msix);
2233 2236
2234 IXGBE_WRITE_FLUSH(hw); 2237 IXGBE_WRITE_FLUSH(hw);
2235} /* ixv_enable_intr */ 2238} /* ixv_enable_intr */
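
The VTEIAC/VTEIMS values above are plain one-bit-per-vector masks: the mailbox vector plus one bit per queue MSI-X vector. A tiny sketch of the composition (vector numbers here are made up):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		const int mbox_vector = 0;
		const int queue_msix[] = { 1, 2, 3, 4 };	/* illustrative */
		uint32_t mask = (uint32_t)1 << mbox_vector;

		for (size_t i = 0; i < sizeof(queue_msix) / sizeof(queue_msix[0]); i++)
			mask |= (uint32_t)1 << queue_msix[i];

		printf("mask = 0x%08x\n", mask);	/* 0x0000001f */
		return 0;
	}
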
2236 2239
2237/************************************************************************ 2240/************************************************************************
2238 * ixv_disable_intr 2241 * ixv_disable_intr
2239 ************************************************************************/ 2242 ************************************************************************/
2240static void 2243static void
2241ixv_disable_intr(struct adapter *adapter) 2244ixv_disable_intr(struct adapter *adapter)
2242{ 2245{
2243 struct ix_queue *que = adapter->queues; 2246 struct ix_queue *que = adapter->queues;
2244 2247
2245 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); 2248 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
2246 2249
2247 /* disable interrupts other than queues */ 2250 /* disable interrupts other than queues */
2248 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, (1 << adapter->vector)); 2251 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, (1 << adapter->vector));
2249 2252
2250 for (int i = 0; i < adapter->num_queues; i++, que++) 2253 for (int i = 0; i < adapter->num_queues; i++, que++)
2251 ixv_disable_queue(adapter, que->msix); 2254 ixv_disable_queue(adapter, que->msix);
2252 2255
2253 IXGBE_WRITE_FLUSH(&adapter->hw); 2256 IXGBE_WRITE_FLUSH(&adapter->hw);
2254} /* ixv_disable_intr */ 2257} /* ixv_disable_intr */
2255 2258
2256/************************************************************************ 2259/************************************************************************
2257 * ixv_set_ivar 2260 * ixv_set_ivar
2258 * 2261 *
2259 * Setup the correct IVAR register for a particular MSI-X interrupt 2262 * Setup the correct IVAR register for a particular MSI-X interrupt
2260 * - entry is the register array entry 2263 * - entry is the register array entry
2261 * - vector is the MSI-X vector for this queue 2264 * - vector is the MSI-X vector for this queue
2262 * - type is RX/TX/MISC 2265 * - type is RX/TX/MISC
2263 ************************************************************************/ 2266 ************************************************************************/
2264static void 2267static void
2265ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) 2268ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
2266{ 2269{
2267 struct ixgbe_hw *hw = &adapter->hw; 2270 struct ixgbe_hw *hw = &adapter->hw;
2268 u32 ivar, index; 2271 u32 ivar, index;
2269 2272
2270 vector |= IXGBE_IVAR_ALLOC_VAL; 2273 vector |= IXGBE_IVAR_ALLOC_VAL;
2271 2274
2272 if (type == -1) { /* MISC IVAR */ 2275 if (type == -1) { /* MISC IVAR */
2273 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 2276 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
2274 ivar &= ~0xFF; 2277 ivar &= ~0xFF;
2275 ivar |= vector; 2278 ivar |= vector;
2276 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 2279 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
2277 } else { /* RX/TX IVARS */ 2280 } else { /* RX/TX IVARS */
2278 index = (16 * (entry & 1)) + (8 * type); 2281 index = (16 * (entry & 1)) + (8 * type);
2279 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); 2282 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
2280 ivar &= ~(0xffUL << index); 2283 ivar &= ~(0xffUL << index);
2281 ivar |= ((u32)vector << index); 2284 ivar |= ((u32)vector << index);
2282 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); 2285 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
2283 } 2286 }
2284} /* ixv_set_ivar */ 2287} /* ixv_set_ivar */
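
Each 32-bit VTIVAR register packs four 8-bit vector entries, RX and TX for two queues, which is what the index arithmetic above encodes: entry >> 1 picks the register, 16 * (entry & 1) + 8 * type picks the byte lane. A standalone sketch of that mapping (illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	static void
	ivar_slot(unsigned entry, unsigned type, unsigned *regno, unsigned *shift)
	{
		*regno = entry >> 1;				/* two queues per register */
		*shift = (16 * (entry & 1)) + (8 * type);	/* byte lane inside it */
	}

	int
	main(void)
	{
		unsigned regno, shift;

		for (unsigned q = 0; q < 4; q++) {
			for (unsigned t = 0; t < 2; t++) {
				ivar_slot(q, t, &regno, &shift);
				printf("queue %u %s -> VTIVAR(%u) bits [%u:%u]\n",
				    q, t ? "TX" : "RX", regno, shift + 7, shift);
			}
		}
		return 0;
	}
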
2285 2288
2286/************************************************************************ 2289/************************************************************************
2287 * ixv_configure_ivars 2290 * ixv_configure_ivars
2288 ************************************************************************/ 2291 ************************************************************************/
2289static void 2292static void
2290ixv_configure_ivars(struct adapter *adapter) 2293ixv_configure_ivars(struct adapter *adapter)
2291{ 2294{
2292 struct ix_queue *que = adapter->queues; 2295 struct ix_queue *que = adapter->queues;
2293 2296
2294 /* XXX We should sync EITR value calculation with ixgbe.c? */ 2297 /* XXX We should sync EITR value calculation with ixgbe.c? */
2295 2298
2296 for (int i = 0; i < adapter->num_queues; i++, que++) { 2299 for (int i = 0; i < adapter->num_queues; i++, que++) {
2297 /* First the RX queue entry */ 2300 /* First the RX queue entry */
2298 ixv_set_ivar(adapter, i, que->msix, 0); 2301 ixv_set_ivar(adapter, i, que->msix, 0);
2299 /* ... and the TX */ 2302 /* ... and the TX */
2300 ixv_set_ivar(adapter, i, que->msix, 1); 2303 ixv_set_ivar(adapter, i, que->msix, 1);
2301 /* Set an initial value in EITR */ 2304 /* Set an initial value in EITR */
2302 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT); 2305 ixv_eitr_write(adapter, que->msix, IXGBE_EITR_DEFAULT);
2303 } 2306 }
2304 2307
2305 /* For the mailbox interrupt */ 2308 /* For the mailbox interrupt */
2306 ixv_set_ivar(adapter, 1, adapter->vector, -1); 2309 ixv_set_ivar(adapter, 1, adapter->vector, -1);
2307} /* ixv_configure_ivars */ 2310} /* ixv_configure_ivars */
2308 2311
2309 2312
2310/************************************************************************ 2313/************************************************************************
2311 * ixv_save_stats 2314 * ixv_save_stats
2312 * 2315 *
2313 * The VF stats registers never have a truly virgin 2316 * The VF stats registers never have a truly virgin
2314 * starting point, so this routine tries to make an 2317 * starting point, so this routine tries to make an
2315 * artificial one, marking ground zero on attach, as 2318 * artificial one, marking ground zero on attach, as
2316 * it were. 2319 * it were.
2317 ************************************************************************/ 2320 ************************************************************************/
2318static void 2321static void
2319ixv_save_stats(struct adapter *adapter) 2322ixv_save_stats(struct adapter *adapter)
2320{ 2323{
2321 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2324 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2322 2325
2323 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) { 2326 if (stats->vfgprc.ev_count || stats->vfgptc.ev_count) {
2324 stats->saved_reset_vfgprc += 2327 stats->saved_reset_vfgprc +=
2325 stats->vfgprc.ev_count - stats->base_vfgprc; 2328 stats->vfgprc.ev_count - stats->base_vfgprc;
2326 stats->saved_reset_vfgptc += 2329 stats->saved_reset_vfgptc +=
2327 stats->vfgptc.ev_count - stats->base_vfgptc; 2330 stats->vfgptc.ev_count - stats->base_vfgptc;
2328 stats->saved_reset_vfgorc += 2331 stats->saved_reset_vfgorc +=
2329 stats->vfgorc.ev_count - stats->base_vfgorc; 2332 stats->vfgorc.ev_count - stats->base_vfgorc;
2330 stats->saved_reset_vfgotc += 2333 stats->saved_reset_vfgotc +=
2331 stats->vfgotc.ev_count - stats->base_vfgotc; 2334 stats->vfgotc.ev_count - stats->base_vfgotc;
2332 stats->saved_reset_vfmprc += 2335 stats->saved_reset_vfmprc +=
2333 stats->vfmprc.ev_count - stats->base_vfmprc; 2336 stats->vfmprc.ev_count - stats->base_vfmprc;
2334 } 2337 }
2335} /* ixv_save_stats */ 2338} /* ixv_save_stats */
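
The save/init pair here implements a simple scheme for free-running VF counters: remember the register value as a base, and on every reset bank the delta into saved_reset before re-reading a new base. A minimal model of that bookkeeping (field names shortened, values invented):

	#include <stdint.h>
	#include <stdio.h>

	struct stat {
		uint64_t base;		/* register value at attach/reset */
		uint64_t saved_reset;	/* accumulated before the last reset */
	};

	static uint64_t
	stat_total(const struct stat *s, uint64_t reg_now)
	{
		return s->saved_reset + (reg_now - s->base);
	}

	int
	main(void)
	{
		struct stat s = { .base = 1000, .saved_reset = 0 };

		printf("%llu\n", (unsigned long long)stat_total(&s, 1500)); /* 500 */

		/* On reset: bank the running delta, then rebase. */
		s.saved_reset += 1500 - s.base;
		s.base = 1700;		/* value read back after the reset */
		printf("%llu\n", (unsigned long long)stat_total(&s, 1800)); /* 600 */
		return 0;
	}
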
2336 2339
2337/************************************************************************ 2340/************************************************************************
2338 * ixv_init_stats 2341 * ixv_init_stats
2339 ************************************************************************/ 2342 ************************************************************************/
2340static void 2343static void
2341ixv_init_stats(struct adapter *adapter) 2344ixv_init_stats(struct adapter *adapter)
2342{ 2345{
2343 struct ixgbe_hw *hw = &adapter->hw; 2346 struct ixgbe_hw *hw = &adapter->hw;
2344 2347
2345 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 2348 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2346 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 2349 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2347 adapter->stats.vf.last_vfgorc |= 2350 adapter->stats.vf.last_vfgorc |=
2348 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 2351 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2349 2352
2350 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 2353 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2351 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 2354 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2352 adapter->stats.vf.last_vfgotc |= 2355 adapter->stats.vf.last_vfgotc |=
2353 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 2356 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2354 2357
2355 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 2358 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2356 2359
2357 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; 2360 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2358 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; 2361 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2359 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; 2362 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2360 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; 2363 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2361 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; 2364 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2362} /* ixv_init_stats */ 2365} /* ixv_init_stats */
2363 2366
2364#define UPDATE_STAT_32(reg, last, count) \ 2367#define UPDATE_STAT_32(reg, last, count) \
2365{ \ 2368{ \
2366 u32 current = IXGBE_READ_REG(hw, (reg)); \ 2369 u32 current = IXGBE_READ_REG(hw, (reg)); \
2367 if (current < (last)) \ 2370 if (current < (last)) \
2368 count.ev_count += 0x100000000LL; \ 2371 count.ev_count += 0x100000000LL; \
2369 (last) = current; \ 2372 (last) = current; \
2370 count.ev_count &= 0xFFFFFFFF00000000LL; \ 2373 count.ev_count &= 0xFFFFFFFF00000000LL; \
2371 count.ev_count |= current; \ 2374 count.ev_count |= current; \
2372} 2375}
2373 2376
2374#define UPDATE_STAT_36(lsb, msb, last, count) \ 2377#define UPDATE_STAT_36(lsb, msb, last, count) \
2375{ \ 2378{ \
2376 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \ 2379 u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
2377 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \ 2380 u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
2378 u64 current = ((cur_msb << 32) | cur_lsb); \ 2381 u64 current = ((cur_msb << 32) | cur_lsb); \
2379 if (current < (last)) \ 2382 if (current < (last)) \
2380 count.ev_count += 0x1000000000LL; \ 2383 count.ev_count += 0x1000000000LL; \
2381 (last) = current; \ 2384 (last) = current; \
2382 count.ev_count &= 0xFFFFFFF000000000LL; \ 2385 count.ev_count &= 0xFFFFFFF000000000LL; \
2383 count.ev_count |= current; \ 2386 count.ev_count |= current; \
2384} 2387}
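
UPDATE_STAT_32 above widens a wrapping 32-bit hardware counter into the 64-bit evcnt: if the raw value went backwards the register wrapped, so carry into bit 32, then splice the current low 32 bits under the preserved high bits. UPDATE_STAT_36 is the same idea at 36 bits. A function-style sketch of the 32-bit case:

	#include <stdint.h>
	#include <stdio.h>

	static void
	update_stat_32(uint32_t current, uint32_t *last, uint64_t *count)
	{
		if (current < *last)			/* register wrapped */
			*count += (uint64_t)1 << 32;
		*last = current;
		*count = (*count & 0xFFFFFFFF00000000ULL) | current;
	}

	int
	main(void)
	{
		uint32_t last = 0;
		uint64_t count = 0;

		update_stat_32(0xFFFFFFF0, &last, &count);
		update_stat_32(0x00000010, &last, &count);	/* wrapped */
		printf("0x%llx\n", (unsigned long long)count);	/* 0x100000010 */
		return 0;
	}
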
2385 2388
2386/************************************************************************ 2389/************************************************************************
2387 * ixv_update_stats - Update the board statistics counters. 2390 * ixv_update_stats - Update the board statistics counters.
2388 ************************************************************************/ 2391 ************************************************************************/
2389void 2392void
2390ixv_update_stats(struct adapter *adapter) 2393ixv_update_stats(struct adapter *adapter)
2391{ 2394{
2392 struct ixgbe_hw *hw = &adapter->hw; 2395 struct ixgbe_hw *hw = &adapter->hw;
2393 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2396 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2394 2397
2395 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc); 2398 UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
2396 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc); 2399 UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
2397 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc, 2400 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, stats->last_vfgorc,
2398 stats->vfgorc); 2401 stats->vfgorc);
2399 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc, 2402 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, stats->last_vfgotc,
2400 stats->vfgotc); 2403 stats->vfgotc);
2401 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc); 2404 UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);
2402 2405
2403 /* The VF hardware doesn't count errors */ 2406 /* The VF hardware doesn't count errors */
2404 2407
2405} /* ixv_update_stats */ 2408} /* ixv_update_stats */
2406 2409
2407/************************************************************************ 2410/************************************************************************
2408 * ixv_sysctl_interrupt_rate_handler 2411 * ixv_sysctl_interrupt_rate_handler
2409 ************************************************************************/ 2412 ************************************************************************/
2410static int 2413static int
2411ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) 2414ixv_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS)
2412{ 2415{
2413 struct sysctlnode node = *rnode; 2416 struct sysctlnode node = *rnode;
2414 struct ix_queue *que = (struct ix_queue *)node.sysctl_data; 2417 struct ix_queue *que = (struct ix_queue *)node.sysctl_data;
2415 struct adapter *adapter; 2418 struct adapter *adapter;
2416 uint32_t reg, usec, rate; 2419 uint32_t reg, usec, rate;
2417 int error; 2420 int error;
2418 if (que == NULL) 2421 if (que == NULL)
2419 return 0; 2422 return 0;
2420 adapter = que->adapter; 2423 adapter = que->adapter;
2421 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix)); 2424 reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_VTEITR(que->msix));
2422 usec = ((reg & 0x0FF8) >> 3); 2425 usec = ((reg & 0x0FF8) >> 3);
2423 if (usec > 0) 2426 if (usec > 0)
2424 rate = 500000 / usec; 2427 rate = 500000 / usec;
2425 else 2428 else
2426 rate = 0; 2429 rate = 0;
2427 node.sysctl_data = &rate; 2430 node.sysctl_data = &rate;
2428 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2431 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2429 if (error || newp == NULL) 2432 if (error || newp == NULL)
2430 return error; 2433 return error;
2431 reg &= ~0xfff; /* default, no limitation */ 2434 reg &= ~0xfff; /* default, no limitation */
2432 if (rate > 0 && rate < 500000) { 2435 if (rate > 0 && rate < 500000) {
2433 if (rate < 1000) 2436 if (rate < 1000)
2434 rate = 1000; 2437 rate = 1000;
2435 reg |= ((4000000 / rate) & 0xff8); 2438 reg |= ((4000000 / rate) & 0xff8);
2436 /* 2439 /*
2437 * When RSC is used, ITR interval must be larger than 2440 * When RSC is used, ITR interval must be larger than
2438 * RSC_DELAY. Currently, we use 2us for RSC_DELAY. 2441 * RSC_DELAY. Currently, we use 2us for RSC_DELAY.
2439 * The minimum value is always greater than 2us on 100M 2442 * The minimum value is always greater than 2us on 100M
2440 * (and 10M?(not documented)), but it's not on 1G and higher. 2443 * (and 10M?(not documented)), but it's not on 1G and higher.
2441 */ 2444 */
2442 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL) 2445 if ((adapter->link_speed != IXGBE_LINK_SPEED_100_FULL)
2443 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) { 2446 && (adapter->link_speed != IXGBE_LINK_SPEED_10_FULL)) {
2444 if ((adapter->num_queues > 1) 2447 if ((adapter->num_queues > 1)
2445 && (reg < IXGBE_MIN_RSC_EITR_10G1G)) 2448 && (reg < IXGBE_MIN_RSC_EITR_10G1G))
2446 return EINVAL; 2449 return EINVAL;
2447 } 2450 }
2448 ixv_max_interrupt_rate = rate; 2451 ixv_max_interrupt_rate = rate;
2449 } else 2452 } else
2450 ixv_max_interrupt_rate = 0; 2453 ixv_max_interrupt_rate = 0;
2451 ixv_eitr_write(adapter, que->msix, reg); 2454 ixv_eitr_write(adapter, que->msix, reg);
2452 2455
2453 return (0); 2456 return (0);
2454} /* ixv_sysctl_interrupt_rate_handler */ 2457} /* ixv_sysctl_interrupt_rate_handler */
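
The conversions above treat the EITR interval field (bits 11:3) as 2-microsecond units, despite the local variable being named usec: reading gives rate = 500000 / field, and writing stores (4000000 / rate) & 0xff8, i.e. the field pre-shifted into place. A round-trip sketch; note the 2 us quantization makes it inexact:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t
	rate_to_eitr(uint32_t rate)	/* interrupts/sec -> register bits */
	{
		return (4000000 / rate) & 0xff8;
	}

	static uint32_t
	eitr_to_rate(uint32_t reg)	/* register bits -> interrupts/sec */
	{
		uint32_t field = (reg & 0x0ff8) >> 3;	/* 2 us units */

		return field ? 500000 / field : 0;
	}

	int
	main(void)
	{
		uint32_t reg = rate_to_eitr(8000);	/* ask for 8k ints/s */

		printf("reg=0x%x rate=%u\n", reg, eitr_to_rate(reg));
		/* prints reg=0x1f0 rate=8064: quantized, not exact */
		return 0;
	}
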
2455 2458
2456const struct sysctlnode * 2459const struct sysctlnode *
2457ixv_sysctl_instance(struct adapter *adapter) 2460ixv_sysctl_instance(struct adapter *adapter)
2458{ 2461{
2459 const char *dvname; 2462 const char *dvname;
2460 struct sysctllog **log; 2463 struct sysctllog **log;
2461 int rc; 2464 int rc;
2462 const struct sysctlnode *rnode; 2465 const struct sysctlnode *rnode;
2463 2466
2464 log = &adapter->sysctllog; 2467 log = &adapter->sysctllog;
2465 dvname = device_xname(adapter->dev); 2468 dvname = device_xname(adapter->dev);
2466 2469
2467 if ((rc = sysctl_createv(log, 0, NULL, &rnode, 2470 if ((rc = sysctl_createv(log, 0, NULL, &rnode,
2468 0, CTLTYPE_NODE, dvname, 2471 0, CTLTYPE_NODE, dvname,
2469 SYSCTL_DESCR("ixv information and settings"), 2472 SYSCTL_DESCR("ixv information and settings"),
2470 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 2473 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
2471 goto err; 2474 goto err;
2472 2475
2473 return rnode; 2476 return rnode;
2474err: 2477err:
2475 device_printf(adapter->dev, 2478 device_printf(adapter->dev,
2476 "%s: sysctl_createv failed, rc = %d\n", __func__, rc); 2479 "%s: sysctl_createv failed, rc = %d\n", __func__, rc);
2477 return NULL; 2480 return NULL;
2478} 2481}
2479 2482
2480static void 2483static void
2481ixv_add_device_sysctls(struct adapter *adapter) 2484ixv_add_device_sysctls(struct adapter *adapter)
2482{ 2485{
2483 struct sysctllog **log; 2486 struct sysctllog **log;
2484 const struct sysctlnode *rnode, *cnode; 2487 const struct sysctlnode *rnode, *cnode;
2485 device_t dev; 2488 device_t dev;
2486 2489
2487 dev = adapter->dev; 2490 dev = adapter->dev;
2488 log = &adapter->sysctllog; 2491 log = &adapter->sysctllog;
2489 2492
2490 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2493 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2491 aprint_error_dev(dev, "could not create sysctl root\n"); 2494 aprint_error_dev(dev, "could not create sysctl root\n");
2492 return; 2495 return;
2493 } 2496 }
2494 2497
2495 if (sysctl_createv(log, 0, &rnode, &cnode, 2498 if (sysctl_createv(log, 0, &rnode, &cnode,
2496 CTLFLAG_READWRITE, CTLTYPE_INT, 2499 CTLFLAG_READWRITE, CTLTYPE_INT,
2497 "debug", SYSCTL_DESCR("Debug Info"), 2500 "debug", SYSCTL_DESCR("Debug Info"),
2498 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) 2501 ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0)
2499 aprint_error_dev(dev, "could not create sysctl\n"); 2502 aprint_error_dev(dev, "could not create sysctl\n");
2500 2503
2501 if (sysctl_createv(log, 0, &rnode, &cnode, 2504 if (sysctl_createv(log, 0, &rnode, &cnode,
 2505 CTLFLAG_READONLY, CTLTYPE_INT, "num_jcl_per_queue",
 2506 SYSCTL_DESCR("Number of jumbo buffers per queue"),
 2507 NULL, 0, &adapter->num_jcl, 0, CTL_CREATE,
 2508 CTL_EOL) != 0)
 2509 aprint_error_dev(dev, "could not create sysctl\n");
 2510
 2511 if (sysctl_createv(log, 0, &rnode, &cnode,
2502 CTLFLAG_READWRITE, CTLTYPE_BOOL, 2512 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2503 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), 2513 "enable_aim", SYSCTL_DESCR("Interrupt Moderation"),
2504 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) 2514 NULL, 0, &adapter->enable_aim, 0, CTL_CREATE, CTL_EOL) != 0)
2505 aprint_error_dev(dev, "could not create sysctl\n"); 2515 aprint_error_dev(dev, "could not create sysctl\n");
2506 2516
2507 if (sysctl_createv(log, 0, &rnode, &cnode, 2517 if (sysctl_createv(log, 0, &rnode, &cnode,
2508 CTLFLAG_READWRITE, CTLTYPE_BOOL, 2518 CTLFLAG_READWRITE, CTLTYPE_BOOL,
2509 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), 2519 "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
2510 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0) 2520 NULL, 0, &adapter->txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL) != 0)
2511 aprint_error_dev(dev, "could not create sysctl\n"); 2521 aprint_error_dev(dev, "could not create sysctl\n");
2512} 2522}
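
The new read-only node lands under the per-device root created by ixv_sysctl_instance, so from userland it shows up as hw.<device>.num_jcl_per_queue. A sketch of reading it with sysctlbyname(3), assuming a first instance named ixv0:

	#include <sys/sysctl.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		int num_jcl;
		size_t len = sizeof(num_jcl);

		if (sysctlbyname("hw.ixv0.num_jcl_per_queue", &num_jcl, &len,
		    NULL, 0) == -1) {
			perror("sysctlbyname");
			return EXIT_FAILURE;
		}
		printf("num_jcl_per_queue = %d\n", num_jcl);
		return EXIT_SUCCESS;
	}
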
2513 2523
2514/************************************************************************ 2524/************************************************************************
2515 * ixv_add_stats_sysctls - Add statistic sysctls for the VF. 2525 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
2516 ************************************************************************/ 2526 ************************************************************************/
2517static void 2527static void
2518ixv_add_stats_sysctls(struct adapter *adapter) 2528ixv_add_stats_sysctls(struct adapter *adapter)
2519{ 2529{
2520 device_t dev = adapter->dev; 2530 device_t dev = adapter->dev;
2521 struct tx_ring *txr = adapter->tx_rings; 2531 struct tx_ring *txr = adapter->tx_rings;
2522 struct rx_ring *rxr = adapter->rx_rings; 2532 struct rx_ring *rxr = adapter->rx_rings;
2523 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2533 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2524 struct ixgbe_hw *hw = &adapter->hw; 2534 struct ixgbe_hw *hw = &adapter->hw;
2525 const struct sysctlnode *rnode, *cnode; 2535 const struct sysctlnode *rnode, *cnode;
2526 struct sysctllog **log = &adapter->sysctllog; 2536 struct sysctllog **log = &adapter->sysctllog;
2527 const char *xname = device_xname(dev); 2537 const char *xname = device_xname(dev);
2528 2538
2529 /* Driver Statistics */ 2539 /* Driver Statistics */
2530 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC, 2540 evcnt_attach_dynamic(&adapter->efbig_tx_dma_setup, EVCNT_TYPE_MISC,
2531 NULL, xname, "Driver tx dma soft fail EFBIG"); 2541 NULL, xname, "Driver tx dma soft fail EFBIG");
2532 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC, 2542 evcnt_attach_dynamic(&adapter->mbuf_defrag_failed, EVCNT_TYPE_MISC,
2533 NULL, xname, "m_defrag() failed"); 2543 NULL, xname, "m_defrag() failed");
2534 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC, 2544 evcnt_attach_dynamic(&adapter->efbig2_tx_dma_setup, EVCNT_TYPE_MISC,
2535 NULL, xname, "Driver tx dma hard fail EFBIG"); 2545 NULL, xname, "Driver tx dma hard fail EFBIG");
2536 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC, 2546 evcnt_attach_dynamic(&adapter->einval_tx_dma_setup, EVCNT_TYPE_MISC,
2537 NULL, xname, "Driver tx dma hard fail EINVAL"); 2547 NULL, xname, "Driver tx dma hard fail EINVAL");
2538 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC, 2548 evcnt_attach_dynamic(&adapter->other_tx_dma_setup, EVCNT_TYPE_MISC,
2539 NULL, xname, "Driver tx dma hard fail other"); 2549 NULL, xname, "Driver tx dma hard fail other");
2540 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC, 2550 evcnt_attach_dynamic(&adapter->eagain_tx_dma_setup, EVCNT_TYPE_MISC,
2541 NULL, xname, "Driver tx dma soft fail EAGAIN"); 2551 NULL, xname, "Driver tx dma soft fail EAGAIN");
2542 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC, 2552 evcnt_attach_dynamic(&adapter->enomem_tx_dma_setup, EVCNT_TYPE_MISC,
2543 NULL, xname, "Driver tx dma soft fail ENOMEM"); 2553 NULL, xname, "Driver tx dma soft fail ENOMEM");
2544 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC, 2554 evcnt_attach_dynamic(&adapter->watchdog_events, EVCNT_TYPE_MISC,
2545 NULL, xname, "Watchdog timeouts"); 2555 NULL, xname, "Watchdog timeouts");
2546 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC, 2556 evcnt_attach_dynamic(&adapter->tso_err, EVCNT_TYPE_MISC,
2547 NULL, xname, "TSO errors"); 2557 NULL, xname, "TSO errors");
2548 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR, 2558 evcnt_attach_dynamic(&adapter->link_irq, EVCNT_TYPE_INTR,
2549 NULL, xname, "Link MSI-X IRQ Handled"); 2559 NULL, xname, "Link MSI-X IRQ Handled");
2550 2560
2551 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2561 for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2552 snprintf(adapter->queues[i].evnamebuf, 2562 snprintf(adapter->queues[i].evnamebuf,
2553 sizeof(adapter->queues[i].evnamebuf), "%s q%d", 2563 sizeof(adapter->queues[i].evnamebuf), "%s q%d",
2554 xname, i); 2564 xname, i);
2555 snprintf(adapter->queues[i].namebuf, 2565 snprintf(adapter->queues[i].namebuf,
2556 sizeof(adapter->queues[i].namebuf), "q%d", i); 2566 sizeof(adapter->queues[i].namebuf), "q%d", i);
2557 2567
2558 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2568 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2559 aprint_error_dev(dev, "could not create sysctl root\n"); 2569 aprint_error_dev(dev, "could not create sysctl root\n");
2560 break; 2570 break;
2561 } 2571 }
2562 2572
2563 if (sysctl_createv(log, 0, &rnode, &rnode, 2573 if (sysctl_createv(log, 0, &rnode, &rnode,
2564 0, CTLTYPE_NODE, 2574 0, CTLTYPE_NODE,
2565 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"), 2575 adapter->queues[i].namebuf, SYSCTL_DESCR("Queue Name"),
2566 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 2576 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
2567 break; 2577 break;
2568 2578
2569 if (sysctl_createv(log, 0, &rnode, &cnode, 2579 if (sysctl_createv(log, 0, &rnode, &cnode,
2570 CTLFLAG_READWRITE, CTLTYPE_INT, 2580 CTLFLAG_READWRITE, CTLTYPE_INT,
2571 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), 2581 "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"),
2572 ixv_sysctl_interrupt_rate_handler, 0, 2582 ixv_sysctl_interrupt_rate_handler, 0,
2573 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) 2583 (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0)
2574 break; 2584 break;
2575 2585
2576 if (sysctl_createv(log, 0, &rnode, &cnode, 2586 if (sysctl_createv(log, 0, &rnode, &cnode,
2577 CTLFLAG_READONLY, CTLTYPE_INT, 2587 CTLFLAG_READONLY, CTLTYPE_INT,
2578 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), 2588 "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"),
2579 ixv_sysctl_tdh_handler, 0, (void *)txr, 2589 ixv_sysctl_tdh_handler, 0, (void *)txr,
2580 0, CTL_CREATE, CTL_EOL) != 0) 2590 0, CTL_CREATE, CTL_EOL) != 0)
2581 break; 2591 break;
2582 2592
2583 if (sysctl_createv(log, 0, &rnode, &cnode, 2593 if (sysctl_createv(log, 0, &rnode, &cnode,
2584 CTLFLAG_READONLY, CTLTYPE_INT, 2594 CTLFLAG_READONLY, CTLTYPE_INT,
2585 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"), 2595 "txd_tail", SYSCTL_DESCR("Transmit Descriptor Tail"),
2586 ixv_sysctl_tdt_handler, 0, (void *)txr, 2596 ixv_sysctl_tdt_handler, 0, (void *)txr,
2587 0, CTL_CREATE, CTL_EOL) != 0) 2597 0, CTL_CREATE, CTL_EOL) != 0)
2588 break; 2598 break;
2589 2599
2590 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR, 2600 evcnt_attach_dynamic(&adapter->queues[i].irqs, EVCNT_TYPE_INTR,
2591 NULL, adapter->queues[i].evnamebuf, "IRQs on queue"); 2601 NULL, adapter->queues[i].evnamebuf, "IRQs on queue");
2592 evcnt_attach_dynamic(&adapter->queues[i].handleq, 2602 evcnt_attach_dynamic(&adapter->queues[i].handleq,
2593 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf, 2603 EVCNT_TYPE_MISC, NULL, adapter->queues[i].evnamebuf,
2594 "Handled queue in softint"); 2604 "Handled queue in softint");
2595 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC, 2605 evcnt_attach_dynamic(&adapter->queues[i].req, EVCNT_TYPE_MISC,
2596 NULL, adapter->queues[i].evnamebuf, "Requeued in softint"); 2606 NULL, adapter->queues[i].evnamebuf, "Requeued in softint");
2597 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, 2607 evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC,
2598 NULL, adapter->queues[i].evnamebuf, "TSO"); 2608 NULL, adapter->queues[i].evnamebuf, "TSO");
2599 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, 2609 evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC,
2600 NULL, adapter->queues[i].evnamebuf, 2610 NULL, adapter->queues[i].evnamebuf,
2601 "Queue No Descriptor Available"); 2611 "TX Queue No Descriptor Available");
2602 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, 2612 evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC,
2603 NULL, adapter->queues[i].evnamebuf, 2613 NULL, adapter->queues[i].evnamebuf,
2604 "Queue Packets Transmitted"); 2614 "Queue Packets Transmitted");
2605#ifndef IXGBE_LEGACY_TX 2615#ifndef IXGBE_LEGACY_TX
2606 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC, 2616 evcnt_attach_dynamic(&txr->pcq_drops, EVCNT_TYPE_MISC,
2607 NULL, adapter->queues[i].evnamebuf, 2617 NULL, adapter->queues[i].evnamebuf,
2608 "Packets dropped in pcq"); 2618 "Packets dropped in pcq");
2609#endif 2619#endif
2610 2620
2611#ifdef LRO 2621#ifdef LRO
2612 struct lro_ctrl *lro = &rxr->lro; 2622 struct lro_ctrl *lro = &rxr->lro;
2613#endif /* LRO */ 2623#endif /* LRO */
2614 2624
2615 if (sysctl_createv(log, 0, &rnode, &cnode, 2625 if (sysctl_createv(log, 0, &rnode, &cnode,
2616 CTLFLAG_READONLY, 2626 CTLFLAG_READONLY,
2617 CTLTYPE_INT, 2627 CTLTYPE_INT,
2618 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"), 2628 "rxd_nxck", SYSCTL_DESCR("Receive Descriptor next to check"),
2619 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0, 2629 ixv_sysctl_next_to_check_handler, 0, (void *)rxr, 0,
2620 CTL_CREATE, CTL_EOL) != 0) 2630 CTL_CREATE, CTL_EOL) != 0)
2621 break; 2631 break;
2622 2632
2623 if (sysctl_createv(log, 0, &rnode, &cnode, 2633 if (sysctl_createv(log, 0, &rnode, &cnode,
2624 CTLFLAG_READONLY, 2634 CTLFLAG_READONLY,
2625 CTLTYPE_INT, 2635 CTLTYPE_INT,
2626 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"), 2636 "rxd_head", SYSCTL_DESCR("Receive Descriptor Head"),
2627 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0, 2637 ixv_sysctl_rdh_handler, 0, (void *)rxr, 0,
2628 CTL_CREATE, CTL_EOL) != 0) 2638 CTL_CREATE, CTL_EOL) != 0)
2629 break; 2639 break;
2630 2640
2631 if (sysctl_createv(log, 0, &rnode, &cnode, 2641 if (sysctl_createv(log, 0, &rnode, &cnode,
2632 CTLFLAG_READONLY, 2642 CTLFLAG_READONLY,
2633 CTLTYPE_INT, 2643 CTLTYPE_INT,
2634 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"), 2644 "rxd_tail", SYSCTL_DESCR("Receive Descriptor Tail"),
2635 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0, 2645 ixv_sysctl_rdt_handler, 0, (void *)rxr, 0,
2636 CTL_CREATE, CTL_EOL) != 0) 2646 CTL_CREATE, CTL_EOL) != 0)
2637 break; 2647 break;
2638 2648
2639 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, 2649 evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC,
2640 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received"); 2650 NULL, adapter->queues[i].evnamebuf, "Queue Packets Received");
2641 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC, 2651 evcnt_attach_dynamic(&rxr->rx_bytes, EVCNT_TYPE_MISC,
2642 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received"); 2652 NULL, adapter->queues[i].evnamebuf, "Queue Bytes Received");
2643 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC, 2653 evcnt_attach_dynamic(&rxr->rx_copies, EVCNT_TYPE_MISC,
2644 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames"); 2654 NULL, adapter->queues[i].evnamebuf, "Copied RX Frames");
2645 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC, 2655 evcnt_attach_dynamic(&rxr->no_jmbuf, EVCNT_TYPE_MISC,
2646 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf"); 2656 NULL, adapter->queues[i].evnamebuf, "Rx no jumbo mbuf");
2647 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC, 2657 evcnt_attach_dynamic(&rxr->rx_discarded, EVCNT_TYPE_MISC,
2648 NULL, adapter->queues[i].evnamebuf, "Rx discarded"); 2658 NULL, adapter->queues[i].evnamebuf, "Rx discarded");
2649#ifdef LRO 2659#ifdef LRO
2650 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", 2660 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
2651 CTLFLAG_RD, &lro->lro_queued, 0, 2661 CTLFLAG_RD, &lro->lro_queued, 0,
2652 "LRO Queued"); 2662 "LRO Queued");
2653 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", 2663 SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
2654 CTLFLAG_RD, &lro->lro_flushed, 0, 2664 CTLFLAG_RD, &lro->lro_flushed, 0,
2655 "LRO Flushed"); 2665 "LRO Flushed");
2656#endif /* LRO */ 2666#endif /* LRO */
2657 } 2667 }
2658 2668
2659 /* MAC stats get their own sub node */ 2669 /* MAC stats get their own sub node */
2660 2670
2661 snprintf(stats->namebuf, 2671 snprintf(stats->namebuf,
2662 sizeof(stats->namebuf), "%s MAC Statistics", xname); 2672 sizeof(stats->namebuf), "%s MAC Statistics", xname);
2663 2673
2664 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL, 2674 evcnt_attach_dynamic(&stats->ipcs, EVCNT_TYPE_MISC, NULL,
2665 stats->namebuf, "rx csum offload - IP"); 2675 stats->namebuf, "rx csum offload - IP");
2666 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL, 2676 evcnt_attach_dynamic(&stats->l4cs, EVCNT_TYPE_MISC, NULL,
2667 stats->namebuf, "rx csum offload - L4"); 2677 stats->namebuf, "rx csum offload - L4");
2668 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL, 2678 evcnt_attach_dynamic(&stats->ipcs_bad, EVCNT_TYPE_MISC, NULL,
2669 stats->namebuf, "rx csum offload - IP bad"); 2679 stats->namebuf, "rx csum offload - IP bad");
2670 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL, 2680 evcnt_attach_dynamic(&stats->l4cs_bad, EVCNT_TYPE_MISC, NULL,
2671 stats->namebuf, "rx csum offload - L4 bad"); 2681 stats->namebuf, "rx csum offload - L4 bad");
2672 2682
2673 /* Packet Reception Stats */ 2683 /* Packet Reception Stats */
2674 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL, 2684 evcnt_attach_dynamic(&stats->vfgprc, EVCNT_TYPE_MISC, NULL,
2675 xname, "Good Packets Received"); 2685 xname, "Good Packets Received");
2676 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL, 2686 evcnt_attach_dynamic(&stats->vfgorc, EVCNT_TYPE_MISC, NULL,
2677 xname, "Good Octets Received"); 2687 xname, "Good Octets Received");
2678 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL, 2688 evcnt_attach_dynamic(&stats->vfmprc, EVCNT_TYPE_MISC, NULL,
2679 xname, "Multicast Packets Received"); 2689 xname, "Multicast Packets Received");
2680 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL, 2690 evcnt_attach_dynamic(&stats->vfgptc, EVCNT_TYPE_MISC, NULL,
2681 xname, "Good Packets Transmitted"); 2691 xname, "Good Packets Transmitted");
2682 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL, 2692 evcnt_attach_dynamic(&stats->vfgotc, EVCNT_TYPE_MISC, NULL,
2683 xname, "Good Octets Transmitted"); 2693 xname, "Good Octets Transmitted");
2684 2694
2685 /* Mailbox Stats */ 2695 /* Mailbox Stats */
2686 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL, 2696 evcnt_attach_dynamic(&hw->mbx.stats.msgs_tx, EVCNT_TYPE_MISC, NULL,
2687 xname, "message TXs"); 2697 xname, "message TXs");
2688 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL, 2698 evcnt_attach_dynamic(&hw->mbx.stats.msgs_rx, EVCNT_TYPE_MISC, NULL,
2689 xname, "message RXs"); 2699 xname, "message RXs");
2690 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL, 2700 evcnt_attach_dynamic(&hw->mbx.stats.acks, EVCNT_TYPE_MISC, NULL,
2691 xname, "ACKs"); 2701 xname, "ACKs");
2692 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL, 2702 evcnt_attach_dynamic(&hw->mbx.stats.reqs, EVCNT_TYPE_MISC, NULL,
2693 xname, "REQs"); 2703 xname, "REQs");
2694 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL, 2704 evcnt_attach_dynamic(&hw->mbx.stats.rsts, EVCNT_TYPE_MISC, NULL,
2695 xname, "RSTs"); 2705 xname, "RSTs");
2696 2706
2697} /* ixv_add_stats_sysctls */ 2707} /* ixv_add_stats_sysctls */
2698 2708
2699static void 2709static void
2700ixv_clear_evcnt(struct adapter *adapter) 2710ixv_clear_evcnt(struct adapter *adapter)
2701{ 2711{
2702 struct tx_ring *txr = adapter->tx_rings; 2712 struct tx_ring *txr = adapter->tx_rings;
2703 struct rx_ring *rxr = adapter->rx_rings; 2713 struct rx_ring *rxr = adapter->rx_rings;
2704 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 2714 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2705 struct ixgbe_hw *hw = &adapter->hw; 2715 struct ixgbe_hw *hw = &adapter->hw;
2706 int i; 2716 int i;
2707 2717
2708 /* Driver Statistics */ 2718 /* Driver Statistics */
2709 adapter->efbig_tx_dma_setup.ev_count = 0; 2719 adapter->efbig_tx_dma_setup.ev_count = 0;
2710 adapter->mbuf_defrag_failed.ev_count = 0; 2720 adapter->mbuf_defrag_failed.ev_count = 0;
2711 adapter->efbig2_tx_dma_setup.ev_count = 0; 2721 adapter->efbig2_tx_dma_setup.ev_count = 0;
2712 adapter->einval_tx_dma_setup.ev_count = 0; 2722 adapter->einval_tx_dma_setup.ev_count = 0;
2713 adapter->other_tx_dma_setup.ev_count = 0; 2723 adapter->other_tx_dma_setup.ev_count = 0;
2714 adapter->eagain_tx_dma_setup.ev_count = 0; 2724 adapter->eagain_tx_dma_setup.ev_count = 0;
2715 adapter->enomem_tx_dma_setup.ev_count = 0; 2725 adapter->enomem_tx_dma_setup.ev_count = 0;
2716 adapter->watchdog_events.ev_count = 0; 2726 adapter->watchdog_events.ev_count = 0;
2717 adapter->tso_err.ev_count = 0; 2727 adapter->tso_err.ev_count = 0;
2718 adapter->link_irq.ev_count = 0; 2728 adapter->link_irq.ev_count = 0;
2719 2729
2720 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) { 2730 for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
2721 adapter->queues[i].irqs.ev_count = 0; 2731 adapter->queues[i].irqs.ev_count = 0;
2722 adapter->queues[i].handleq.ev_count = 0; 2732 adapter->queues[i].handleq.ev_count = 0;
2723 adapter->queues[i].req.ev_count = 0; 2733 adapter->queues[i].req.ev_count = 0;
2724 txr->tso_tx.ev_count = 0; 2734 txr->tso_tx.ev_count = 0;
2725 txr->no_desc_avail.ev_count = 0; 2735 txr->no_desc_avail.ev_count = 0;
2726 txr->total_packets.ev_count = 0; 2736 txr->total_packets.ev_count = 0;
2727#ifndef IXGBE_LEGACY_TX 2737#ifndef IXGBE_LEGACY_TX
2728 txr->pcq_drops.ev_count = 0; 2738 txr->pcq_drops.ev_count = 0;
2729#endif 2739#endif
2730 txr->q_efbig_tx_dma_setup = 0; 2740 txr->q_efbig_tx_dma_setup = 0;
2731 txr->q_mbuf_defrag_failed = 0; 2741 txr->q_mbuf_defrag_failed = 0;
2732 txr->q_efbig2_tx_dma_setup = 0; 2742 txr->q_efbig2_tx_dma_setup = 0;
2733 txr->q_einval_tx_dma_setup = 0; 2743 txr->q_einval_tx_dma_setup = 0;
2734 txr->q_other_tx_dma_setup = 0; 2744 txr->q_other_tx_dma_setup = 0;
2735 txr->q_eagain_tx_dma_setup = 0; 2745 txr->q_eagain_tx_dma_setup = 0;
2736 txr->q_enomem_tx_dma_setup = 0; 2746 txr->q_enomem_tx_dma_setup = 0;
2737 txr->q_tso_err = 0; 2747 txr->q_tso_err = 0;
2738 2748
2739 rxr->rx_packets.ev_count = 0; 2749 rxr->rx_packets.ev_count = 0;
2740 rxr->rx_bytes.ev_count = 0; 2750 rxr->rx_bytes.ev_count = 0;
2741 rxr->rx_copies.ev_count = 0; 2751 rxr->rx_copies.ev_count = 0;
2742 rxr->no_jmbuf.ev_count = 0; 2752 rxr->no_jmbuf.ev_count = 0;
2743 rxr->rx_discarded.ev_count = 0; 2753 rxr->rx_discarded.ev_count = 0;
2744 } 2754 }
2745 2755
2746 /* MAC stats get their own sub node */ 2756 /* MAC stats get their own sub node */
2747 2757
2748 stats->ipcs.ev_count = 0; 2758 stats->ipcs.ev_count = 0;
2749 stats->l4cs.ev_count = 0; 2759 stats->l4cs.ev_count = 0;
2750 stats->ipcs_bad.ev_count = 0; 2760 stats->ipcs_bad.ev_count = 0;
2751 stats->l4cs_bad.ev_count = 0; 2761 stats->l4cs_bad.ev_count = 0;
2752 2762
2753 /* Packet Reception Stats */ 2763 /* Packet Reception Stats */
2754 stats->vfgprc.ev_count = 0; 2764 stats->vfgprc.ev_count = 0;
2755 stats->vfgorc.ev_count = 0; 2765 stats->vfgorc.ev_count = 0;
2756 stats->vfmprc.ev_count = 0; 2766 stats->vfmprc.ev_count = 0;
2757 stats->vfgptc.ev_count = 0; 2767 stats->vfgptc.ev_count = 0;
2758 stats->vfgotc.ev_count = 0; 2768 stats->vfgotc.ev_count = 0;
2759 2769
2760 /* Mailbox Stats */ 2770 /* Mailbox Stats */
2761 hw->mbx.stats.msgs_tx.ev_count = 0; 2771 hw->mbx.stats.msgs_tx.ev_count = 0;
2762 hw->mbx.stats.msgs_rx.ev_count = 0; 2772 hw->mbx.stats.msgs_rx.ev_count = 0;
2763 hw->mbx.stats.acks.ev_count = 0; 2773 hw->mbx.stats.acks.ev_count = 0;
2764 hw->mbx.stats.reqs.ev_count = 0; 2774 hw->mbx.stats.reqs.ev_count = 0;
2765 hw->mbx.stats.rsts.ev_count = 0; 2775 hw->mbx.stats.rsts.ev_count = 0;
2766 2776
2767} /* ixv_clear_evcnt */ 2777} /* ixv_clear_evcnt */
2768 2778
2769/************************************************************************ 2779/************************************************************************
2770 * ixv_set_sysctl_value 2780 * ixv_set_sysctl_value
2771 ************************************************************************/ 2781 ************************************************************************/
2772static void 2782static void
2773ixv_set_sysctl_value(struct adapter *adapter, const char *name, 2783ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2774 const char *description, int *limit, int value) 2784 const char *description, int *limit, int value)
2775{ 2785{
2776 device_t dev = adapter->dev; 2786 device_t dev = adapter->dev;
2777 struct sysctllog **log; 2787 struct sysctllog **log;
2778 const struct sysctlnode *rnode, *cnode; 2788 const struct sysctlnode *rnode, *cnode;
2779 2789
2780 log = &adapter->sysctllog; 2790 log = &adapter->sysctllog;
2781 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { 2791 if ((rnode = ixv_sysctl_instance(adapter)) == NULL) {
2782 aprint_error_dev(dev, "could not create sysctl root\n"); 2792 aprint_error_dev(dev, "could not create sysctl root\n");
2783 return; 2793 return;
2784 } 2794 }
2785 if (sysctl_createv(log, 0, &rnode, &cnode, 2795 if (sysctl_createv(log, 0, &rnode, &cnode,
2786 CTLFLAG_READWRITE, CTLTYPE_INT, 2796 CTLFLAG_READWRITE, CTLTYPE_INT,
2787 name, SYSCTL_DESCR(description), 2797 name, SYSCTL_DESCR(description),
2788 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0) 2798 NULL, 0, limit, 0, CTL_CREATE, CTL_EOL) != 0)
2789 aprint_error_dev(dev, "could not create sysctl\n"); 2799 aprint_error_dev(dev, "could not create sysctl\n");
2790 *limit = value; 2800 *limit = value;
2791} /* ixv_set_sysctl_value */ 2801} /* ixv_set_sysctl_value */
2792 2802
2793/************************************************************************ 2803/************************************************************************
2794 * ixv_print_debug_info 2804 * ixv_print_debug_info
2795 * 2805 *
2796 * Called only when the "debug" sysctl is written. 2806 * Called only when the "debug" sysctl is written.
2797 * Provides a way to take a look at important statistics 2807 * Provides a way to take a look at important statistics
2798 * maintained by the driver and hardware. 2808 * maintained by the driver and hardware.
2799 ************************************************************************/ 2809 ************************************************************************/
2800static void 2810static void
2801ixv_print_debug_info(struct adapter *adapter) 2811ixv_print_debug_info(struct adapter *adapter)
2802{ 2812{
2803 device_t dev = adapter->dev; 2813 device_t dev = adapter->dev;
2804 struct ix_queue *que = adapter->queues; 2814 struct ix_queue *que = adapter->queues;
2805 struct rx_ring *rxr; 2815 struct rx_ring *rxr;
2806 struct tx_ring *txr; 2816 struct tx_ring *txr;
2807#ifdef LRO 2817#ifdef LRO
2808 struct lro_ctrl *lro; 2818 struct lro_ctrl *lro;
2809#endif /* LRO */ 2819#endif /* LRO */
2810 2820
2811 for (int i = 0; i < adapter->num_queues; i++, que++) { 2821 for (int i = 0; i < adapter->num_queues; i++, que++) {
2812 txr = que->txr; 2822 txr = que->txr;
2813 rxr = que->rxr; 2823 rxr = que->rxr;
2814#ifdef LRO 2824#ifdef LRO
2815 lro = &rxr->lro; 2825 lro = &rxr->lro;
2816#endif /* LRO */ 2826#endif /* LRO */
2817 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n", 2827 device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
2818 que->msix, (long)que->irqs.ev_count); 2828 que->msix, (long)que->irqs.ev_count);
2819 device_printf(dev, "RX(%d) Packets Received: %lld\n", 2829 device_printf(dev, "RX(%d) Packets Received: %lld\n",
2820 rxr->me, (long long)rxr->rx_packets.ev_count); 2830 rxr->me, (long long)rxr->rx_packets.ev_count);
2821 device_printf(dev, "RX(%d) Bytes Received: %lu\n", 2831 device_printf(dev, "RX(%d) Bytes Received: %lu\n",
2822 rxr->me, (long)rxr->rx_bytes.ev_count); 2832 rxr->me, (long)rxr->rx_bytes.ev_count);
2823#ifdef LRO 2833#ifdef LRO
2824 device_printf(dev, "RX(%d) LRO Queued= %ju\n", 2834 device_printf(dev, "RX(%d) LRO Queued= %ju\n",
2825 rxr->me, (uintmax_t)lro->lro_queued); 2835 rxr->me, (uintmax_t)lro->lro_queued);
2826 device_printf(dev, "RX(%d) LRO Flushed= %ju\n", 2836 device_printf(dev, "RX(%d) LRO Flushed= %ju\n",
2827 rxr->me, (uintmax_t)lro->lro_flushed); 2837 rxr->me, (uintmax_t)lro->lro_flushed);
2828#endif /* LRO */ 2838#endif /* LRO */
2829 device_printf(dev, "TX(%d) Packets Sent: %lu\n", 2839 device_printf(dev, "TX(%d) Packets Sent: %lu\n",
2830 txr->me, (long)txr->total_packets.ev_count); 2840 txr->me, (long)txr->total_packets.ev_count);
2831 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n", 2841 device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
2832 txr->me, (long)txr->no_desc_avail.ev_count); 2842 txr->me, (long)txr->no_desc_avail.ev_count);
2833 } 2843 }
2834 2844
2835 device_printf(dev, "MBX IRQ Handled: %lu\n", 2845 device_printf(dev, "MBX IRQ Handled: %lu\n",
2836 (long)adapter->link_irq.ev_count); 2846 (long)adapter->link_irq.ev_count);
2837} /* ixv_print_debug_info */ 2847} /* ixv_print_debug_info */
2838 2848
2839/************************************************************************ 2849/************************************************************************
2840 * ixv_sysctl_debug 2850 * ixv_sysctl_debug
2841 ************************************************************************/ 2851 ************************************************************************/
2842static int 2852static int
2843ixv_sysctl_debug(SYSCTLFN_ARGS) 2853ixv_sysctl_debug(SYSCTLFN_ARGS)
2844{ 2854{
2845 struct sysctlnode node = *rnode; 2855 struct sysctlnode node = *rnode;
2846 struct adapter *adapter = (struct adapter *)node.sysctl_data; 2856 struct adapter *adapter = (struct adapter *)node.sysctl_data;
2847 int error, result; 2857 int error, result;
2848 2858
2849 node.sysctl_data = &result; 2859 node.sysctl_data = &result;
2850 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2860 error = sysctl_lookup(SYSCTLFN_CALL(&node));
2851 2861
2852 if (error || newp == NULL) 2862 if (error || newp == NULL)
2853 return error; 2863 return error;
2854 2864
2855 if (result == 1) 2865 if (result == 1)
2856 ixv_print_debug_info(adapter); 2866 ixv_print_debug_info(adapter);
2857 2867
2858 return 0; 2868 return 0;
2859} /* ixv_sysctl_debug */ 2869} /* ixv_sysctl_debug */
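Writing 1 to this node dumps the per-queue counters through ixv_print_debug_info(); any other written value is accepted and ignored. Assuming the node is attached under the adapter's sysctl root as "debug" (the attach point is outside this hunk), something like "sysctl -w hw.ixv0.debug=1" would trigger the dump.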
2860 2870
2861/************************************************************************ 2871/************************************************************************
2862 * ixv_init_device_features 2872 * ixv_init_device_features
2863 ************************************************************************/ 2873 ************************************************************************/
2864static void 2874static void
2865ixv_init_device_features(struct adapter *adapter) 2875ixv_init_device_features(struct adapter *adapter)
2866{ 2876{
2867 adapter->feat_cap = IXGBE_FEATURE_NETMAP 2877 adapter->feat_cap = IXGBE_FEATURE_NETMAP
2868 | IXGBE_FEATURE_VF 2878 | IXGBE_FEATURE_VF
2869 | IXGBE_FEATURE_RSS 2879 | IXGBE_FEATURE_RSS
2870 | IXGBE_FEATURE_LEGACY_TX; 2880 | IXGBE_FEATURE_LEGACY_TX;
2871 2881
2872 /* A tad short on feature flags for VFs, atm. */ 2882 /* A tad short on feature flags for VFs, atm. */
2873 switch (adapter->hw.mac.type) { 2883 switch (adapter->hw.mac.type) {
2874 case ixgbe_mac_82599_vf: 2884 case ixgbe_mac_82599_vf:
2875 break; 2885 break;
2876 case ixgbe_mac_X540_vf: 2886 case ixgbe_mac_X540_vf:
2877 break; 2887 break;
2878 case ixgbe_mac_X550_vf: 2888 case ixgbe_mac_X550_vf:
2879 case ixgbe_mac_X550EM_x_vf: 2889 case ixgbe_mac_X550EM_x_vf:
2880 case ixgbe_mac_X550EM_a_vf: 2890 case ixgbe_mac_X550EM_a_vf:
2881 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; 2891 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
2882 break; 2892 break;
2883 default: 2893 default:
2884 break; 2894 break;
2885 } 2895 }
2886 2896
2887 /* Enabled by default... */ 2897 /* Enabled by default... */
2888 /* Is a virtual function (VF) */ 2898 /* Is a virtual function (VF) */
2889 if (adapter->feat_cap & IXGBE_FEATURE_VF) 2899 if (adapter->feat_cap & IXGBE_FEATURE_VF)
2890 adapter->feat_en |= IXGBE_FEATURE_VF; 2900 adapter->feat_en |= IXGBE_FEATURE_VF;
2891 /* Netmap */ 2901 /* Netmap */
2892 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 2902 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
2893 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 2903 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
2894 /* Receive-Side Scaling (RSS) */ 2904 /* Receive-Side Scaling (RSS) */
2895 if (adapter->feat_cap & IXGBE_FEATURE_RSS) 2905 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
2896 adapter->feat_en |= IXGBE_FEATURE_RSS; 2906 adapter->feat_en |= IXGBE_FEATURE_RSS;
2897 /* Needs advanced context descriptor regardless of offloads req'd */ 2907 /* Needs advanced context descriptor regardless of offloads req'd */
2898 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) 2908 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
2899 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; 2909 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
2900 2910
2901 /* Enabled via sysctl... */ 2911 /* Enabled via sysctl... */
2902 /* Legacy (single queue) transmit */ 2912 /* Legacy (single queue) transmit */
2903 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) && 2913 if ((adapter->feat_cap & IXGBE_FEATURE_LEGACY_TX) &&
2904 ixv_enable_legacy_tx) 2914 ixv_enable_legacy_tx)
2905 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX; 2915 adapter->feat_en |= IXGBE_FEATURE_LEGACY_TX;
2906} /* ixv_init_device_features */ 2916} /* ixv_init_device_features */
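The feat_cap/feat_en split above is deliberate: feat_cap records everything this MAC type could support, while feat_en is the subset actually switched on, either unconditionally (VF, netmap, RSS, the context-descriptor workaround) or via a tunable (ixv_enable_legacy_tx for legacy single-queue transmit). The rest of the driver tests feat_en, not feat_cap, as ixv_handle_que() does for IXGBE_FEATURE_LEGACY_TX.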
2907 2917
2908/************************************************************************ 2918/************************************************************************
2909 * ixv_shutdown - Shutdown entry point 2919 * ixv_shutdown - Shutdown entry point
2910 ************************************************************************/ 2920 ************************************************************************/
2911#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ 2921#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */
2912static int 2922static int
2913ixv_shutdown(device_t dev) 2923ixv_shutdown(device_t dev)
2914{ 2924{
2915 struct adapter *adapter = device_private(dev); 2925 struct adapter *adapter = device_private(dev);
2916 IXGBE_CORE_LOCK(adapter); 2926 IXGBE_CORE_LOCK(adapter);
2917 ixv_stop(adapter); 2927 ixv_stop(adapter);
2918 IXGBE_CORE_UNLOCK(adapter); 2928 IXGBE_CORE_UNLOCK(adapter);
2919 2929
2920 return (0); 2930 return (0);
2921} /* ixv_shutdown */ 2931} /* ixv_shutdown */
2922#endif 2932#endif
2923 2933
2924static int 2934static int
2925ixv_ifflags_cb(struct ethercom *ec) 2935ixv_ifflags_cb(struct ethercom *ec)
2926{ 2936{
2927 struct ifnet *ifp = &ec->ec_if; 2937 struct ifnet *ifp = &ec->ec_if;
2928 struct adapter *adapter = ifp->if_softc; 2938 struct adapter *adapter = ifp->if_softc;
2929 u_short saved_flags; 2939 u_short saved_flags;
2930 u_short change; 2940 u_short change;
2931 int rv = 0; 2941 int rv = 0;
2932 2942
2933 IXGBE_CORE_LOCK(adapter); 2943 IXGBE_CORE_LOCK(adapter);
2934 2944
2935 saved_flags = adapter->if_flags; 2945 saved_flags = adapter->if_flags;
2936 change = ifp->if_flags ^ adapter->if_flags; 2946 change = ifp->if_flags ^ adapter->if_flags;
2937 if (change != 0) 2947 if (change != 0)
2938 adapter->if_flags = ifp->if_flags; 2948 adapter->if_flags = ifp->if_flags;
2939 2949
2940 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 2950 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2941 rv = ENETRESET; 2951 rv = ENETRESET;
2942 goto out; 2952 goto out;
2943 } else if ((change & IFF_PROMISC) != 0) { 2953 } else if ((change & IFF_PROMISC) != 0) {
2944 rv = ixv_set_rxfilter(adapter); 2954 rv = ixv_set_rxfilter(adapter);
2945 if (rv != 0) { 2955 if (rv != 0) {
2946 /* Restore previous */ 2956 /* Restore previous */
2947 adapter->if_flags = saved_flags; 2957 adapter->if_flags = saved_flags;
2948 goto out; 2958 goto out;
2949 } 2959 }
2950 } 2960 }
2951 2961
2952 /* Check for ec_capenable. */ 2962 /* Check for ec_capenable. */
2953 change = ec->ec_capenable ^ adapter->ec_capenable; 2963 change = ec->ec_capenable ^ adapter->ec_capenable;
2954 adapter->ec_capenable = ec->ec_capenable; 2964 adapter->ec_capenable = ec->ec_capenable;
2955 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 2965 if ((change & ~(ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
2956 | ETHERCAP_VLAN_HWFILTER)) != 0) { 2966 | ETHERCAP_VLAN_HWFILTER)) != 0) {
2957 rv = ENETRESET; 2967 rv = ENETRESET;
2958 goto out; 2968 goto out;
2959 } 2969 }
2960 2970
2961 /* 2971 /*
2962 * Special handling is not required for ETHERCAP_VLAN_MTU. 2972 * Special handling is not required for ETHERCAP_VLAN_MTU.
2963 * PF's MAXFRS(MHADD) does not include the 4 bytes of the VLAN header. 2973 * PF's MAXFRS(MHADD) does not include the 4 bytes of the VLAN header.
2964 */ 2974 */
2965 2975
2966 /* Set up VLAN support and filter */ 2976 /* Set up VLAN support and filter */
2967 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0) 2977 if ((change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_HWFILTER)) != 0)
2968 rv = ixv_setup_vlan_support(adapter); 2978 rv = ixv_setup_vlan_support(adapter);
2969 2979
2970out: 2980out:
2971 IXGBE_CORE_UNLOCK(adapter); 2981 IXGBE_CORE_UNLOCK(adapter);
2972 2982
2973 return rv; 2983 return rv;
2974} 2984}
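ixv_ifflags_cb() relies on the standard XOR idiom: the bits set in old ^ new are exactly the flags that toggled, which is what lets it treat "only IFF_PROMISC changed" as a cheap RX-filter update while any other change returns ENETRESET and forces a full reinit. A standalone sketch of the idiom, with the NetBSD IFF_PROMISC value written out as a literal:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned short oldf = 0x8843;	/* previous if_flags */
		unsigned short newf = 0x8943;	/* 0x0100 (IFF_PROMISC) toggled */
		unsigned short change = oldf ^ newf;

		if (change & 0x0100)
			printf("promiscuous mode toggled (change=%#x)\n",
			    change);
		return 0;
	}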
2975 2985
2976 2986
2977/************************************************************************ 2987/************************************************************************
2978 * ixv_ioctl - Ioctl entry point 2988 * ixv_ioctl - Ioctl entry point
2979 * 2989 *
2980 * Called when the user wants to configure the interface. 2990 * Called when the user wants to configure the interface.
2981 * 2991 *
2982 * return 0 on success, positive on failure 2992 * return 0 on success, positive on failure
2983 ************************************************************************/ 2993 ************************************************************************/
2984static int 2994static int
2985ixv_ioctl(struct ifnet *ifp, u_long command, void *data) 2995ixv_ioctl(struct ifnet *ifp, u_long command, void *data)
2986{ 2996{
2987 struct adapter *adapter = ifp->if_softc; 2997 struct adapter *adapter = ifp->if_softc;
2988 struct ixgbe_hw *hw = &adapter->hw; 2998 struct ixgbe_hw *hw = &adapter->hw;
2989 struct ifcapreq *ifcr = data; 2999 struct ifcapreq *ifcr = data;
2990 int error; 3000 int error;
2991 int l4csum_en; 3001 int l4csum_en;
2992 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx | 3002 const int l4csum = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
2993 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 3003 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2994 3004
2995 switch (command) { 3005 switch (command) {
2996 case SIOCSIFFLAGS: 3006 case SIOCSIFFLAGS:
2997 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); 3007 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
2998 break; 3008 break;
2999 case SIOCADDMULTI: { 3009 case SIOCADDMULTI: {
3000 struct ether_multi *enm; 3010 struct ether_multi *enm;
3001 struct ether_multistep step; 3011 struct ether_multistep step;
3002 struct ethercom *ec = &adapter->osdep.ec; 3012 struct ethercom *ec = &adapter->osdep.ec;
3003 bool overflow = false; 3013 bool overflow = false;
3004 int mcnt = 0; 3014 int mcnt = 0;
3005 3015
3006 /* 3016 /*
3007 * Check the number of multicast addresses. If it exceeds 3017 * Check the number of multicast addresses. If it exceeds
3008 * the limit, return ENOSPC. 3018 * the limit, return ENOSPC.
3009 * Update this code when we support API 1.3. 3019 * Update this code when we support API 1.3.
3010 */ 3020 */
3011 ETHER_LOCK(ec); 3021 ETHER_LOCK(ec);
3012 ETHER_FIRST_MULTI(step, ec, enm); 3022 ETHER_FIRST_MULTI(step, ec, enm);
3013 while (enm != NULL) { 3023 while (enm != NULL) {
3014 mcnt++; 3024 mcnt++;
3015 3025
3016 /* 3026 /*
3017 * This check runs before the address is added, so at 3027 * This check runs before the address is added, so at
3018 * least one free slot must remain. 3028 * least one free slot must remain.
3019 */ 3029 */
3020 if (mcnt > (IXGBE_MAX_VF_MC - 1)) { 3030 if (mcnt > (IXGBE_MAX_VF_MC - 1)) {
3021 overflow = true; 3031 overflow = true;
3022 break; 3032 break;
3023 } 3033 }
3024 ETHER_NEXT_MULTI(step, enm); 3034 ETHER_NEXT_MULTI(step, enm);
3025 } 3035 }
3026 ETHER_UNLOCK(ec); 3036 ETHER_UNLOCK(ec);
3027 error = 0; 3037 error = 0;
3028 if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) { 3038 if (overflow && ((ec->ec_flags & ETHER_F_ALLMULTI) == 0)) {
3029 error = hw->mac.ops.update_xcast_mode(hw, 3039 error = hw->mac.ops.update_xcast_mode(hw,
3030 IXGBEVF_XCAST_MODE_ALLMULTI); 3040 IXGBEVF_XCAST_MODE_ALLMULTI);
3031 if (error == IXGBE_ERR_NOT_TRUSTED) { 3041 if (error == IXGBE_ERR_NOT_TRUSTED) {
3032 device_printf(adapter->dev, 3042 device_printf(adapter->dev,
3033 "this interface is not trusted\n"); 3043 "this interface is not trusted\n");
3034 error = EPERM; 3044 error = EPERM;
3035 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) { 3045 } else if (error == IXGBE_ERR_FEATURE_NOT_SUPPORTED) {
3036 device_printf(adapter->dev, 3046 device_printf(adapter->dev,
3037 "the PF doesn't support allmulti mode\n"); 3047 "the PF doesn't support allmulti mode\n");
3038 error = EOPNOTSUPP; 3048 error = EOPNOTSUPP;
3039 } else if (error) { 3049 } else if (error) {
3040 device_printf(adapter->dev, 3050 device_printf(adapter->dev,
3041 "number of Ethernet multicast addresses " 3051 "number of Ethernet multicast addresses "
3042 "exceeds the limit (%d). error = %d\n", 3052 "exceeds the limit (%d). error = %d\n",
3043 IXGBE_MAX_VF_MC, error); 3053 IXGBE_MAX_VF_MC, error);
3044 error = ENOSPC; 3054 error = ENOSPC;
3045 } else 3055 } else
3046 ec->ec_flags |= ETHER_F_ALLMULTI; 3056 ec->ec_flags |= ETHER_F_ALLMULTI;
3047 } 3057 }
3048 if (error) 3058 if (error)
3049 return error; 3059 return error;
3050 } 3060 }
3051 /*FALLTHROUGH*/ 3061 /*FALLTHROUGH*/
3052 case SIOCDELMULTI: 3062 case SIOCDELMULTI:
3053 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); 3063 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
3054 break; 3064 break;
3055 case SIOCSIFMEDIA: 3065 case SIOCSIFMEDIA:
3056 case SIOCGIFMEDIA: 3066 case SIOCGIFMEDIA:
3057 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); 3067 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
3058 break; 3068 break;
3059 case SIOCSIFCAP: 3069 case SIOCSIFCAP:
3060 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); 3070 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
3061 break; 3071 break;
3062 case SIOCSIFMTU: 3072 case SIOCSIFMTU:
3063 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 3073 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
3064 break; 3074 break;
3065 case SIOCZIFDATA: 3075 case SIOCZIFDATA:
3066 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)"); 3076 IOCTL_DEBUGOUT("ioctl: SIOCZIFDATA (Zero counter)");
3067 ixv_update_stats(adapter); 3077 ixv_update_stats(adapter);
3068 ixv_clear_evcnt(adapter); 3078 ixv_clear_evcnt(adapter);
3069 break; 3079 break;
3070 default: 3080 default:
3071 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command); 3081 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)", (int)command);
3072 break; 3082 break;
3073 } 3083 }
3074 3084
3075 switch (command) { 3085 switch (command) {
3076 case SIOCSIFCAP: 3086 case SIOCSIFCAP:
3077 /* Layer-4 Rx checksum offload has to be turned on and 3087 /* Layer-4 Rx checksum offload has to be turned on and
3078 * off as a unit. 3088 * off as a unit.
3079 */ 3089 */
3080 l4csum_en = ifcr->ifcr_capenable & l4csum; 3090 l4csum_en = ifcr->ifcr_capenable & l4csum;
3081 if (l4csum_en != l4csum && l4csum_en != 0) 3091 if (l4csum_en != l4csum && l4csum_en != 0)
3082 return EINVAL; 3092 return EINVAL;
3083 /*FALLTHROUGH*/ 3093 /*FALLTHROUGH*/
3084 case SIOCADDMULTI: 3094 case SIOCADDMULTI:
3085 case SIOCDELMULTI: 3095 case SIOCDELMULTI:
3086 case SIOCSIFFLAGS: 3096 case SIOCSIFFLAGS:
3087 case SIOCSIFMTU: 3097 case SIOCSIFMTU:
3088 default: 3098 default:
3089 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 3099 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
3090 return error; 3100 return error;
3091 if ((ifp->if_flags & IFF_RUNNING) == 0) 3101 if ((ifp->if_flags & IFF_RUNNING) == 0)
3092 ; 3102 ;
3093 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { 3103 else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
3094 IXGBE_CORE_LOCK(adapter); 3104 IXGBE_CORE_LOCK(adapter);
3095 ixv_init_locked(adapter); 3105 ixv_init_locked(adapter);
3096 IXGBE_CORE_UNLOCK(adapter); 3106 IXGBE_CORE_UNLOCK(adapter);
3097 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { 3107 } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
3098 /* 3108 /*
3099 * Multicast list has changed; set the hardware filter 3109 * Multicast list has changed; set the hardware filter
3100 * accordingly. 3110 * accordingly.
3101 */ 3111 */
3102 IXGBE_CORE_LOCK(adapter); 3112 IXGBE_CORE_LOCK(adapter);
3103 ixv_disable_intr(adapter); 3113 ixv_disable_intr(adapter);
3104 ixv_set_rxfilter(adapter); 3114 ixv_set_rxfilter(adapter);
3105 ixv_enable_intr(adapter); 3115 ixv_enable_intr(adapter);
3106 IXGBE_CORE_UNLOCK(adapter); 3116 IXGBE_CORE_UNLOCK(adapter);
3107 } 3117 }
3108 return 0; 3118 return 0;
3109 } 3119 }
3110} /* ixv_ioctl */ 3120} /* ixv_ioctl */
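The two switches in ixv_ioctl() split the work: the first handles driver-specific preprocessing (logging, the multicast-overflow fallback to allmulti, clearing counters for SIOCZIFDATA), while the second follows the standard NetBSD pattern of delegating to ether_ioctl() and acting only on ENETRESET, which means "the change requires reprogramming the hardware". SIOCSIFCAP and SIOCSIFMTU then trigger a full reinit through ixv_init_locked(); a changed multicast list only reprograms the receive filter with interrupts briefly disabled; and nothing is done while the interface is not running.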
3111 3121
3112/************************************************************************ 3122/************************************************************************
3113 * ixv_init 3123 * ixv_init
3114 ************************************************************************/ 3124 ************************************************************************/
3115static int 3125static int
3116ixv_init(struct ifnet *ifp) 3126ixv_init(struct ifnet *ifp)
3117{ 3127{
3118 struct adapter *adapter = ifp->if_softc; 3128 struct adapter *adapter = ifp->if_softc;
3119 3129
3120 IXGBE_CORE_LOCK(adapter); 3130 IXGBE_CORE_LOCK(adapter);
3121 ixv_init_locked(adapter); 3131 ixv_init_locked(adapter);
3122 IXGBE_CORE_UNLOCK(adapter); 3132 IXGBE_CORE_UNLOCK(adapter);
3123 3133
3124 return 0; 3134 return 0;
3125} /* ixv_init */ 3135} /* ixv_init */
3126 3136
3127/************************************************************************ 3137/************************************************************************
3128 * ixv_handle_que 3138 * ixv_handle_que
3129 ************************************************************************/ 3139 ************************************************************************/
3130static void 3140static void
3131ixv_handle_que(void *context) 3141ixv_handle_que(void *context)
3132{ 3142{
3133 struct ix_queue *que = context; 3143 struct ix_queue *que = context;
3134 struct adapter *adapter = que->adapter; 3144 struct adapter *adapter = que->adapter;
3135 struct tx_ring *txr = que->txr; 3145 struct tx_ring *txr = que->txr;
3136 struct ifnet *ifp = adapter->ifp; 3146 struct ifnet *ifp = adapter->ifp;
3137 bool more; 3147 bool more;
3138 3148
3139 que->handleq.ev_count++; 3149 que->handleq.ev_count++;
3140 3150
3141 if (ifp->if_flags & IFF_RUNNING) { 3151 if (ifp->if_flags & IFF_RUNNING) {
3142 more = ixgbe_rxeof(que); 3152 more = ixgbe_rxeof(que);
3143 IXGBE_TX_LOCK(txr); 3153 IXGBE_TX_LOCK(txr);
3144 more |= ixgbe_txeof(txr); 3154 more |= ixgbe_txeof(txr);
3145 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)) 3155 if (!(adapter->feat_en & IXGBE_FEATURE_LEGACY_TX))
3146 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq)) 3156 if (!ixgbe_mq_ring_empty(ifp, txr->txr_interq))
3147 ixgbe_mq_start_locked(ifp, txr); 3157 ixgbe_mq_start_locked(ifp, txr);
3148 /* Only for queue 0 */ 3158 /* Only for queue 0 */
3149 /* NetBSD still needs this for CBQ */ 3159 /* NetBSD still needs this for CBQ */
3150 if ((&adapter->queues[0] == que) 3160 if ((&adapter->queues[0] == que)
3151 && (!ixgbe_legacy_ring_empty(ifp, NULL))) 3161 && (!ixgbe_legacy_ring_empty(ifp, NULL)))
3152 ixgbe_legacy_start_locked(ifp, txr); 3162 ixgbe_legacy_start_locked(ifp, txr);
3153 IXGBE_TX_UNLOCK(txr); 3163 IXGBE_TX_UNLOCK(txr);
3154 if (more) { 3164 if (more) {
3155 que->req.ev_count++; 3165 que->req.ev_count++;
3156 if (adapter->txrx_use_workqueue) { 3166 if (adapter->txrx_use_workqueue) {
3157 /* 3167 /*
3158 * "enqueued flag" is not required here 3168 * "enqueued flag" is not required here
3159 * the same as ixg(4). See ixgbe_msix_que(). 3169 * the same as ixg(4). See ixgbe_msix_que().
3160 */ 3170 */
3161 workqueue_enqueue(adapter->que_wq, 3171 workqueue_enqueue(adapter->que_wq,
3162 &que->wq_cookie, curcpu()); 3172 &que->wq_cookie, curcpu());
3163 } else 3173 } else
3164 softint_schedule(que->que_si); 3174 softint_schedule(que->que_si);
3165 return; 3175 return;
3166 } 3176 }
3167 } 3177 }
3168 3178
3169 /* Re-enable this interrupt */ 3179 /* Re-enable this interrupt */
3170 ixv_enable_queue(adapter, que->msix); 3180 ixv_enable_queue(adapter, que->msix);
3171 3181
3172 return; 3182 return;
3173} /* ixv_handle_que */ 3183} /* ixv_handle_que */
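The control flow in ixv_handle_que() is the usual interrupt-mitigation pattern: while ixgbe_rxeof()/ixgbe_txeof() report more pending work, the handler requeues itself (workqueue or softint) and returns with the queue interrupt still masked; only once the ring is drained does it fall through to ixv_enable_queue(). A standalone mini-model of that loop, with hypothetical names standing in for the driver calls:

	#include <stdbool.h>
	#include <stdio.h>

	static int pending = 3;		/* pretend 3 batches are queued */

	static bool
	process_batch(void)		/* stands in for rxeof/txeof */
	{
		return --pending > 0;	/* true while work remains */
	}

	int
	main(void)
	{
		while (process_batch())
			printf("more work: requeue, interrupt stays masked\n");
		printf("ring drained: re-enable queue interrupt\n");
		return 0;
	}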
3174 3184
3175/************************************************************************ 3185/************************************************************************
3176 * ixv_handle_que_work 3186 * ixv_handle_que_work
3177 ************************************************************************/ 3187 ************************************************************************/
3178static void 3188static void
3179ixv_handle_que_work(struct work *wk, void *context) 3189ixv_handle_que_work(struct work *wk, void *context)
3180{ 3190{
3181 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie); 3191 struct ix_queue *que = container_of(wk, struct ix_queue, wq_cookie);
3182 3192
3183 /* 3193 /*
3184 * "enqueued flag" is not required here the same as ixg(4). 3194 * "enqueued flag" is not required here the same as ixg(4).
3185 * See ixgbe_msix_que(). 3195 * See ixgbe_msix_que().
3186 */ 3196 */
3187 ixv_handle_que(que); 3197 ixv_handle_que(que);
3188} 3198}
3189 3199
3190/************************************************************************ 3200/************************************************************************
3191 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers 3201 * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
3192 ************************************************************************/ 3202 ************************************************************************/
3193static int 3203static int
3194ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa) 3204ixv_allocate_msix(struct adapter *adapter, const struct pci_attach_args *pa)
3195{ 3205{
3196 device_t dev = adapter->dev; 3206 device_t dev = adapter->dev;
3197 struct ix_queue *que = adapter->queues; 3207 struct ix_queue *que = adapter->queues;
3198 struct tx_ring *txr = adapter->tx_rings; 3208 struct tx_ring *txr = adapter->tx_rings;
3199 int error, msix_ctrl, rid, vector = 0; 3209 int error, msix_ctrl, rid, vector = 0;
3200 pci_chipset_tag_t pc; 3210 pci_chipset_tag_t pc;
3201 pcitag_t tag; 3211 pcitag_t tag;
3202 char intrbuf[PCI_INTRSTR_LEN]; 3212 char intrbuf[PCI_INTRSTR_LEN];
3203 char wqname[MAXCOMLEN]; 3213 char wqname[MAXCOMLEN];
3204 char intr_xname[32]; 3214 char intr_xname[32];
3205 const char *intrstr = NULL; 3215 const char *intrstr = NULL;
3206 kcpuset_t *affinity; 3216 kcpuset_t *affinity;
3207 int cpu_id = 0; 3217 int cpu_id = 0;
3208 3218
3209 pc = adapter->osdep.pc; 3219 pc = adapter->osdep.pc;
3210 tag = adapter->osdep.tag; 3220 tag = adapter->osdep.tag;
3211 3221
3212 adapter->osdep.nintrs = adapter->num_queues + 1; 3222 adapter->osdep.nintrs = adapter->num_queues + 1;
3213 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs, 3223 if (pci_msix_alloc_exact(pa, &adapter->osdep.intrs,
3214 adapter->osdep.nintrs) != 0) { 3224 adapter->osdep.nintrs) != 0) {
3215 aprint_error_dev(dev, 3225 aprint_error_dev(dev,
3216 "failed to allocate MSI-X interrupt\n"); 3226 "failed to allocate MSI-X interrupt\n");
3217 return (ENXIO); 3227 return (ENXIO);
3218 } 3228 }
3219 3229
3220 kcpuset_create(&affinity, false); 3230 kcpuset_create(&affinity, false);
3221 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) { 3231 for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
3222 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d", 3232 snprintf(intr_xname, sizeof(intr_xname), "%s TXRX%d",
3223 device_xname(dev), i); 3233 device_xname(dev), i);
3224 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf, 3234 intrstr = pci_intr_string(pc, adapter->osdep.intrs[i], intrbuf,
3225 sizeof(intrbuf)); 3235 sizeof(intrbuf));
3226#ifdef IXGBE_MPSAFE 3236#ifdef IXGBE_MPSAFE
3227 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE, 3237 pci_intr_setattr(pc, &adapter->osdep.intrs[i], PCI_INTR_MPSAFE,
3228 true); 3238 true);
3229#endif 3239#endif
3230 /* Set the handler function */ 3240 /* Set the handler function */
3231 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc, 3241 que->res = adapter->osdep.ihs[i] = pci_intr_establish_xname(pc,
3232 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que, 3242 adapter->osdep.intrs[i], IPL_NET, ixv_msix_que, que,
3233 intr_xname); 3243 intr_xname);
3234 if (que->res == NULL) { 3244 if (que->res == NULL) {
3235 pci_intr_release(pc, adapter->osdep.intrs, 3245 pci_intr_release(pc, adapter->osdep.intrs,
3236 adapter->osdep.nintrs); 3246 adapter->osdep.nintrs);
3237 aprint_error_dev(dev, 3247 aprint_error_dev(dev,
3238 "Failed to register QUE handler\n"); 3248 "Failed to register QUE handler\n");
3239 kcpuset_destroy(affinity); 3249 kcpuset_destroy(affinity);
3240 return (ENXIO); 3250 return (ENXIO);
3241 } 3251 }
3242 que->msix = vector; 3252 que->msix = vector;
3243 adapter->active_queues |= (u64)1 << que->msix; 3253 adapter->active_queues |= (u64)1 << que->msix;
3244 3254
3245 cpu_id = i; 3255 cpu_id = i;
3246 /* Round-robin affinity */ 3256 /* Round-robin affinity */
3247 kcpuset_zero(affinity); 3257 kcpuset_zero(affinity);
3248 kcpuset_set(affinity, cpu_id % ncpu); 3258 kcpuset_set(affinity, cpu_id % ncpu);
3249 error = interrupt_distribute(adapter->osdep.ihs[i], affinity, 3259 error = interrupt_distribute(adapter->osdep.ihs[i], affinity,
3250 NULL); 3260 NULL);
3251 aprint_normal_dev(dev, "for TX/RX, interrupting at %s", 3261 aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
3252 intrstr); 3262 intrstr);
3253 if (error == 0) 3263 if (error == 0)
3254 aprint_normal(", bound queue %d to cpu %d\n", 3264 aprint_normal(", bound queue %d to cpu %d\n",
3255 i, cpu_id % ncpu); 3265 i, cpu_id % ncpu);
3256 else 3266 else
3257 aprint_normal("\n"); 3267 aprint_normal("\n");
3258 3268
3259#ifndef IXGBE_LEGACY_TX 3269#ifndef IXGBE_LEGACY_TX
3260 txr->txr_si 3270 txr->txr_si
3261 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 3271 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3262 ixgbe_deferred_mq_start, txr); 3272 ixgbe_deferred_mq_start, txr);
3263#endif 3273#endif
3264 que->que_si 3274 que->que_si
3265 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS, 3275 = softint_establish(SOFTINT_NET | IXGBE_SOFTINT_FLAGS,
3266 ixv_handle_que, que); 3276 ixv_handle_que, que);
3267 if (que->que_si == NULL) { 3277 if (que->que_si == NULL) {
3268 aprint_error_dev(dev, 3278 aprint_error_dev(dev,
3269 "could not establish software interrupt\n"); 3279 "could not establish software interrupt\n");
3270 } 3280 }
3271 } 3281 }
3272 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev)); 3282 snprintf(wqname, sizeof(wqname), "%sdeferTx", device_xname(dev));
3273 error = workqueue_create(&adapter->txr_wq, wqname, 3283 error = workqueue_create(&adapter->txr_wq, wqname,
3274 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 3284 ixgbe_deferred_mq_start_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3275 IXGBE_WORKQUEUE_FLAGS); 3285 IXGBE_WORKQUEUE_FLAGS);
3276 if (error) { 3286 if (error) {
3277 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n"); 3287 aprint_error_dev(dev, "couldn't create workqueue for deferred Tx\n");
3278 } 3288 }
3279 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int)); 3289 adapter->txr_wq_enqueued = percpu_alloc(sizeof(u_int));
3280 3290
3281 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev)); 3291 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(dev));
3282 error = workqueue_create(&adapter->que_wq, wqname, 3292 error = workqueue_create(&adapter->que_wq, wqname,
3283 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET, 3293 ixv_handle_que_work, adapter, IXGBE_WORKQUEUE_PRI, IPL_NET,
3284 IXGBE_WORKQUEUE_FLAGS); 3294 IXGBE_WORKQUEUE_FLAGS);
3285 if (error) { 3295 if (error) {
3286 aprint_error_dev(dev, 3296 aprint_error_dev(dev,
3287 "couldn't create workqueue\n"); 3297 "couldn't create workqueue\n");
3288 } 3298 }
3289 3299
3290 /* and Mailbox */ 3300 /* and Mailbox */
3291 cpu_id++; 3301 cpu_id++;
3292 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev)); 3302 snprintf(intr_xname, sizeof(intr_xname), "%s link", device_xname(dev));
3293 adapter->vector = vector; 3303 adapter->vector = vector;
3294 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf, 3304 intrstr = pci_intr_string(pc, adapter->osdep.intrs[vector], intrbuf,
3295 sizeof(intrbuf)); 3305 sizeof(intrbuf));
3296#ifdef IXGBE_MPSAFE 3306#ifdef IXGBE_MPSAFE
3297 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE, 3307 pci_intr_setattr(pc, &adapter->osdep.intrs[vector], PCI_INTR_MPSAFE,
3298 true); 3308 true);
3299#endif 3309#endif
3300 /* Set the mbx handler function */ 3310 /* Set the mbx handler function */
3301 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc, 3311 adapter->osdep.ihs[vector] = pci_intr_establish_xname(pc,
3302 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter, 3312 adapter->osdep.intrs[vector], IPL_NET, ixv_msix_mbx, adapter,
3303 intr_xname); 3313 intr_xname);
3304 if (adapter->osdep.ihs[vector] == NULL) { 3314 if (adapter->osdep.ihs[vector] == NULL) {
3305 aprint_error_dev(dev, "Failed to register LINK handler\n"); 3315 aprint_error_dev(dev, "Failed to register LINK handler\n");
3306 kcpuset_destroy(affinity); 3316 kcpuset_destroy(affinity);
3307 return (ENXIO); 3317 return (ENXIO);
3308 } 3318 }
3309 /* Round-robin affinity */ 3319 /* Round-robin affinity */
3310 kcpuset_zero(affinity); 3320 kcpuset_zero(affinity);
3311 kcpuset_set(affinity, cpu_id % ncpu); 3321 kcpuset_set(affinity, cpu_id % ncpu);
3312 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity, 3322 error = interrupt_distribute(adapter->osdep.ihs[vector], affinity,
3313 NULL); 3323 NULL);
3314 3324
3315 aprint_normal_dev(dev, 3325 aprint_normal_dev(dev,
3316 "for link, interrupting at %s", intrstr); 3326 "for link, interrupting at %s", intrstr);
3317 if (error == 0) 3327 if (error == 0)
3318 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu); 3328 aprint_normal(", affinity to cpu %d\n", cpu_id % ncpu);
3319 else 3329 else
3320 aprint_normal("\n"); 3330 aprint_normal("\n");
3321 3331
3322 /* Tasklets for Mailbox */ 3332 /* Tasklets for Mailbox */
3323 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINT_FLAGS, 3333 adapter->link_si = softint_establish(SOFTINT_NET |IXGBE_SOFTINT_FLAGS,
3324 ixv_handle_link, adapter); 3334 ixv_handle_link, adapter);
3325 /* 3335 /*
3326 * Due to a broken design, QEMU will fail to properly 3336 * Due to a broken design, QEMU will fail to properly
3327 * enable the guest for MSI-X unless the vectors in 3337 * enable the guest for MSI-X unless the vectors in
3328 * the table are all set up, so we must rewrite the 3338 * the table are all set up, so we must rewrite the
3329 * ENABLE in the MSI-X control register again at this 3339 * ENABLE in the MSI-X control register again at this
3330 * point to cause it to successfully initialize us. 3340 * point to cause it to successfully initialize us.
3331 */ 3341 */
3332 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { 3342 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
3333 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL); 3343 pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid, NULL);
3334 rid += PCI_MSIX_CTL; 3344 rid += PCI_MSIX_CTL;
3335 msix_ctrl = pci_conf_read(pc, tag, rid); 3345 msix_ctrl = pci_conf_read(pc, tag, rid);
3336 msix_ctrl |= PCI_MSIX_CTL_ENABLE; 3346 msix_ctrl |= PCI_MSIX_CTL_ENABLE;
3337 pci_conf_write(pc, tag, rid, msix_ctrl); 3347 pci_conf_write(pc, tag, rid, msix_ctrl);
3338 } 3348 }
3339 3349
3340 kcpuset_destroy(affinity); 3350 kcpuset_destroy(affinity);
3341 return (0); 3351 return (0);
3342} /* ixv_allocate_msix */ 3352} /* ixv_allocate_msix */
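The resulting vector layout: vectors 0 through num_queues - 1 service the TX/RX queue pairs and are spread round-robin across CPUs via interrupt_distribute(), and the final vector (saved in adapter->vector) services the mailbox/link interrupt. Each queue also gets its own softints (txr_si, que_si), backed by the two adapter-wide workqueues (txr_wq, que_wq), while the mailbox side only needs the single link_si softint.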
3343 3353
3344/************************************************************************ 3354/************************************************************************
3345 * ixv_configure_interrupts - Setup MSI-X resources 3355 * ixv_configure_interrupts - Setup MSI-X resources
3346 * 3356 *
3347 * Note: The VF device MUST use MSI-X, there is no fallback. 3357 * Note: The VF device MUST use MSI-X, there is no fallback.
3348 ************************************************************************/ 3358 ************************************************************************/
3349static int 3359static int
3350ixv_configure_interrupts(struct adapter *adapter) 3360ixv_configure_interrupts(struct adapter *adapter)
3351{ 3361{
3352 device_t dev = adapter->dev; 3362 device_t dev = adapter->dev;
3353 int want, queues, msgs; 3363 int want, queues, msgs;
3354 3364
3355 /* Must have at least 2 MSI-X vectors */ 3365 /* Must have at least 2 MSI-X vectors */
3356 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag); 3366 msgs = pci_msix_count(adapter->osdep.pc, adapter->osdep.tag);
3357 if (msgs < 2) { 3367 if (msgs < 2) {
3358 aprint_error_dev(dev, "MSIX config error\n"); 3368 aprint_error_dev(dev, "MSIX config error\n");
3359 return (ENXIO); 3369 return (ENXIO);
3360 } 3370 }
3361 msgs = MIN(msgs, IXG_MAX_NINTR); 3371 msgs = MIN(msgs, IXG_MAX_NINTR);
3362 3372
3363 /* Figure out a reasonable auto config value */ 3373 /* Figure out a reasonable auto config value */
3364 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu; 3374 queues = (ncpu > (msgs - 1)) ? (msgs - 1) : ncpu;
3365 3375
3366 if (ixv_num_queues != 0) 3376 if (ixv_num_queues != 0)
3367 queues = ixv_num_queues; 3377 queues = ixv_num_queues;
3368 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES)) 3378 else if ((ixv_num_queues == 0) && (queues > IXGBE_VF_MAX_TX_QUEUES))
3369 queues = IXGBE_VF_MAX_TX_QUEUES; 3379 queues = IXGBE_VF_MAX_TX_QUEUES;
3370 3380
3371 /* 3381 /*
3372 * Want vectors for the queues, 3382 * Want vectors for the queues,
3373 * plus an additional one for the mailbox. 3383 * plus an additional one for the mailbox.
3374 */ 3384 */
3375 want = queues + 1; 3385 want = queues + 1;
3376 if (msgs >= want) 3386 if (msgs >= want)
3377 msgs = want; 3387 msgs = want;
3378 else { 3388 else {
3379 aprint_error_dev(dev, 3389 aprint_error_dev(dev,
3380 "MSI-X Configuration Problem, " 3390 "MSI-X Configuration Problem, "
3381 "%d vectors but %d queues wanted!\n", 3391 "%d vectors but %d queues wanted!\n",
3382 msgs, want); 3392 msgs, want);
3383 return -1; 3393 return -1;
3384 } 3394 }
3385 3395
3386 adapter->msix_mem = (void *)1; /* XXX */ 3396 adapter->msix_mem = (void *)1; /* XXX */
3387 aprint_normal_dev(dev, 3397 aprint_normal_dev(dev,
3388 "Using MSI-X interrupts with %d vectors\n", msgs); 3398 "Using MSI-X interrupts with %d vectors\n", msgs);
3389 adapter->num_queues = queues; 3399 adapter->num_queues = queues;
3390 3400
3391 return (0); 3401 return (0);
3392} /* ixv_configure_interrupts */ 3402} /* ixv_configure_interrupts */
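A worked example of the sizing logic: with ncpu = 8 and msgs = 4 vectors available, the auto value is queues = min(ncpu, msgs - 1) = 3, want = queues + 1 = 4, and all four vectors are used (three queue pairs plus the mailbox). Forcing ixv_num_queues = 4 with the same four vectors makes want = 5 exceed msgs, and attach fails with the "MSI-X Configuration Problem" message above.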
3393 3403
3394 3404
3395/************************************************************************ 3405/************************************************************************
3396 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts 3406 * ixv_handle_link - Tasklet handler for MSI-X MBX interrupts
3397 * 3407 *
3398 * Done outside of interrupt context since the driver might sleep 3408 * Done outside of interrupt context since the driver might sleep
3399 ************************************************************************/ 3409 ************************************************************************/
3400static void 3410static void
3401ixv_handle_link(void *context) 3411ixv_handle_link(void *context)
3402{ 3412{
3403 struct adapter *adapter = context; 3413 struct adapter *adapter = context;
3404 3414
3405 IXGBE_CORE_LOCK(adapter); 3415 IXGBE_CORE_LOCK(adapter);
3406 3416
3407 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed, 3417 adapter->hw.mac.ops.check_link(&adapter->hw, &adapter->link_speed,
3408 &adapter->link_up, FALSE); 3418 &adapter->link_up, FALSE);
3409 ixv_update_link_status(adapter); 3419 ixv_update_link_status(adapter);
3410 3420
3411 IXGBE_CORE_UNLOCK(adapter); 3421 IXGBE_CORE_UNLOCK(adapter);
3412} /* ixv_handle_link */ 3422} /* ixv_handle_link */
3413 3423
3414/************************************************************************ 3424/************************************************************************
3415 * ixv_check_link - Used in the local timer to poll for link changes 3425 * ixv_check_link - Used in the local timer to poll for link changes
3416 ************************************************************************/ 3426 ************************************************************************/
3417static s32 3427static s32
3418ixv_check_link(struct adapter *adapter) 3428ixv_check_link(struct adapter *adapter)
3419{ 3429{
3420 s32 error; 3430 s32 error;
3421 3431
3422 KASSERT(mutex_owned(&adapter->core_mtx)); 3432 KASSERT(mutex_owned(&adapter->core_mtx));
3423 3433
3424 adapter->hw.mac.get_link_status = TRUE; 3434 adapter->hw.mac.get_link_status = TRUE;
3425 3435
3426 error = adapter->hw.mac.ops.check_link(&adapter->hw, 3436 error = adapter->hw.mac.ops.check_link(&adapter->hw,
3427 &adapter->link_speed, &adapter->link_up, FALSE); 3437 &adapter->link_speed, &adapter->link_up, FALSE);
3428 ixv_update_link_status(adapter); 3438 ixv_update_link_status(adapter);
3429 3439
3430 return error; 3440 return error;
3431} /* ixv_check_link */ 3441} /* ixv_check_link */