| @@ -1,2042 +1,2045 @@ | | | @@ -1,2042 +1,2045 @@ |
1 | /* $NetBSD: if_scx.c,v 1.42 2023/06/14 00:07:22 nisimura Exp $ */ | | 1 | /* $NetBSD: if_scx.c,v 1.43 2023/06/15 07:21:45 nisimura Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2020 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2020 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Tohru Nishimura. | | 8 | * by Tohru Nishimura. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | #define NOT_MP_SAFE 0 | | 32 | #define NOT_MP_SAFE 0 |
33 | | | 33 | |
34 | /* | | 34 | /* |
35 | * Socionext SC2A11 SynQuacer NetSec GbE driver | | 35 | * Socionext SC2A11 SynQuacer NetSec GbE driver |
36 | * | | 36 | * |
37 | * Multiple Tx and Rx queues exist inside and dedicated descriptor | | 37 | * Multiple Tx and Rx queues exist inside and dedicated descriptor |
38 | * fields specifies which queue is to use. Three internal micro-processors | | 38 | * fields specifies which queue is to use. Three internal micro-processors |
39 | * to handle incoming frames, outgoing frames and packet data crypto | | 39 | * to handle incoming frames, outgoing frames and packet data crypto |
40 | * processing. uP programs are stored in an external flash memory and | | 40 | * processing. uP programs are stored in an external flash memory and |
41 | * have to be loaded by device driver. | | 41 | * have to be loaded by device driver. |
42 | * NetSec uses Synopsys DesignWare Core EMAC. DWC implementation | | 42 | * NetSec uses Synopsys DesignWare Core EMAC. DWC implementation |
43 | * register (0x20) is known to have 0x10.36 and feature register (0x1058) | | 43 | * register (0x20) is known to have 0x10.36 and feature register (0x1058) |
44 | * reports 0x11056f37. | | 44 | * reports 0x11056f37. |
45 | * <24> alternative/enhanced desc format | | 45 | * <24> alternative/enhanced desc format |
46 | * <18> receive IP type 2 checksum offload | | 46 | * <18> receive IP type 2 checksum offload |
47 | * <16> transmit checksum offload | | 47 | * <16> transmit checksum offload |
48 | * <11> event counter (mac management counter, MMC) | | 48 | * <11> event counter (mac management counter, MMC) |
49 | */ | | 49 | */ |
50 | | | 50 | |
51 | #include <sys/cdefs.h> | | 51 | #include <sys/cdefs.h> |
52 | __KERNEL_RCSID(0, "$NetBSD: if_scx.c,v 1.42 2023/06/14 00:07:22 nisimura Exp $"); | | 52 | __KERNEL_RCSID(0, "$NetBSD: if_scx.c,v 1.43 2023/06/15 07:21:45 nisimura Exp $"); |
53 | | | 53 | |
54 | #include <sys/param.h> | | 54 | #include <sys/param.h> |
55 | #include <sys/bus.h> | | 55 | #include <sys/bus.h> |
56 | #include <sys/intr.h> | | 56 | #include <sys/intr.h> |
57 | #include <sys/device.h> | | 57 | #include <sys/device.h> |
58 | #include <sys/callout.h> | | 58 | #include <sys/callout.h> |
59 | #include <sys/mbuf.h> | | 59 | #include <sys/mbuf.h> |
60 | #include <sys/errno.h> | | 60 | #include <sys/errno.h> |
61 | #include <sys/rndsource.h> | | 61 | #include <sys/rndsource.h> |
62 | #include <sys/kernel.h> | | 62 | #include <sys/kernel.h> |
63 | #include <sys/systm.h> | | 63 | #include <sys/systm.h> |
64 | | | 64 | |
65 | #include <net/if.h> | | 65 | #include <net/if.h> |
66 | #include <net/if_media.h> | | 66 | #include <net/if_media.h> |
67 | #include <net/if_dl.h> | | 67 | #include <net/if_dl.h> |
68 | #include <net/if_ether.h> | | 68 | #include <net/if_ether.h> |
69 | #include <dev/mii/mii.h> | | 69 | #include <dev/mii/mii.h> |
70 | #include <dev/mii/miivar.h> | | 70 | #include <dev/mii/miivar.h> |
71 | #include <net/bpf.h> | | 71 | #include <net/bpf.h> |
72 | | | 72 | |
73 | #include <dev/fdt/fdtvar.h> | | 73 | #include <dev/fdt/fdtvar.h> |
74 | #include <dev/acpi/acpireg.h> | | 74 | #include <dev/acpi/acpireg.h> |
75 | #include <dev/acpi/acpivar.h> | | 75 | #include <dev/acpi/acpivar.h> |
76 | #include <dev/acpi/acpi_intr.h> | | 76 | #include <dev/acpi/acpi_intr.h> |
77 | | | 77 | |
78 | /* SC2A11 GbE has 64-bit paddr descriptor */ | | 78 | /* SC2A11 GbE has 64-bit paddr descriptor */ |
79 | struct tdes { | | 79 | struct tdes { |
80 | uint32_t t0, t1, t2, t3; | | 80 | uint32_t t0, t1, t2, t3; |
81 | }; | | 81 | }; |
82 | struct rdes { | | 82 | struct rdes { |
83 | uint32_t r0, r1, r2, r3; | | 83 | uint32_t r0, r1, r2, r3; |
84 | }; | | 84 | }; |
85 | #define T0_OWN (1U<<31) /* desc is ready to Tx */ | | 85 | #define T0_OWN (1U<<31) /* desc is ready to Tx */ |
86 | #define T0_LD (1U<<30) /* last descriptor in array */ | | 86 | #define T0_LD (1U<<30) /* last descriptor in array */ |
87 | #define T0_DRID (24) /* 29:24 desc ring id */ | | 87 | #define T0_DRID (24) /* 29:24 desc ring id */ |
88 | #define T0_PT (1U<<21) /* 23:21 "pass-through" */ | | 88 | #define T0_PT (1U<<21) /* 23:21 "pass-through" */ |
89 | #define T0_TDRID (16) /* 20:16 target desc ring id: GMAC=15 */ | | 89 | #define T0_TDRID (16) /* 20:16 target desc ring id: GMAC=15 */ |
90 | #define T0_CC (1U<<15) /* ??? */ | | 90 | #define T0_CC (1U<<15) /* ??? */ |
91 | #define T0_FS (1U<<9) /* first segment of frame */ | | 91 | #define T0_FS (1U<<9) /* first segment of frame */ |
92 | #define T0_LS (1U<<8) /* last segment of frame */ | | 92 | #define T0_LS (1U<<8) /* last segment of frame */ |
93 | #define T0_CSUM (1U<<7) /* enable check sum offload */ | | 93 | #define T0_CSUM (1U<<7) /* enable check sum offload */ |
94 | #define T0_TSO (1U<<6) /* enable TCP segment offload */ | | 94 | #define T0_TSO (1U<<6) /* enable TCP segment offload */ |
95 | #define T0_TRS (1U<<4) /* 5:4 "TRS" ??? */ | | 95 | #define T0_TRS (1U<<4) /* 5:4 "TRS" ??? */ |
96 | /* T1 frame segment address 63:32 */ | | 96 | /* T1 frame segment address 63:32 */ |
97 | /* T2 frame segment address 31:0 */ | | 97 | /* T2 frame segment address 31:0 */ |
98 | /* T3 31:16 TCP segment length, 15:0 frame segment length to transmit */ | | 98 | /* T3 31:16 TCP segment length, 15:0 frame segment length to transmit */ |
99 | #define R0_OWN (1U<<31) /* desc is empty */ | | 99 | #define R0_OWN (1U<<31) /* desc is empty */ |
100 | #define R0_LD (1U<<30) /* last descriptor in array */ | | 100 | #define R0_LD (1U<<30) /* last descriptor in array */ |
101 | #define R0_SDRID (24) /* 29:24 source desc ring id */ | | 101 | #define R0_SDRID (24) /* 29:24 source desc ring id */ |
102 | #define R0_FR (1U<<23) /* found fragmented */ | | 102 | #define R0_FR (1U<<23) /* found fragmented */ |
103 | #define R0_ER (1U<<21) /* Rx error indication */ | | 103 | #define R0_ER (1U<<21) /* Rx error indication */ |
104 | #define R0_ERR (3U<<16) /* 18:16 receive error code */ | | 104 | #define R0_ERR (3U<<16) /* 18:16 receive error code */ |
105 | #define R0_TDRID (12) /* 15:12 target desc ring id */ | | 105 | #define R0_TDRID (12) /* 15:12 target desc ring id */ |
106 | #define R0_FS (1U<<9) /* first segment of frame */ | | 106 | #define R0_FS (1U<<9) /* first segment of frame */ |
107 | #define R0_LS (1U<<8) /* last segment of frame */ | | 107 | #define R0_LS (1U<<8) /* last segment of frame */ |
108 | #define R0_CSUM (3U<<6) /* 7:6 checksum status, 0: undone */ | | 108 | #define R0_CSUM (3U<<6) /* 7:6 checksum status, 0: undone */ |
109 | #define R0_CERR (2U<<6) /* 2: found bad */ | | 109 | #define R0_CERR (2U<<6) /* 2: found bad */ |
110 | #define R0_COK (1U<<6) /* 1: found ok */ | | 110 | #define R0_COK (1U<<6) /* 1: found ok */ |
111 | /* R1 frame address 63:32 */ | | 111 | /* R1 frame address 63:32 */ |
112 | /* R2 frame address 31:0 */ | | 112 | /* R2 frame address 31:0 */ |
113 | /* R3 31:16 received frame length, 15:0 buffer length to receive */ | | 113 | /* R3 31:16 received frame length, 15:0 buffer length to receive */ |
114 | | | 114 | |
115 | /* | | 115 | /* |
116 | * SC2A11 registers. 0x100 - 1204 | | 116 | * SC2A11 registers. 0x100 - 1204 |
117 | */ | | 117 | */ |
118 | #define SWRESET 0x104 | | 118 | #define SWRESET 0x104 |
119 | #define SRST_RUN (1U<<31) /* instruct start, 0 to stop */ | | 119 | #define SRST_RUN (1U<<31) /* instruct start, 0 to stop */ |
120 | #define COMINIT 0x120 | | 120 | #define COMINIT 0x120 |
121 | #define INIT_DB (1U<<2) /* ???; self clear when done */ | | 121 | #define INIT_DB (1U<<2) /* ???; self clear when done */ |
122 | #define INIT_CLS (1U<<1) /* ???; self clear when done */ | | 122 | #define INIT_CLS (1U<<1) /* ???; self clear when done */ |
123 | #define PKTCTRL 0x140 /* pkt engine control */ | | | |
124 | #define MODENRM (1U<<28) /* set operational mode to 'normal' */ | | | |
125 | #define ENJUMBO (1U<<27) /* allow jumbo frame */ | | | |
126 | #define RPTCSUMERR (1U<<3) /* log Rx checksum error */ | | | |
127 | #define RPTHDCOMP (1U<<2) /* log header incomplete condition */ | | | |
128 | #define RPTHDERR (1U<<1) /* log header error */ | | | |
129 | #define DROPNOMATCH (1U<<0) /* drop no match frames */ | | | |
130 | #define xINTSR 0x200 /* aggregated interrupt status */ | | 123 | #define xINTSR 0x200 /* aggregated interrupt status */ |
131 | #define IRQ_UCODE (1U<<20) /* ucode load completed; W1C */ | | 124 | #define IRQ_UCODE (1U<<20) /* ucode load completed; W1C */ |
132 | #define IRQ_MAC (1U<<19) /* ??? */ | | 125 | #define IRQ_MAC (1U<<19) /* ??? */ |
133 | #define IRQ_PKT (1U<<18) /* ??? */ | | 126 | #define IRQ_PKT (1U<<18) /* ??? */ |
134 | #define IRQ_BOOTCODE (1U<<5) /* ??? */ | | 127 | #define IRQ_BOOTCODE (1U<<5) /* ??? */ |
135 | #define IRQ_XDONE (1U<<4) /* ??? mode change completed */ | | 128 | #define IRQ_XDONE (1U<<4) /* ??? mode change completed */ |
136 | #define IRQ_RX (1U<<1) /* top level Rx interrupt */ | | 129 | #define IRQ_RX (1U<<1) /* top level Rx interrupt */ |
137 | #define IRQ_TX (1U<<0) /* top level Tx interrupt */ | | 130 | #define IRQ_TX (1U<<0) /* top level Tx interrupt */ |
138 | #define xINTAEN 0x204 /* INT_A enable */ | | 131 | #define xINTAEN 0x204 /* INT_A enable */ |
139 | #define xINTAE_SET 0x234 /* bit to set */ | | 132 | #define xINTAE_SET 0x234 /* bit to set */ |
140 | #define xINTAE_CLR 0x238 /* bit to clr */ | | 133 | #define xINTAE_CLR 0x238 /* bit to clr */ |
141 | #define xINTBEN 0x23c /* INT_B enable */ | | 134 | #define xINTBEN 0x23c /* INT_B enable */ |
142 | #define xINTBE_SET 0x240 /* bit to set */ | | 135 | #define xINTBE_SET 0x240 /* bit to set */ |
143 | #define xINTBE_CLR 0x244 /* bit to clr */ | | 136 | #define xINTBE_CLR 0x244 /* bit to clr */ |
144 | #define TXISR 0x400 /* transmit status; W1C */ | | 137 | #define TXISR 0x400 /* transmit status; W1C */ |
145 | #define TXIEN 0x404 /* tx interrupt enable */ | | 138 | #define TXIEN 0x404 /* tx interrupt enable */ |
146 | #define TXIE_SET 0x428 /* bit to set */ | | 139 | #define TXIE_SET 0x428 /* bit to set */ |
147 | #define TXIE_CLR 0x42c /* bit to clr */ | | 140 | #define TXIE_CLR 0x42c /* bit to clr */ |
148 | #define TXI_NTOWNR (1U<<17) /* ??? desc array got empty */ | | 141 | #define TXI_NTOWNR (1U<<17) /* ??? desc array got empty */ |
149 | #define TXI_TR_ERR (1U<<16) /* xmit error */ | | 142 | #define TXI_TR_ERR (1U<<16) /* xmit error detected */ |
150 | #define TXI_TXDONE (1U<<15) /* xmit completed */ | | 143 | #define TXI_TXDONE (1U<<15) /* xmit completed */ |
151 | #define TXI_TMREXP (1U<<14) /* coalesce guard timer expired */ | | 144 | #define TXI_TMREXP (1U<<14) /* coalesce guard timer expired */ |
152 | #define RXISR 0x440 /* receive status; W1C */ | | 145 | #define RXISR 0x440 /* receive status; W1C */ |
153 | #define RXIEN 0x444 /* rx interrupt enable */ | | 146 | #define RXIEN 0x444 /* rx interrupt enable */ |
154 | #define RXIE_SET 0x468 /* bit to set */ | | 147 | #define RXIE_SET 0x468 /* bit to set */ |
155 | #define RXIE_CLR 0x46c /* bit to clr */ | | 148 | #define RXIE_CLR 0x46c /* bit to clr */ |
156 | #define RXI_RC_ERR (1U<<16) /* recv error */ | | 149 | #define RXI_RC_ERR (1U<<16) /* recv error detected */ |
157 | #define RXI_PKTCNT (1U<<15) /* recv counter has new value */ | | 150 | #define RXI_PKTCNT (1U<<15) /* recv counter has new value */ |
158 | #define RXI_TMREXP (1U<<14) /* coalesce guard timer expired */ | | 151 | #define RXI_TMREXP (1U<<14) /* coalesce guard timer expired */ |
159 | #define TDBA_LO 0x408 /* tdes array base addr 31:0 */ | | 152 | #define TDBA_LO 0x408 /* tdes array base addr 31:0 */ |
160 | #define TDBA_HI 0x434 /* tdes array base addr 63:32 */ | | 153 | #define TDBA_HI 0x434 /* tdes array base addr 63:32 */ |
161 | #define RDBA_LO 0x448 /* rdes array base addr 31:0 */ | | 154 | #define RDBA_LO 0x448 /* rdes array base addr 31:0 */ |
162 | #define RDBA_HI 0x474 /* rdes array base addr 63:32 */ | | 155 | #define RDBA_HI 0x474 /* rdes array base addr 63:32 */ |
163 | #define TXCONF 0x430 /* tdes config */ | | 156 | #define TXCONF 0x430 /* tdes config */ |
164 | #define RXCONF 0x470 /* rdes config */ | | 157 | #define RXCONF 0x470 /* rdes config */ |
165 | #define DESCNF_UP (1U<<31) /* 'up-and-running' */ | | 158 | #define DESCNF_UP (1U<<31) /* 'up-and-running' */ |
166 | #define DESCNF_CHRST (1U<<30) /* channel reset */ | | 159 | #define DESCNF_CHRST (1U<<30) /* channel reset */ |
167 | #define DESCNF_TMR (1U<<4) /* coalesce timer mode select */ | | 160 | #define DESCNF_TMR (1U<<4) /* coalesce timer unit select */ |
168 | #define DESCNF_LE (1) /* little endian desc format */ | | 161 | #define DESCNF_LE (1) /* little endian desc format */ |
169 | #define TXSUBMIT 0x410 /* submit frame(s) to transmit */ | | 162 | #define TXSUBMIT 0x410 /* submit frame(s) to transmit */ |
170 | #define TXCOALESC 0x418 /* tx intr coalesce upper bound */ | | 163 | #define TXCOALESC 0x418 /* tx intr coalesce upper bound */ |
171 | #define RXCOALESC 0x458 /* rx intr coalesce upper bound */ | | 164 | #define RXCOALESC 0x458 /* rx intr coalesce upper bound */ |
172 | #define TCLSCTIME 0x420 /* tintr guard time usec, MSB to on */ | | 165 | #define TCLSCTIME 0x420 /* tintr guard time usec */ |
173 | #define RCLSCTIME 0x460 /* rintr guard time usec, MSB to on */ | | 166 | #define RCLSCTIME 0x460 /* rintr guard time usec */ |
174 | #define TXDONECNT 0x414 /* tx completed count, auto-zero */ | | 167 | #define TXDONECNT 0x414 /* tx completed count, auto-zero */ |
175 | #define RXAVAILCNT 0x454 /* rx available count, auto-zero */ | | 168 | #define RXAVAILCNT 0x454 /* rx available count, auto-zero */ |
176 | #define DMACTL_TMR 0x20c /* engine DMA timer value */ | | 169 | #define DMACTL_TMR 0x20c /* DMA cycle tick value */ |
| | | 170 | #define PKTCTRL 0x140 /* pkt engine control */ |
| | | 171 | #define MODENRM (1U<<28) /* set operational mode to 'normal' */ |
| | | 172 | #define ENJUMBO (1U<<27) /* allow jumbo frame */ |
| | | 173 | #define RPTCSUMERR (1U<<3) /* log Rx checksum error */ |
| | | 174 | #define RPTHDCOMP (1U<<2) /* log header incomplete condition */ |
| | | 175 | #define RPTHDERR (1U<<1) /* log header error */ |
| | | 176 | #define DROPNOMATCH (1U<<0) /* drop no match frames */ |
| | | 177 | #define UCODE_PKT 0x0d0 /* packet engine ucode port */ |
177 | #define UCODE_H2M 0x210 /* host2media engine ucode port */ | | 178 | #define UCODE_H2M 0x210 /* host2media engine ucode port */ |
178 | #define UCODE_M2H 0x21c /* media2host engine ucode port */ | | 179 | #define UCODE_M2H 0x21c /* media2host engine ucode port */ |
179 | #define CORESTAT 0x218 /* engine run state */ | | 180 | #define CORESTAT 0x218 /* engine run state */ |
180 | #define PKTSTOP (1U<<2) /* pkt engine stopped */ | | 181 | #define PKTSTOP (1U<<2) /* pkt engine stopped */ |
181 | #define M2HSTOP (1U<<1) /* M2H engine stopped */ | | 182 | #define M2HSTOP (1U<<1) /* M2H engine stopped */ |
182 | #define H2MSTOP (1U<<0) /* H2M engine stopped */ | | 183 | #define H2MSTOP (1U<<0) /* H2M engine stopped */ |
183 | #define DMACTL_H2M 0x214 /* host2media engine control */ | | 184 | #define DMACTL_H2M 0x214 /* host2media engine control */ |
184 | #define DMACTL_M2H 0x220 /* media2host engine control */ | | 185 | #define DMACTL_M2H 0x220 /* media2host engine control */ |
185 | #define DMACTL_STOP (1U<<0) /* instruct stop; self-clear */ | | 186 | #define DMACTL_STOP (1U<<0) /* instruct stop; self-clear */ |
186 | #define M2H_MODE_TRANS (1U<<20) /* initiate M2H mode change */ | | 187 | #define M2H_MODE_TRANS (1U<<20) /* initiate M2H mode change */ |
187 | #define UCODE_PKT 0x0d0 /* packet engine ucode port */ | | 188 | #define MODE_TRANS 0x500 /* mode change completion status */ |
| | | 189 | #define N2T_DONE (1U<<20) /* normal->taiki change completed */ |
| | | 190 | #define T2N_DONE (1U<<19) /* taiki->normal change completed */ |
188 | #define CLKEN 0x100 /* clock distribution enable */ | | 191 | #define CLKEN 0x100 /* clock distribution enable */ |
189 | #define CLK_G (1U<<5) /* feed clk domain G */ | | 192 | #define CLK_G (1U<<5) /* feed clk domain G */ |
190 | #define CLK_C (1U<<1) /* feed clk domain C */ | | 193 | #define CLK_C (1U<<1) /* feed clk domain C */ |
191 | #define CLK_D (1U<<0) /* feed clk domain D */ | | 194 | #define CLK_D (1U<<0) /* feed clk domain D */ |
| | | 195 | #define DESC_INIT 0x11fc /* write 1 for desc init, SC */ |
| | | 196 | #define DESC_SRST 0x1204 /* write 1 for desc sw reset, SC */ |
192 | | | 197 | |
193 | /* GMAC register indirect access. thru MACCMD/MACDATA operation */ | | 198 | /* GMAC register indirect access. thru MACCMD/MACDATA operation */ |
194 | #define MACDATA 0x11c0 /* gmac register rd/wr data */ | | 199 | #define MACDATA 0x11c0 /* gmac register rd/wr data */ |
195 | #define MACCMD 0x11c4 /* gmac register operation */ | | 200 | #define MACCMD 0x11c4 /* gmac register operation */ |
196 | #define CMD_IOWR (1U<<28) /* write op */ | | 201 | #define CMD_IOWR (1U<<28) /* write op */ |
197 | #define CMD_BUSY (1U<<31) /* busy bit */ | | 202 | #define CMD_BUSY (1U<<31) /* busy bit */ |
198 | #define MACSTAT 0x1024 /* mac interrupt status (unused) */ | | 203 | #define MACSTAT 0x1024 /* mac interrupt status (unused) */ |
199 | #define MACINTE 0x1028 /* mac interrupt enable (unused) */ | | 204 | #define MACINTE 0x1028 /* mac interrupt enable (unused) */ |
200 | | | 205 | |
201 | #define FLOWTHR 0x11cc /* flow control threshold */ | | 206 | #define FLOWTHR 0x11cc /* flow control threshold */ |
202 | /* 31:16 pause threshold, 15:0 resume threshold */ | | 207 | /* 31:16 pause threshold, 15:0 resume threshold */ |
203 | #define INTF_SEL 0x11d4 /* phy interface type */ | | 208 | #define INTF_SEL 0x11d4 /* phy interface type */ |
204 | #define INTF_GMII 0 | | 209 | #define INTF_GMII 0 |
205 | #define INTF_RGMII 1 | | 210 | #define INTF_RGMII 1 |
206 | #define INTF_RMII 4 | | 211 | #define INTF_RMII 4 |
207 | | | 212 | |
208 | #define DESC_INIT 0x11fc /* write 1 for desc init, SC */ | | | |
209 | #define DESC_SRST 0x1204 /* write 1 for desc sw reset, SC */ | | | |
210 | #define MODE_TRANS 0x500 /* mode change completion status */ | | | |
211 | #define N2T_DONE (1U<<20) /* normal->taiki change completed */ | | | |
212 | #define T2N_DONE (1U<<19) /* taiki->normal change completed */ | | | |
213 | #define MCVER 0x22c /* micro controller version */ | | 213 | #define MCVER 0x22c /* micro controller version */ |
214 | #define HWVER 0x230 /* hardware version */ | | 214 | #define HWVER 0x230 /* hardware version */ |
215 | | | 215 | |
216 | /* | | 216 | /* |
217 | * GMAC registers are mostly identical to Synopsys DesignWare Core | | 217 | * GMAC registers are mostly identical to Synopsys DesignWare Core |
218 | * Ethernet. These must be handled by indirect access. | | 218 | * Ethernet. These must be handled by indirect access. |
219 | */ | | 219 | */ |
220 | #define GMACMCR 0x0000 /* MAC configuration */ | | 220 | #define GMACMCR 0x0000 /* MAC configuration */ |
221 | #define MCR_IBN (1U<<30) /* watch in-band-signal */ | | 221 | #define MCR_IBN (1U<<30) /* watch in-band-signal */ |
222 | #define MCR_CST (1U<<25) /* strip CRC */ | | 222 | #define MCR_CST (1U<<25) /* strip CRC */ |
223 | #define MCR_TC (1U<<24) /* keep RGMII PHY notified */ | | 223 | #define MCR_TC (1U<<24) /* keep RGMII PHY notified */ |
224 | #define MCR_WD (1U<<23) /* allow long >2048 tx frame */ | | 224 | #define MCR_WD (1U<<23) /* allow long >2048 tx frame */ |
225 | #define MCR_JE (1U<<20) /* allow ~9018 tx jumbo frame */ | | 225 | #define MCR_JE (1U<<20) /* allow ~9018 tx jumbo frame */ |
226 | #define MCR_IFG (7U<<17) /* 19:17 IFG value 0~7 */ | | 226 | #define MCR_IFG (7U<<17) /* 19:17 IFG value 0~7 */ |
227 | #define MCR_DCRS (1U<<16) /* ignore (G)MII HDX Tx error */ | | 227 | #define MCR_DCRS (1U<<16) /* ignore (G)MII HDX Tx error */ |
228 | #define MCR_PS (1U<<15) /* 1: MII 10/100, 0: GMII 1000 */ | | 228 | #define MCR_PS (1U<<15) /* 1: MII 10/100, 0: GMII 1000 */ |
229 | #define MCR_FES (1U<<14) /* force speed 100 */ | | 229 | #define MCR_FES (1U<<14) /* force speed 100 */ |
230 | #define MCR_DO (1U<<13) /* don't receive my own HDX Tx frames */ | | 230 | #define MCR_DO (1U<<13) /* don't receive my own HDX Tx frames */ |
231 | #define MCR_LOOP (1U<<12) /* run loop back */ | | 231 | #define MCR_LOOP (1U<<12) /* run loop back */ |
232 | #define MCR_USEFDX (1U<<11) /* force full duplex */ | | 232 | #define MCR_USEFDX (1U<<11) /* force full duplex */ |
233 | #define MCR_IPCEN (1U<<10) /* handle checksum */ | | 233 | #define MCR_IPCEN (1U<<10) /* handle checksum */ |
234 | #define MCR_DR (1U<<9) /* attempt no tx retry, send once */ | | 234 | #define MCR_DR (1U<<9) /* attempt no tx retry, send once */ |
235 | #define MCR_LUD (1U<<8) /* link condition report when RGMII */ | | 235 | #define MCR_LUD (1U<<8) /* link condition report when RGMII */ |
236 | #define MCR_ACS (1U<<7) /* auto pad auto strip CRC */ | | 236 | #define MCR_ACS (1U<<7) /* auto pad auto strip CRC */ |
237 | #define MCR_DC (1U<<4) /* report excessive tx deferral */ | | 237 | #define MCR_DC (1U<<4) /* report excessive tx deferral */ |
238 | #define MCR_TE (1U<<3) /* run Tx MAC engine, 0 to stop */ | | 238 | #define MCR_TE (1U<<3) /* run Tx MAC engine, 0 to stop */ |
239 | #define MCR_RE (1U<<2) /* run Rx MAC engine, 0 to stop */ | | 239 | #define MCR_RE (1U<<2) /* run Rx MAC engine, 0 to stop */ |
240 | #define MCR_PREA (3U) /* 1:0 preamble len. 0~2 */ | | 240 | #define MCR_PREA (3U) /* 1:0 preamble len. 0~2 */ |
241 | #define GMACAFR 0x0004 /* frame DA/SA address filter */ | | 241 | #define GMACAFR 0x0004 /* frame DA/SA address filter */ |
242 | #define AFR_RA (1U<<31) /* accept all irrespective of filt. */ | | 242 | #define AFR_RA (1U<<31) /* accept all irrespective of filt. */ |
243 | #define AFR_HPF (1U<<10) /* hash+perfect filter, or hash only */ | | 243 | #define AFR_HPF (1U<<10) /* hash+perfect filter, or hash only */ |
244 | #define AFR_SAF (1U<<9) /* source address filter */ | | 244 | #define AFR_SAF (1U<<9) /* source address filter */ |
245 | #define AFR_SAIF (1U<<8) /* SA inverse filtering */ | | 245 | #define AFR_SAIF (1U<<8) /* SA inverse filtering */ |
246 | #define AFR_PCF (2U<<6) /* 7:6 accept pause frame 0~3 */ | | 246 | #define AFR_PCF (2U<<6) /* 7:6 accept pause frame 0~3 */ |
247 | #define AFR_DBF (1U<<5) /* reject broadcast frame */ | | 247 | #define AFR_DBF (1U<<5) /* reject broadcast frame */ |
248 | #define AFR_PM (1U<<4) /* accept all multicast frame */ | | 248 | #define AFR_PM (1U<<4) /* accept all multicast frame */ |
249 | #define AFR_DAIF (1U<<3) /* DA inverse filtering */ | | 249 | #define AFR_DAIF (1U<<3) /* DA inverse filtering */ |
250 | #define AFR_MHTE (1U<<2) /* use multicast hash table */ | | 250 | #define AFR_MHTE (1U<<2) /* use multicast hash table */ |
251 | #define AFR_UHTE (1U<<1) /* use hash table for unicast */ | | 251 | #define AFR_UHTE (1U<<1) /* use hash table for unicast */ |
252 | #define AFR_PR (1U<<0) /* run promisc mode */ | | 252 | #define AFR_PR (1U<<0) /* run promisc mode */ |
253 | #define GMACGAR 0x0010 /* MDIO operation */ | | 253 | #define GMACGAR 0x0010 /* MDIO operation */ |
254 | #define GAR_PHY (11) /* 15:11 mii phy */ | | 254 | #define GAR_PHY (11) /* 15:11 mii phy */ |
255 | #define GAR_REG (6) /* 10:6 mii reg */ | | 255 | #define GAR_REG (6) /* 10:6 mii reg */ |
256 | #define GAR_CLK (2) /* 5:2 mdio clock tick ratio */ | | 256 | #define GAR_CLK (2) /* 5:2 mdio clock tick ratio */ |
257 | #define GAR_IOWR (1U<<1) /* MDIO write op */ | | 257 | #define GAR_IOWR (1U<<1) /* MDIO write op */ |
258 | #define GAR_BUSY (1U<<0) /* busy bit */ | | 258 | #define GAR_BUSY (1U<<0) /* busy bit */ |
259 | #define GAR_MDIO_25_35MHZ 2 | | 259 | #define GAR_MDIO_25_35MHZ 2 |
260 | #define GAR_MDIO_35_60MHZ 3 | | 260 | #define GAR_MDIO_35_60MHZ 3 |
261 | #define GAR_MDIO_60_100MHZ 0 | | 261 | #define GAR_MDIO_60_100MHZ 0 |
262 | #define GAR_MDIO_100_150MHZ 1 | | 262 | #define GAR_MDIO_100_150MHZ 1 |
263 | #define GAR_MDIO_150_250MHZ 4 | | 263 | #define GAR_MDIO_150_250MHZ 4 |
264 | #define GAR_MDIO_250_300MHZ 5 | | 264 | #define GAR_MDIO_250_300MHZ 5 |
265 | #define GMACGDR 0x0014 /* MDIO rd/wr data */ | | 265 | #define GMACGDR 0x0014 /* MDIO rd/wr data */ |
266 | #define GMACFCR 0x0018 /* 802.3x flowcontrol */ | | 266 | #define GMACFCR 0x0018 /* 802.3x flowcontrol */ |
267 | /* 31:16 pause timer value, 5:4 pause timer threshold */ | | 267 | /* 31:16 pause timer value, 5:4 pause timer threshold */ |
268 | #define FCR_RFE (1U<<2) /* accept PAUSE to throttle Tx */ | | 268 | #define FCR_RFE (1U<<2) /* accept PAUSE to throttle Tx */ |
269 | #define FCR_TFE (1U<<1) /* generate PAUSE to moderate Rx lvl */ | | 269 | #define FCR_TFE (1U<<1) /* generate PAUSE to moderate Rx lvl */ |
270 | #define GMACIMPL 0x0020 /* implementation id */ | | 270 | #define GMACIMPL 0x0020 /* implementation id */ |
271 | #define GMACISR 0x0038 /* interrupt status indication */ | | 271 | #define GMACISR 0x0038 /* interrupt status indication */ |
272 | #define GMACIMR 0x003c /* interrupt mask to inhibit */ | | 272 | #define GMACIMR 0x003c /* interrupt mask to inhibit */ |
273 | #define ISR_TS (1U<<9) /* time stamp operation detected */ | | 273 | #define ISR_TS (1U<<9) /* time stamp operation detected */ |
274 | #define ISR_CO (1U<<7) /* Rx checksum offload completed */ | | 274 | #define ISR_CO (1U<<7) /* Rx checksum offload completed */ |
275 | #define ISR_TX (1U<<6) /* Tx completed */ | | 275 | #define ISR_TX (1U<<6) /* Tx completed */ |
276 | #define ISR_RX (1U<<5) /* Rx completed */ | | 276 | #define ISR_RX (1U<<5) /* Rx completed */ |
277 | #define ISR_ANY (1U<<4) /* any of above 5-7 report */ | | 277 | #define ISR_ANY (1U<<4) /* any of above 5-7 report */ |
278 | #define ISR_LC (1U<<0) /* link status change detected */ | | 278 | #define ISR_LC (1U<<0) /* link status change detected */ |
279 | #define GMACMAH0 0x0040 /* my own MAC address 47:32 */ | | 279 | #define GMACMAH0 0x0040 /* my own MAC address 47:32 */ |
280 | #define GMACMAL0 0x0044 /* my own MAC address 31:0 */ | | 280 | #define GMACMAL0 0x0044 /* my own MAC address 31:0 */ |
281 | #define GMACMAH(i) ((i)*8+0x40) /* supplemental MAC addr 1-15 */ | | 281 | #define GMACMAH(i) ((i)*8+0x40) /* supplemental MAC addr 1-15 */ |
282 | #define GMACMAL(i) ((i)*8+0x44) /* 31:0 MAC address low part */ | | 282 | #define GMACMAL(i) ((i)*8+0x44) /* 31:0 MAC address low part */ |
283 | /* MAH bit-31: slot in use, 30: SA to match, 29:24 byte-wise don'care */ | | 283 | /* MAH bit-31: slot in use, 30: SA to match, 29:24 byte-wise don'care */ |
284 | #define GMACAMAH(i) ((i)*8+0x800) /* supplemental MAC addr 16-31 */ | | 284 | #define GMACAMAH(i) ((i)*8+0x800) /* supplemental MAC addr 16-31 */ |
285 | #define GMACAMAL(i) ((i)*8+0x804) /* 31: MAC address low part */ | | 285 | #define GMACAMAL(i) ((i)*8+0x804) /* 31: MAC address low part */ |
286 | /* supplimental MAH bit-31: slot in use, no other bit is effective */ | | 286 | /* supplimental MAH bit-31: slot in use, no other bit is effective */ |
287 | #define GMACMHTH 0x0008 /* 64bit multicast hash table 63:32 */ | | 287 | #define GMACMHTH 0x0008 /* 64bit multicast hash table 63:32 */ |
288 | #define GMACMHTL 0x000c /* 64bit multicast hash table 31:0 */ | | 288 | #define GMACMHTL 0x000c /* 64bit multicast hash table 31:0 */ |
289 | #define GMACMHT(i) ((i)*4+0x500) /* 256-bit alternative mcast hash 0-7 */ | | 289 | #define GMACMHT(i) ((i)*4+0x500) /* 256-bit alternative mcast hash 0-7 */ |
290 | #define GMACVTAG 0x001c /* VLAN tag control */ | | 290 | #define GMACVTAG 0x001c /* VLAN tag control */ |
291 | #define VTAG_HASH (1U<<19) /* use VLAN tag hash table */ | | 291 | #define VTAG_HASH (1U<<19) /* use VLAN tag hash table */ |
292 | #define VTAG_SVLAN (1U<<18) /* handle type 0x88A8 SVLAN frame */ | | 292 | #define VTAG_SVLAN (1U<<18) /* handle type 0x88A8 SVLAN frame */ |
293 | #define VTAG_INV (1U<<17) /* run inverse match logic */ | | 293 | #define VTAG_INV (1U<<17) /* run inverse match logic */ |
294 | #define VTAG_ETV (1U<<16) /* use only 12bit VID field to match */ | | 294 | #define VTAG_ETV (1U<<16) /* use only 12bit VID field to match */ |
295 | /* 15:0 concat of PRIO+CFI+VID */ | | 295 | /* 15:0 concat of PRIO+CFI+VID */ |
296 | #define GMACVHT 0x0588 /* 16-bit VLAN tag hash */ | | 296 | #define GMACVHT 0x0588 /* 16-bit VLAN tag hash */ |
297 | #define GMACMIISR 0x00d8 /* resolved RGMII/SGMII link status */ | | 297 | #define GMACMIISR 0x00d8 /* resolved RGMII/SGMII link status */ |
298 | #define MIISR_LUP (1U<<3) /* link up(1)/down(0) report */ | | 298 | #define MIISR_LUP (1U<<3) /* link up(1)/down(0) report */ |
299 | #define MIISR_SPD (3U<<1) /* 2:1 speed 10(0)/100(1)/1000(2) */ | | 299 | #define MIISR_SPD (3U<<1) /* 2:1 speed 10(0)/100(1)/1000(2) */ |
300 | #define MIISR_FDX (1U<<0) /* fdx detected */ | | 300 | #define MIISR_FDX (1U<<0) /* fdx detected */ |
301 | | | 301 | |
302 | #define GMACLPIS 0x0030 /* LPI control & status */ | | 302 | #define GMACLPIS 0x0030 /* LPI control & status */ |
303 | #define LPIS_TXA (1U<<19) /* complete Tx in progress and LPI */ | | 303 | #define LPIS_TXA (1U<<19) /* complete Tx in progress and LPI */ |
304 | #define LPIS_PLS (1U<<17) | | 304 | #define LPIS_PLS (1U<<17) |
305 | #define LPIS_EN (1U<<16) /* 1: enter LPI mode, 0: exit */ | | 305 | #define LPIS_EN (1U<<16) /* 1: enter LPI mode, 0: exit */ |
306 | #define LPIS_TEN (1U<<0) /* Tx LPI report */ | | 306 | #define LPIS_TEN (1U<<0) /* Tx LPI report */ |
307 | #define GMACLPIC 0x0034 /* LPI timer control */ | | 307 | #define GMACLPIC 0x0034 /* LPI timer control */ |
308 | #define LPIC_LST (5) /* 16:5 ??? */ | | 308 | #define LPIC_LST (5) /* 16:5 ??? */ |
309 | #define LPIC_TWT (0) /* 15:0 ??? */ | | 309 | #define LPIC_TWT (0) /* 15:0 ??? */ |
310 | /* 0x700-764 Time Stamp control */ | | 310 | /* 0x700-764 Time Stamp control */ |
311 | | | 311 | |
312 | #define GMACBMR 0x1000 /* DMA bus mode control */ | | 312 | #define GMACBMR 0x1000 /* DMA bus mode control */ |
313 | /* 24 8xPBL multiply by 8 for RPBL & PBL values | | 313 | /* 24 8xPBL multiply by 8 for RPBL & PBL values |
314 | * 23 USP 1 to use RPBL for Rx DMA burst, 0 to share PBL by Rx and Tx | | 314 | * 23 USP 1 to use RPBL for Rx DMA burst, 0 to share PBL by Rx and Tx |
315 | * 22:17 RPBL | | 315 | * 22:17 RPBL |
316 | * 16 FB fixed burst | | 316 | * 16 FB fixed burst |
317 | * 15:14 priority between Rx and Tx | | 317 | * 15:14 priority between Rx and Tx |
318 | * 3 rxtx ratio 41 | | 318 | * 3 rxtx ratio 41 |
319 | * 2 rxtx ratio 31 | | 319 | * 2 rxtx ratio 31 |
320 | * 1 rxtx ratio 21 | | 320 | * 1 rxtx ratio 21 |
321 | * 0 rxtx ratio 11 | | 321 | * 0 rxtx ratio 11 |
322 | * 13:8 PBL possible DMA burst length | | 322 | * 13:8 PBL possible DMA burst length |
323 | * 7 ATDS select 32-byte descriptor format for advanced features | | 323 | * 7 ATDS select 32-byte descriptor format for advanced features |
324 | * 6:2 DSL descriptor skip length, 0 for adjacent, counted on bus width | | 324 | * 6:2 DSL descriptor skip length, 0 for adjacent, counted on bus width
325 | * 0 MAC reset op. self-clear | | 325 | * 0 MAC reset op. self-clear |
326 | */ | | 326 | */ |
327 | #define BMR_RST (1) /* reset op. self clear when done */ | | 327 | #define BMR_RST (1) /* reset op. self clear when done */ |
328 | #define GMACTPD 0x1004 /* write any to resume tdes */ | | 328 | #define GMACTPD 0x1004 /* write any to resume tdes */ |
329 | #define GMACRPD 0x1008 /* write any to resume rdes */ | | 329 | #define GMACRPD 0x1008 /* write any to resume rdes */ |
330 | #define GMACRDLA 0x100c /* rdes base address 32bit paddr */ | | 330 | #define GMACRDLA 0x100c /* rdes base address 32bit paddr */ |
331 | #define GMACTDLA 0x1010 /* tdes base address 32bit paddr */ | | 331 | #define GMACTDLA 0x1010 /* tdes base address 32bit paddr */ |
332 | #define GMACDSR 0x1014 /* DMA status detail report; W1C */ | | 332 | #define GMACDSR 0x1014 /* DMA status detail report; W1C */ |
333 | #define GMACDIE 0x101c /* DMA interrupt enable */ | | 333 | #define GMACDIE 0x101c /* DMA interrupt enable */ |
334 | #define DMAI_LPI (1U<<30) /* LPI interrupt */ | | 334 | #define DMAI_LPI (1U<<30) /* LPI interrupt */ |
335 | #define DMAI_TTI (1U<<29) /* timestamp trigger interrupt */ | | 335 | #define DMAI_TTI (1U<<29) /* timestamp trigger interrupt */ |
336 | #define DMAI_GMI (1U<<27) /* management counter interrupt */ | | 336 | #define DMAI_GMI (1U<<27) /* management counter interrupt */ |
337 | #define DMAI_GLI (1U<<26) /* xMII link change detected */ | | 337 | #define DMAI_GLI (1U<<26) /* xMII link change detected */ |
338 | #define DMAI_EB (23) /* 25:23 DMA bus error detected */ | | 338 | #define DMAI_EB (23) /* 25:23 DMA bus error detected */ |
339 | #define DMAI_TS (20) /* 22:20 Tx DMA state report */ | | 339 | #define DMAI_TS (20) /* 22:20 Tx DMA state report */ |
340 | #define DMAI_RS (17) /* 19:17 Rx DMA state report */ | | 340 | #define DMAI_RS (17) /* 19:17 Rx DMA state report */
341 | #define DMAI_NIS (1U<<16) /* normal interrupt summary; W1C */ | | 341 | #define DMAI_NIS (1U<<16) /* normal interrupt summary; W1C */ |
342 | #define DMAI_AIS (1U<<15) /* abnormal interrupt summary; W1C */ | | 342 | #define DMAI_AIS (1U<<15) /* abnormal interrupt summary; W1C */ |
343 | #define DMAI_ERI (1U<<14) /* the first Rx buffer is filled */ | | 343 | #define DMAI_ERI (1U<<14) /* the first Rx buffer is filled */ |
344 | #define DMAI_FBI (1U<<13) /* DMA bus error detected */ | | 344 | #define DMAI_FBI (1U<<13) /* DMA bus error detected */ |
345 | #define DMAI_ETI (1U<<10) /* single frame Tx completed */ | | 345 | #define DMAI_ETI (1U<<10) /* single frame Tx completed */ |
346 | #define DMAI_RWT (1U<<9) /* longer than 2048 frame received */ | | 346 | #define DMAI_RWT (1U<<9) /* longer than 2048 frame received */ |
347 | #define DMAI_RPS (1U<<8) /* Rx process is now stopped */ | | 347 | #define DMAI_RPS (1U<<8) /* Rx process is now stopped */ |
348 | #define DMAI_RU (1U<<7) /* Rx descriptor not available */ | | 348 | #define DMAI_RU (1U<<7) /* Rx descriptor not available */ |
349 | #define DMAI_RI (1U<<6) /* frame Rx completed by !R1_DIC */ | | 349 | #define DMAI_RI (1U<<6) /* frame Rx completed by !R1_DIC */ |
350 | #define DMAI_UNF (1U<<5) /* Tx underflow detected */ | | 350 | #define DMAI_UNF (1U<<5) /* Tx underflow detected */ |
351 | #define DMAI_OVF (1U<<4) /* receive buffer overflow detected */ | | 351 | #define DMAI_OVF (1U<<4) /* receive buffer overflow detected */ |
352 | #define DMAI_TJT (1U<<3) /* longer than 2048 frame sent */ | | 352 | #define DMAI_TJT (1U<<3) /* longer than 2048 frame sent */ |
353 | #define DMAI_TU (1U<<2) /* Tx descriptor not available */ | | 353 | #define DMAI_TU (1U<<2) /* Tx descriptor not available */ |
354 | #define DMAI_TPS (1U<<1) /* transmission is stopped */ | | 354 | #define DMAI_TPS (1U<<1) /* transmission is stopped */ |
355 | #define DMAI_TI (1U<<0) /* frame Tx completed by T0_IC */ | | 355 | #define DMAI_TI (1U<<0) /* frame Tx completed by T0_IC */ |
356 | #define GMACOMR 0x1018 /* DMA operation mode */ | | 356 | #define GMACOMR 0x1018 /* DMA operation mode */ |
| | | 357 | #define OMR_DT (1U<<26) /* don't drop error frames */ |
357 | #define OMR_RSF (1U<<25) /* 1: Rx store&forward, 0: immed. */ | | 358 | #define OMR_RSF (1U<<25) /* 1: Rx store&forward, 0: immed. */ |
| | | 359 | #define OMR_DFF (1U<<24) /* don't flush rx frames on shortage */ |
358 | #define OMR_TSF (1U<<21) /* 1: Tx store&forward, 0: immed. */ | | 360 | #define OMR_TSF (1U<<21) /* 1: Tx store&forward, 0: immed. */ |
| | | 361 | #define OMR_FTF (1U<<20) /* initiate tx FIFO reset, SC */ |
359 | #define OMR_TTC (14) /* 16:14 Tx threshold */ | | 362 | #define OMR_TTC (14) /* 16:14 Tx threshold */ |
360 | #define OMR_ST (1U<<13) /* run Tx DMA engine, 0 to stop */ | | 363 | #define OMR_ST (1U<<13) /* run Tx DMA engine, 0 to stop */ |
361 | #define OMR_RFD (11) /* 12:11 Rx FIFO fill level */ | | 364 | #define OMR_RFD (11) /* 12:11 Rx FIFO fill level */ |
362 | #define OMR_EFC (1U<<8) /* transmit PAUSE to throttle Rx lvl. */ | | 365 | #define OMR_EFC (1U<<8) /* transmit PAUSE to throttle Rx lvl. */ |
363 | #define OMR_FEF (1U<<7) /* allow to receive error frames */ | | 366 | #define OMR_FEF (1U<<7) /* allow to receive error frames */ |
364 | #define OMR_SR (1U<<1) /* run Rx DMA engine, 0 to stop */ | | 367 | #define OMR_SR (1U<<1) /* run Rx DMA engine, 0 to stop */ |
365 | #define GMACEVCS 0x1020 /* missed frame or ovf detected */ | | 368 | #define GMACEVCS 0x1020 /* missed frame or ovf detected */ |
366 | #define GMACRWDT 0x1024 /* enable rx watchdog timer interrupt */ | | 369 | #define GMACRWDT 0x1024 /* enable rx watchdog timer interrupt */ |
367 | #define GMACAXIB 0x1028 /* AXI bus mode control */ | | 370 | #define GMACAXIB 0x1028 /* AXI bus mode control */ |
368 | #define GMACAXIS 0x102c /* AXI status report */ | | 371 | #define GMACAXIS 0x102c /* AXI status report */ |
369 | /* 0x1048 current tx desc address */ | | 372 | /* 0x1048 current tx desc address */ |
370 | /* 0x104c current rx desc address */ | | 373 | /* 0x104c current rx desc address */ |
371 | /* 0x1050 current tx buffer address */ | | 374 | /* 0x1050 current tx buffer address */ |
372 | /* 0x1054 current rx buffer address */ | | 375 | /* 0x1054 current rx buffer address */ |
373 | #define HWFEA 0x1058 /* DWC feature report */ | | 376 | #define HWFEA 0x1058 /* DWC feature report */ |
374 | #define FEA_EXDESC (1U<<24) /* alternative/enhanced desc layout */ | | 377 | #define FEA_EXDESC (1U<<24) /* alternative/enhanced desc layout */ |
375 | #define FEA_2COE (1U<<18) /* Rx type 2 IP checksum offload */ | | 378 | #define FEA_2COE (1U<<18) /* Rx type 2 IP checksum offload */ |
376 | #define FEA_1COE (1U<<17) /* Rx type 1 IP checksum offload */ | | 379 | #define FEA_1COE (1U<<17) /* Rx type 1 IP checksum offload */ |
377 | #define FEA_TXOE (1U<<16) /* Tx checksum offload */ | | 380 | #define FEA_TXOE (1U<<16) /* Tx checksum offload */ |
378 | #define FEA_MMC (1U<<11) /* RMON event counter */ | | 381 | #define FEA_MMC (1U<<11) /* RMON event counter */ |
379 | | | 382 | |
380 | #define GMACEVCTL 0x0100 /* event counter control */ | | 383 | #define GMACEVCTL 0x0100 /* event counter control */ |
381 | #define EVC_FHP (1U<<5) /* full-half preset */ | | 384 | #define EVC_FHP (1U<<5) /* full-half preset */ |
382 | #define EVC_CP (1U<<4) /* counter preset */ | | 385 | #define EVC_CP (1U<<4) /* counter preset */ |
383 | #define EVC_MCF (1U<<3) /* counter freeze */ | | 386 | #define EVC_MCF (1U<<3) /* counter freeze */ |
384 | #define EVC_ROR (1U<<2) /* auto-zero on counter read */ | | 387 | #define EVC_ROR (1U<<2) /* auto-zero on counter read */ |
385 | #define EVC_CSR (1U<<1) /* counter stop rollover */ | | 388 | #define EVC_CSR (1U<<1) /* counter stop rollover */ |
386 | #define EVC_CR (1U<<0) /* reset counters */ | | 389 | #define EVC_CR (1U<<0) /* reset counters */ |
387 | #define GMACEVCNT(i) ((i)*4+0x114) /* 80 event counters 0x114 - 0x284 */ | | 390 | #define GMACEVCNT(i) ((i)*4+0x114) /* 80 event counters 0x114 - 0x284 */ |
388 | | | 391 | |
389 | /* 0x400-4ac L3/L4 control */ | | 392 | /* 0x400-4ac L3/L4 control */ |
390 | | | 393 | |
391 | /* | | 394 | /* |
392 | * flash memory layout | | 395 | * flash memory layout |
393 | * 0x00 - 07 48-bit MAC station address. 4 byte wise in BE order. | | 396 | * 0x00 - 07 48-bit MAC station address. 4 byte wise in BE order. |
394 | * 0x08 - 0b H->MAC xfer engine program start addr 63:32. | | 397 | * 0x08 - 0b H->MAC xfer engine program start addr 63:32. |
395 | * 0x0c - 0f H2M program addr 31:0 (these are absolute addr, not offset) | | 398 | * 0x0c - 0f H2M program addr 31:0 (these are absolute addr, not offset) |
396 | * 0x10 - 13 H2M program length in 4 byte count. | | 399 | * 0x10 - 13 H2M program length in 4 byte count. |
397 | * 0x14 - 17 M->HOST xfer engine program start addr 63:32. | | 400 | * 0x14 - 17 M->HOST xfer engine program start addr 63:32.
398 | * 0x18 - 1b M2H program addr 31:0 (absolute addr, not relative) | | 401 | * 0x18 - 1b M2H program addr 31:0 (absolute addr, not relative)
399 | * 0x1c - 1f M2H program length in 4 byte count. | | 402 | * 0x1c - 1f M2H program length in 4 byte count.
400 | * 0x20 - 23 packet engine program addr 31:0, (absolute addr, not offset) | | 403 | * 0x20 - 23 packet engine program addr 31:0, (absolute addr, not offset) |
401 | * 0x24 - 27 packet program length in 4 byte count. | | 404 | * 0x24 - 27 packet program length in 4 byte count. |
402 | * | | 405 | * |
403 | * above ucode are loaded via mapped reg 0x210, 0x21c and 0x0c0. | | 406 | * above ucode are loaded via mapped reg 0x210, 0x21c and 0x0c0. |
404 | */ | | 407 | */ |
405 | | | 408 | |
406 | #define _BMR 0x00412080 /* XXX TBD */ | | 409 | #define _BMR 0x00412080 /* NetSec BMR value, magic spell */ |
407 | /* NetSec uses local RAM to handle GMAC desc arrays */ | | 410 | /* NetSec uses local RAM to handle GMAC desc arrays */ |
408 | #define _RDLA 0x18000 | | 411 | #define _RDLA 0x18000 |
409 | #define _TDLA 0x1c000 | | 412 | #define _TDLA 0x1c000 |
410 | /* lower address region is used for intermediate frame data buffers */ | | 413 | /* lower address region is used for intermediate frame data buffers */ |
411 | | | 414 | |
412 | /* | | 415 | /* |
413 | * all below are software construction. | | 416 | * all below are software construction. |
414 | */ | | 417 | */ |
415 | #define MD_NTXDESC 128 | | 418 | #define MD_NTXDESC 128 |
416 | #define MD_NRXDESC 64 | | 419 | #define MD_NRXDESC 64 |
417 | | | 420 | |
418 | #define MD_NTXSEGS 16 | | 421 | #define MD_NTXSEGS 16 |
419 | #define MD_TXQUEUELEN 8 | | 422 | #define MD_TXQUEUELEN 8 |
420 | #define MD_TXQUEUELEN_MASK (MD_TXQUEUELEN - 1) | | 423 | #define MD_TXQUEUELEN_MASK (MD_TXQUEUELEN - 1) |
421 | #define MD_TXQUEUE_GC (MD_TXQUEUELEN / 4) | | 424 | #define MD_TXQUEUE_GC (MD_TXQUEUELEN / 4) |
422 | #define MD_NTXDESC_MASK (MD_NTXDESC - 1) | | 425 | #define MD_NTXDESC_MASK (MD_NTXDESC - 1) |
423 | #define MD_NEXTTX(x) (((x) + 1) & MD_NTXDESC_MASK) | | 426 | #define MD_NEXTTX(x) (((x) + 1) & MD_NTXDESC_MASK) |
424 | #define MD_NEXTTXS(x) (((x) + 1) & MD_TXQUEUELEN_MASK) | | 427 | #define MD_NEXTTXS(x) (((x) + 1) & MD_TXQUEUELEN_MASK) |
425 | | | 428 | |
426 | #define MD_NRXDESC_MASK (MD_NRXDESC - 1) | | 429 | #define MD_NRXDESC_MASK (MD_NRXDESC - 1) |
427 | #define MD_NEXTRX(x) (((x) + 1) & MD_NRXDESC_MASK) | | 430 | #define MD_NEXTRX(x) (((x) + 1) & MD_NRXDESC_MASK) |
428 | | | 431 | |
429 | struct control_data { | | 432 | struct control_data { |
430 | struct tdes cd_txdescs[MD_NTXDESC]; | | 433 | struct tdes cd_txdescs[MD_NTXDESC]; |
431 | struct rdes cd_rxdescs[MD_NRXDESC]; | | 434 | struct rdes cd_rxdescs[MD_NRXDESC]; |
432 | }; | | 435 | }; |
433 | #define SCX_CDOFF(x) offsetof(struct control_data, x) | | 436 | #define SCX_CDOFF(x) offsetof(struct control_data, x) |
434 | #define SCX_CDTXOFF(x) SCX_CDOFF(cd_txdescs[(x)]) | | 437 | #define SCX_CDTXOFF(x) SCX_CDOFF(cd_txdescs[(x)]) |
435 | #define SCX_CDRXOFF(x) SCX_CDOFF(cd_rxdescs[(x)]) | | 438 | #define SCX_CDRXOFF(x) SCX_CDOFF(cd_rxdescs[(x)]) |
436 | | | 439 | |
437 | struct scx_txsoft { | | 440 | struct scx_txsoft { |
438 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ | | 441 | struct mbuf *txs_mbuf; /* head of our mbuf chain */ |
439 | bus_dmamap_t txs_dmamap; /* our DMA map */ | | 442 | bus_dmamap_t txs_dmamap; /* our DMA map */ |
440 | int txs_firstdesc; /* first descriptor in packet */ | | 443 | int txs_firstdesc; /* first descriptor in packet */ |
441 | int txs_lastdesc; /* last descriptor in packet */ | | 444 | int txs_lastdesc; /* last descriptor in packet */ |
442 | int txs_ndesc; /* # of descriptors used */ | | 445 | int txs_ndesc; /* # of descriptors used */ |
443 | }; | | 446 | }; |
444 | | | 447 | |
445 | struct scx_rxsoft { | | 448 | struct scx_rxsoft { |
446 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ | | 449 | struct mbuf *rxs_mbuf; /* head of our mbuf chain */ |
447 | bus_dmamap_t rxs_dmamap; /* our DMA map */ | | 450 | bus_dmamap_t rxs_dmamap; /* our DMA map */ |
448 | }; | | 451 | }; |
449 | | | 452 | |
450 | struct scx_softc { | | 453 | struct scx_softc { |
451 | device_t sc_dev; /* generic device information */ | | 454 | device_t sc_dev; /* generic device information */ |
452 | bus_space_tag_t sc_st; /* bus space tag */ | | 455 | bus_space_tag_t sc_st; /* bus space tag */ |
453 | bus_space_handle_t sc_sh; /* bus space handle */ | | 456 | bus_space_handle_t sc_sh; /* bus space handle */ |
454 | bus_size_t sc_sz; /* csr map size */ | | 457 | bus_size_t sc_sz; /* csr map size */ |
455 | bus_space_handle_t sc_eesh; /* eeprom section handle */ | | 458 | bus_space_handle_t sc_eesh; /* eeprom section handle */ |
456 | bus_size_t sc_eesz; /* eeprom map size */ | | 459 | bus_size_t sc_eesz; /* eeprom map size */ |
457 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ | | 460 | bus_dma_tag_t sc_dmat; /* bus DMA tag */ |
458 | struct ethercom sc_ethercom; /* Ethernet common data */ | | 461 | struct ethercom sc_ethercom; /* Ethernet common data */ |
459 | struct mii_data sc_mii; /* MII */ | | 462 | struct mii_data sc_mii; /* MII */ |
460 | callout_t sc_callout; /* PHY monitor callout */ | | 463 | callout_t sc_callout; /* PHY monitor callout */ |
461 | bus_dma_segment_t sc_seg; /* descriptor store seg */ | | 464 | bus_dma_segment_t sc_seg; /* descriptor store seg */ |
462 | int sc_nseg; /* descriptor store nseg */ | | 465 | int sc_nseg; /* descriptor store nseg */ |
463 | void *sc_ih; /* interrupt cookie */ | | 466 | void *sc_ih; /* interrupt cookie */ |
464 | int sc_phy_id; /* PHY address */ | | 467 | int sc_phy_id; /* PHY address */ |
465 | int sc_flowflags; /* 802.3x PAUSE flow control */ | | 468 | int sc_flowflags; /* 802.3x PAUSE flow control */ |
466 | uint32_t sc_mdclk; /* GAR 5:2 clock selection */ | | 469 | uint32_t sc_mdclk; /* GAR 5:2 clock selection */ |
467 | uint32_t sc_t0cotso; /* T0_CSUM | T0_TSO to run */ | | 470 | uint32_t sc_t0cotso; /* T0_CSUM | T0_TSO to run */ |
468 | int sc_miigmii; /* 1: MII/GMII, 0: RGMII */ | | 471 | int sc_miigmii; /* 1: MII/GMII, 0: RGMII */ |
469 | int sc_phandle; /* fdt phandle */ | | 472 | int sc_phandle; /* fdt phandle */ |
470 | uint64_t sc_freq; | | 473 | uint64_t sc_freq; |
471 | uint32_t sc_maxsize; | | 474 | uint32_t sc_maxsize; |
472 | | | 475 | |
473 | bus_dmamap_t sc_cddmamap; /* control data DMA map */ | | 476 | bus_dmamap_t sc_cddmamap; /* control data DMA map */ |
474 | #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr | | 477 | #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr |
475 | | | 478 | |
476 | struct control_data *sc_control_data; | | 479 | struct control_data *sc_control_data; |
477 | #define sc_txdescs sc_control_data->cd_txdescs | | 480 | #define sc_txdescs sc_control_data->cd_txdescs |
478 | #define sc_rxdescs sc_control_data->cd_rxdescs | | 481 | #define sc_rxdescs sc_control_data->cd_rxdescs |
479 | | | 482 | |
480 | struct scx_txsoft sc_txsoft[MD_TXQUEUELEN]; | | 483 | struct scx_txsoft sc_txsoft[MD_TXQUEUELEN]; |
481 | struct scx_rxsoft sc_rxsoft[MD_NRXDESC]; | | 484 | struct scx_rxsoft sc_rxsoft[MD_NRXDESC]; |
482 | int sc_txfree; /* number of free Tx descriptors */ | | 485 | int sc_txfree; /* number of free Tx descriptors */ |
483 | int sc_txnext; /* next ready Tx descriptor */ | | 486 | int sc_txnext; /* next ready Tx descriptor */ |
484 | int sc_txsfree; /* number of free Tx jobs */ | | 487 | int sc_txsfree; /* number of free Tx jobs */ |
485 | int sc_txsnext; /* next ready Tx job */ | | 488 | int sc_txsnext; /* next ready Tx job */ |
486 | int sc_txsdirty; /* dirty Tx jobs */ | | 489 | int sc_txsdirty; /* dirty Tx jobs */ |
487 | int sc_rxptr; /* next ready Rx descriptor/descsoft */ | | 490 | int sc_rxptr; /* next ready Rx descriptor/descsoft */ |
488 | | | 491 | |
489 | krndsource_t rnd_source; /* random source */ | | 492 | krndsource_t rnd_source; /* random source */ |
490 | #ifdef GMAC_EVENT_COUNTERS | | 493 | #ifdef GMAC_EVENT_COUNTERS |
491 | /* 80 event counters exist */ | | 494 | /* 80 event counters exist */ |
492 | #endif | | 495 | #endif |
493 | }; | | 496 | }; |
494 | | | 497 | |
495 | #define SCX_CDTXADDR(sc, x) ((sc)->sc_cddma + SCX_CDTXOFF((x))) | | 498 | #define SCX_CDTXADDR(sc, x) ((sc)->sc_cddma + SCX_CDTXOFF((x))) |
496 | #define SCX_CDRXADDR(sc, x) ((sc)->sc_cddma + SCX_CDRXOFF((x))) | | 499 | #define SCX_CDRXADDR(sc, x) ((sc)->sc_cddma + SCX_CDRXOFF((x))) |
497 | | | 500 | |
498 | #define SCX_CDTXSYNC(sc, x, n, ops) \ | | 501 | #define SCX_CDTXSYNC(sc, x, n, ops) \ |
499 | do { \ | | 502 | do { \ |
500 | int __x, __n; \ | | 503 | int __x, __n; \ |
501 | \ | | 504 | \ |
502 | __x = (x); \ | | 505 | __x = (x); \ |
503 | __n = (n); \ | | 506 | __n = (n); \ |
504 | \ | | 507 | \ |
505 | /* If it will wrap around, sync to the end of the ring. */ \ | | 508 | /* If it will wrap around, sync to the end of the ring. */ \ |
506 | if ((__x + __n) > MD_NTXDESC) { \ | | 509 | if ((__x + __n) > MD_NTXDESC) { \ |
507 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 510 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
508 | SCX_CDTXOFF(__x), sizeof(struct tdes) * \ | | 511 | SCX_CDTXOFF(__x), sizeof(struct tdes) * \ |
509 | (MD_NTXDESC - __x), (ops)); \ | | 512 | (MD_NTXDESC - __x), (ops)); \ |
510 | __n -= (MD_NTXDESC - __x); \ | | 513 | __n -= (MD_NTXDESC - __x); \ |
511 | __x = 0; \ | | 514 | __x = 0; \ |
512 | } \ | | 515 | } \ |
513 | \ | | 516 | \ |
514 | /* Now sync whatever is left. */ \ | | 517 | /* Now sync whatever is left. */ \ |
515 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 518 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
516 | SCX_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops)); \ | | 519 | SCX_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops)); \ |
517 | } while (/*CONSTCOND*/0) | | 520 | } while (/*CONSTCOND*/0) |
518 | | | 521 | |
519 | #define SCX_CDRXSYNC(sc, x, ops) \ | | 522 | #define SCX_CDRXSYNC(sc, x, ops) \ |
520 | do { \ | | 523 | do { \ |
521 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ | | 524 | bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ |
522 | SCX_CDRXOFF((x)), sizeof(struct rdes), (ops)); \ | | 525 | SCX_CDRXOFF((x)), sizeof(struct rdes), (ops)); \ |
523 | } while (/*CONSTCOND*/0) | | 526 | } while (/*CONSTCOND*/0) |
524 | | | 527 | |
525 | #define SCX_INIT_RXDESC(sc, x) \ | | 528 | #define SCX_INIT_RXDESC(sc, x) \ |
526 | do { \ | | 529 | do { \ |
527 | struct scx_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ | | 530 | struct scx_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ |
528 | struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \ | | 531 | struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \ |
529 | struct mbuf *__m = __rxs->rxs_mbuf; \ | | 532 | struct mbuf *__m = __rxs->rxs_mbuf; \ |
530 | bus_addr_t __p = __rxs->rxs_dmamap->dm_segs[0].ds_addr; \ | | 533 | bus_addr_t __p = __rxs->rxs_dmamap->dm_segs[0].ds_addr; \ |
531 | bus_size_t __z = __rxs->rxs_dmamap->dm_segs[0].ds_len; \ | | 534 | bus_size_t __z = __rxs->rxs_dmamap->dm_segs[0].ds_len; \ |
532 | __m->m_data = __m->m_ext.ext_buf; \ | | 535 | __m->m_data = __m->m_ext.ext_buf; \ |
533 | __rxd->r3 = htole32(__z - 4); \ | | 536 | __rxd->r3 = htole32(__z - 4); \ |
534 | __rxd->r2 = htole32(BUS_ADDR_LO32(__p)); \ | | 537 | __rxd->r2 = htole32(BUS_ADDR_LO32(__p)); \ |
535 | __rxd->r1 = htole32(BUS_ADDR_HI32(__p)); \ | | 538 | __rxd->r1 = htole32(BUS_ADDR_HI32(__p)); \ |
536 | __rxd->r0 &= htole32(R0_LD); \ | | 539 | __rxd->r0 &= htole32(R0_LD); \ |
537 | __rxd->r0 |= htole32(R0_OWN); \ | | 540 | __rxd->r0 |= htole32(R0_OWN); \ |
538 | } while (/*CONSTCOND*/0) | | 541 | } while (/*CONSTCOND*/0) |
539 | | | 542 | |
540 | /* memory mapped CSR register access */ | | 543 | /* memory mapped CSR register access */ |
541 | #define CSR_READ(sc,off) \ | | 544 | #define CSR_READ(sc,off) \ |
542 | bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off)) | | 545 | bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off)) |
543 | #define CSR_WRITE(sc,off,val) \ | | 546 | #define CSR_WRITE(sc,off,val) \ |
544 | bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val)) | | 547 | bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val)) |
545 | | | 548 | |
546 | /* flash memory access */ | | 549 | /* flash memory access */ |
547 | #define EE_READ(sc,off) \ | | 550 | #define EE_READ(sc,off) \ |
548 | bus_space_read_4((sc)->sc_st, (sc)->sc_eesh, (off)) | | 551 | bus_space_read_4((sc)->sc_st, (sc)->sc_eesh, (off)) |
549 | | | 552 | |
550 | static int scx_fdt_match(device_t, cfdata_t, void *); | | 553 | static int scx_fdt_match(device_t, cfdata_t, void *); |
551 | static void scx_fdt_attach(device_t, device_t, void *); | | 554 | static void scx_fdt_attach(device_t, device_t, void *); |
552 | static int scx_acpi_match(device_t, cfdata_t, void *); | | 555 | static int scx_acpi_match(device_t, cfdata_t, void *); |
553 | static void scx_acpi_attach(device_t, device_t, void *); | | 556 | static void scx_acpi_attach(device_t, device_t, void *); |
554 | | | 557 | |
555 | CFATTACH_DECL_NEW(scx_fdt, sizeof(struct scx_softc), | | 558 | CFATTACH_DECL_NEW(scx_fdt, sizeof(struct scx_softc), |
556 | scx_fdt_match, scx_fdt_attach, NULL, NULL); | | 559 | scx_fdt_match, scx_fdt_attach, NULL, NULL); |
557 | | | 560 | |
558 | CFATTACH_DECL_NEW(scx_acpi, sizeof(struct scx_softc), | | 561 | CFATTACH_DECL_NEW(scx_acpi, sizeof(struct scx_softc), |
559 | scx_acpi_match, scx_acpi_attach, NULL, NULL); | | 562 | scx_acpi_match, scx_acpi_attach, NULL, NULL); |
560 | | | 563 | |
561 | static void scx_attach_i(struct scx_softc *); | | 564 | static void scx_attach_i(struct scx_softc *); |
562 | static void scx_reset(struct scx_softc *); | | 565 | static void scx_reset(struct scx_softc *); |
563 | static void scx_stop(struct ifnet *, int); | | 566 | static void scx_stop(struct ifnet *, int); |
564 | static int scx_init(struct ifnet *); | | 567 | static int scx_init(struct ifnet *); |
565 | static int scx_ioctl(struct ifnet *, u_long, void *); | | 568 | static int scx_ioctl(struct ifnet *, u_long, void *); |
566 | static void scx_set_rcvfilt(struct scx_softc *); | | 569 | static void scx_set_rcvfilt(struct scx_softc *); |
567 | static void scx_start(struct ifnet *); | | 570 | static void scx_start(struct ifnet *); |
568 | static void scx_watchdog(struct ifnet *); | | 571 | static void scx_watchdog(struct ifnet *); |
569 | static int scx_intr(void *); | | 572 | static int scx_intr(void *); |
570 | static void txreap(struct scx_softc *); | | 573 | static void txreap(struct scx_softc *); |
571 | static void rxfill(struct scx_softc *); | | 574 | static void rxfill(struct scx_softc *); |
572 | static int add_rxbuf(struct scx_softc *, int); | | 575 | static int add_rxbuf(struct scx_softc *, int); |
573 | static void rxdrain(struct scx_softc *sc); | | 576 | static void rxdrain(struct scx_softc *sc); |
574 | static void mii_statchg(struct ifnet *); | | 577 | static void mii_statchg(struct ifnet *); |
575 | static void scx_ifmedia_sts(struct ifnet *, struct ifmediareq *); | | 578 | static void scx_ifmedia_sts(struct ifnet *, struct ifmediareq *); |
576 | static int mii_readreg(device_t, int, int, uint16_t *); | | 579 | static int mii_readreg(device_t, int, int, uint16_t *); |
577 | static int mii_writereg(device_t, int, int, uint16_t); | | 580 | static int mii_writereg(device_t, int, int, uint16_t); |
578 | static void phy_tick(void *); | | 581 | static void phy_tick(void *); |
579 | static void dump_hwfeature(struct scx_softc *); | | 582 | static void dump_hwfeature(struct scx_softc *); |
580 | | | 583 | |
581 | static void resetuengine(struct scx_softc *); | | 584 | static void resetuengine(struct scx_softc *); |
582 | static void loaducode(struct scx_softc *); | | 585 | static void loaducode(struct scx_softc *); |
583 | static void injectucode(struct scx_softc *, int, bus_addr_t, bus_size_t); | | 586 | static void injectucode(struct scx_softc *, int, bus_addr_t, bus_size_t); |
584 | static void forcephyloopback(struct scx_softc *); | | 587 | static void forcephyloopback(struct scx_softc *); |
585 | static void resetphytonormal(struct scx_softc *); | | 588 | static void resetphytonormal(struct scx_softc *); |
586 | | | 589 | |
587 | static int get_mdioclk(uint32_t); | | 590 | static int get_mdioclk(uint32_t); |
588 | | | 591 | |
589 | #define WAIT_FOR_SET(sc, reg, set) \ | | 592 | #define WAIT_FOR_SET(sc, reg, set) \ |
590 | wait_for_bits(sc, reg, set, ~0, 0) | | 593 | wait_for_bits(sc, reg, set, ~0, 0) |
591 | #define WAIT_FOR_CLR(sc, reg, clr) \ | | 594 | #define WAIT_FOR_CLR(sc, reg, clr) \ |
592 | wait_for_bits(sc, reg, 0, clr, 0) | | 595 | wait_for_bits(sc, reg, 0, clr, 0) |
593 | | | 596 | |
594 | static int | | 597 | static int |
595 | wait_for_bits(struct scx_softc *sc, int reg, | | 598 | wait_for_bits(struct scx_softc *sc, int reg, |
596 | uint32_t set, uint32_t clr, uint32_t fail) | | 599 | uint32_t set, uint32_t clr, uint32_t fail) |
597 | { | | 600 | { |
598 | uint32_t val; | | 601 | uint32_t val; |
599 | int ntries; | | 602 | int ntries; |
600 | | | 603 | |
601 | for (ntries = 0; ntries < 1000; ntries++) { | | 604 | for (ntries = 0; ntries < 1000; ntries++) { |
602 | val = CSR_READ(sc, reg); | | 605 | val = CSR_READ(sc, reg); |
603 | if ((val & set) || !(val & clr)) | | 606 | if ((val & set) || !(val & clr)) |
604 | return 0; | | 607 | return 0; |
605 | if (val & fail) | | 608 | if (val & fail) |
606 | return 1; | | 609 | return 1; |
607 | DELAY(1); | | 610 | DELAY(1); |
608 | } | | 611 | } |
609 | return 1; | | 612 | return 1; |
610 | } | | 613 | } |
611 | | | 614 | |
612 | /* GMAC register indirect access */ | | 615 | /* GMAC register indirect access */ |
613 | static int | | 616 | static int |
614 | mac_read(struct scx_softc *sc, int reg) | | 617 | mac_read(struct scx_softc *sc, int reg) |
615 | { | | 618 | { |
616 | | | 619 | |
617 | CSR_WRITE(sc, MACCMD, reg | CMD_BUSY); | | 620 | CSR_WRITE(sc, MACCMD, reg | CMD_BUSY); |
618 | (void)WAIT_FOR_CLR(sc, MACCMD, CMD_BUSY); | | 621 | (void)WAIT_FOR_CLR(sc, MACCMD, CMD_BUSY); |
619 | return CSR_READ(sc, MACDATA); | | 622 | return CSR_READ(sc, MACDATA); |
620 | } | | 623 | } |
621 | | | 624 | |
622 | static void | | 625 | static void |
623 | mac_write(struct scx_softc *sc, int reg, int val) | | 626 | mac_write(struct scx_softc *sc, int reg, int val) |
624 | { | | 627 | { |
625 | | | 628 | |
626 | CSR_WRITE(sc, MACDATA, val); | | 629 | CSR_WRITE(sc, MACDATA, val); |
627 | CSR_WRITE(sc, MACCMD, reg | CMD_IOWR | CMD_BUSY); | | 630 | CSR_WRITE(sc, MACCMD, reg | CMD_IOWR | CMD_BUSY); |
628 | (void)WAIT_FOR_CLR(sc, MACCMD, CMD_BUSY); | | 631 | (void)WAIT_FOR_CLR(sc, MACCMD, CMD_BUSY); |
629 | } | | 632 | } |
630 | | | 633 | |
631 | /* dig and decode "clock-frequency" value for a given clkname */ | | 634 | /* dig and decode "clock-frequency" value for a given clkname */ |
632 | static int | | 635 | static int |
633 | get_clk_freq(int phandle, const char *clkname) | | 636 | get_clk_freq(int phandle, const char *clkname) |
634 | { | | 637 | { |
635 | u_int index, n, cells; | | 638 | u_int index, n, cells; |
636 | const u_int *p; | | 639 | const u_int *p; |
637 | int err, len, resid; | | 640 | int err, len, resid; |
638 | unsigned int freq = 0; | | 641 | unsigned int freq = 0; |
639 | | | 642 | |
640 | err = fdtbus_get_index(phandle, "clock-names", clkname, &index); | | 643 | err = fdtbus_get_index(phandle, "clock-names", clkname, &index); |
641 | if (err == -1) | | 644 | if (err == -1) |
642 | return -1; | | 645 | return -1; |
643 | p = fdtbus_get_prop(phandle, "clocks", &len); | | 646 | p = fdtbus_get_prop(phandle, "clocks", &len); |
644 | if (p == NULL) | | 647 | if (p == NULL) |
645 | return -1; | | 648 | return -1; |
646 | for (n = 0, resid = len; resid > 0; n++) { | | 649 | for (n = 0, resid = len; resid > 0; n++) { |
647 | const int cc_phandle = | | 650 | const int cc_phandle = |
648 | fdtbus_get_phandle_from_native(be32toh(p[0])); | | 651 | fdtbus_get_phandle_from_native(be32toh(p[0])); |
649 | if (of_getprop_uint32(cc_phandle, "#clock-cells", &cells)) | | 652 | if (of_getprop_uint32(cc_phandle, "#clock-cells", &cells)) |
650 | return -1; | | 653 | return -1; |
651 | if (n == index) { | | 654 | if (n == index) { |
652 | if (of_getprop_uint32(cc_phandle, | | 655 | if (of_getprop_uint32(cc_phandle, |
653 | "clock-frequency", &freq)) | | 656 | "clock-frequency", &freq)) |
654 | return -1; | | 657 | return -1; |
655 | return freq; | | 658 | return freq; |
656 | } | | 659 | } |
657 | resid -= (cells + 1) * 4; | | 660 | resid -= (cells + 1) * 4; |
658 | p += (cells + 1) * 4; | | 661 | p += (cells + 1) * 4; |
659 | } | | 662 | } |
660 | return -1; | | 663 | return -1; |
661 | } | | 664 | } |
662 | | | 665 | |
663 | #define ATTACH_DEBUG 1 | | 666 | #define ATTACH_DEBUG 1 |
664 | | | 667 | |
/* FDT "compatible" strings accepted by scx_fdt_match() */
static const struct device_compatible_entry compat_data[] = {
	{ .compat = "socionext,synquacer-netsec" },
	DEVICE_COMPAT_EOL
};
/* ACPI _HID/_CID identifiers accepted by scx_acpi_match() */
static const struct device_compatible_entry compatible[] = {
	{ .compat = "SCX0001" },
	DEVICE_COMPAT_EOL
};
673 | | | 676 | |
674 | static int | | 677 | static int |
675 | scx_fdt_match(device_t parent, cfdata_t cf, void *aux) | | 678 | scx_fdt_match(device_t parent, cfdata_t cf, void *aux) |
676 | { | | 679 | { |
677 | struct fdt_attach_args * const faa = aux; | | 680 | struct fdt_attach_args * const faa = aux; |
678 | | | 681 | |
679 | return of_compatible_match(faa->faa_phandle, compat_data); | | 682 | return of_compatible_match(faa->faa_phandle, compat_data); |
680 | } | | 683 | } |
681 | | | 684 | |
682 | static void | | 685 | static void |
683 | scx_fdt_attach(device_t parent, device_t self, void *aux) | | 686 | scx_fdt_attach(device_t parent, device_t self, void *aux) |
684 | { | | 687 | { |
685 | struct scx_softc * const sc = device_private(self); | | 688 | struct scx_softc * const sc = device_private(self); |
686 | struct fdt_attach_args * const faa = aux; | | 689 | struct fdt_attach_args * const faa = aux; |
687 | const int phandle = faa->faa_phandle; | | 690 | const int phandle = faa->faa_phandle; |
688 | bus_space_handle_t bsh; | | 691 | bus_space_handle_t bsh; |
689 | bus_space_handle_t eebsh; | | 692 | bus_space_handle_t eebsh; |
690 | bus_addr_t addr[2]; | | 693 | bus_addr_t addr[2]; |
691 | bus_size_t size[2]; | | 694 | bus_size_t size[2]; |
692 | void *intrh; | | 695 | void *intrh; |
693 | char intrstr[128]; | | 696 | char intrstr[128]; |
694 | int phy_phandle; | | 697 | int phy_phandle; |
695 | const char *phy_mode; | | 698 | const char *phy_mode; |
696 | bus_addr_t phy_id; | | 699 | bus_addr_t phy_id; |
697 | long ref_clk; | | 700 | long ref_clk; |
698 | | | 701 | |
699 | if (fdtbus_get_reg(phandle, 0, addr+0, size+0) != 0 | | 702 | if (fdtbus_get_reg(phandle, 0, addr+0, size+0) != 0 |
700 | || bus_space_map(faa->faa_bst, addr[0], size[0], 0, &bsh) != 0) { | | 703 | || bus_space_map(faa->faa_bst, addr[0], size[0], 0, &bsh) != 0) { |
701 | aprint_error(": unable to map registers\n"); | | 704 | aprint_error(": unable to map registers\n"); |
702 | return; | | 705 | return; |
703 | } | | 706 | } |
704 | if (fdtbus_get_reg(phandle, 1, addr+1, size+1) != 0 | | 707 | if (fdtbus_get_reg(phandle, 1, addr+1, size+1) != 0 |
705 | || bus_space_map(faa->faa_bst, addr[1], size[1], 0, &eebsh) != 0) { | | 708 | || bus_space_map(faa->faa_bst, addr[1], size[1], 0, &eebsh) != 0) { |
706 | aprint_error(": unable to map device eeprom\n"); | | 709 | aprint_error(": unable to map device eeprom\n"); |
707 | goto fail; | | 710 | goto fail; |
708 | } | | 711 | } |
709 | if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) { | | 712 | if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) { |
710 | aprint_error(": failed to decode interrupt\n"); | | 713 | aprint_error(": failed to decode interrupt\n"); |
711 | goto fail; | | 714 | goto fail; |
712 | } | | 715 | } |
713 | | | 716 | |
714 | phy_mode = fdtbus_get_string(phandle, "phy-mode"); | | 717 | phy_mode = fdtbus_get_string(phandle, "phy-mode"); |
715 | if (phy_mode == NULL) | | 718 | if (phy_mode == NULL) |
716 | aprint_error(": missing 'phy-mode' property\n"); | | 719 | aprint_error(": missing 'phy-mode' property\n"); |
717 | phy_phandle = fdtbus_get_phandle(phandle, "phy-handle"); | | 720 | phy_phandle = fdtbus_get_phandle(phandle, "phy-handle"); |
718 | if (phy_phandle == -1 | | 721 | if (phy_phandle == -1 |
719 | || fdtbus_get_reg(phy_phandle, 0, &phy_id, NULL) != 0) | | 722 | || fdtbus_get_reg(phy_phandle, 0, &phy_id, NULL) != 0) |
720 | phy_id = MII_PHY_ANY; | | 723 | phy_id = MII_PHY_ANY; |
721 | ref_clk = get_clk_freq(phandle, "phy_ref_clk"); | | 724 | ref_clk = get_clk_freq(phandle, "phy_ref_clk"); |
722 | if (ref_clk == -1) | | 725 | if (ref_clk == -1) |
723 | ref_clk = 250 * 1000 * 1000; | | 726 | ref_clk = 250 * 1000 * 1000; |
724 | | | 727 | |
725 | #if ATTACH_DEBUG == 1 | | 728 | #if ATTACH_DEBUG == 1 |
726 | aprint_normal("\n"); | | 729 | aprint_normal("\n"); |
727 | aprint_normal_dev(self, | | 730 | aprint_normal_dev(self, |
728 | "[FDT] phy mode %s, phy id %d, freq %ld\n", | | 731 | "[FDT] phy mode %s, phy id %d, freq %ld\n", |
729 | phy_mode, (int)phy_id, ref_clk); | | 732 | phy_mode, (int)phy_id, ref_clk); |
730 | aprint_normal("%s", device_xname(self)); | | 733 | aprint_normal("%s", device_xname(self)); |
731 | #endif | | 734 | #endif |
732 | | | 735 | |
733 | intrh = fdtbus_intr_establish(phandle, 0, IPL_NET, | | 736 | intrh = fdtbus_intr_establish(phandle, 0, IPL_NET, |
734 | NOT_MP_SAFE, scx_intr, sc); | | 737 | NOT_MP_SAFE, scx_intr, sc); |
735 | if (intrh == NULL) { | | 738 | if (intrh == NULL) { |
736 | aprint_error(": couldn't establish interrupt\n"); | | 739 | aprint_error(": couldn't establish interrupt\n"); |
737 | goto fail; | | 740 | goto fail; |
738 | } | | 741 | } |
739 | aprint_normal(" interrupt on %s", intrstr); | | 742 | aprint_normal(" interrupt on %s", intrstr); |
740 | | | 743 | |
741 | sc->sc_dev = self; | | 744 | sc->sc_dev = self; |
742 | sc->sc_st = faa->faa_bst; | | 745 | sc->sc_st = faa->faa_bst; |
743 | sc->sc_sh = bsh; | | 746 | sc->sc_sh = bsh; |
744 | sc->sc_sz = size[0]; | | 747 | sc->sc_sz = size[0]; |
745 | sc->sc_eesh = eebsh; | | 748 | sc->sc_eesh = eebsh; |
746 | sc->sc_eesz = size[1]; | | 749 | sc->sc_eesz = size[1]; |
747 | sc->sc_ih = intrh; | | 750 | sc->sc_ih = intrh; |
748 | sc->sc_dmat = faa->faa_dmat; | | 751 | sc->sc_dmat = faa->faa_dmat; |
749 | sc->sc_phandle = phandle; | | 752 | sc->sc_phandle = phandle; |
750 | sc->sc_phy_id = phy_id; | | 753 | sc->sc_phy_id = phy_id; |
751 | sc->sc_freq = ref_clk; | | 754 | sc->sc_freq = ref_clk; |
752 | | | 755 | |
753 | scx_attach_i(sc); | | 756 | scx_attach_i(sc); |
754 | | | 757 | |
755 | return; | | 758 | return; |
756 | fail: | | 759 | fail: |
757 | if (sc->sc_eesz) | | 760 | if (sc->sc_eesz) |
758 | bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz); | | 761 | bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz); |
759 | if (sc->sc_sz) | | 762 | if (sc->sc_sz) |
760 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); | | 763 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); |
761 | return; | | 764 | return; |
762 | } | | 765 | } |
763 | | | 766 | |
764 | static int | | 767 | static int |
765 | scx_acpi_match(device_t parent, cfdata_t cf, void *aux) | | 768 | scx_acpi_match(device_t parent, cfdata_t cf, void *aux) |
766 | { | | 769 | { |
767 | struct acpi_attach_args *aa = aux; | | 770 | struct acpi_attach_args *aa = aux; |
768 | | | 771 | |
769 | return acpi_compatible_match(aa, compatible); | | 772 | return acpi_compatible_match(aa, compatible); |
770 | } | | 773 | } |
771 | | | 774 | |
772 | #define HWFEA_DEBUG 1 | | 775 | #define HWFEA_DEBUG 1 |
773 | | | 776 | |
774 | static void | | 777 | static void |
775 | scx_acpi_attach(device_t parent, device_t self, void *aux) | | 778 | scx_acpi_attach(device_t parent, device_t self, void *aux) |
776 | { | | 779 | { |
777 | struct scx_softc * const sc = device_private(self); | | 780 | struct scx_softc * const sc = device_private(self); |
778 | struct acpi_attach_args * const aa = aux; | | 781 | struct acpi_attach_args * const aa = aux; |
779 | ACPI_HANDLE handle = aa->aa_node->ad_handle; | | 782 | ACPI_HANDLE handle = aa->aa_node->ad_handle; |
780 | bus_space_handle_t bsh, eebsh; | | 783 | bus_space_handle_t bsh, eebsh; |
781 | struct acpi_resources res; | | 784 | struct acpi_resources res; |
782 | struct acpi_mem *mem, *mem1; | | 785 | struct acpi_mem *mem, *mem1; |
783 | struct acpi_irq *irq; | | 786 | struct acpi_irq *irq; |
784 | ACPI_INTEGER max_spd, max_frame, phy_id, phy_freq; | | 787 | ACPI_INTEGER max_spd, max_frame, phy_id, phy_freq; |
785 | ACPI_STATUS rv; | | 788 | ACPI_STATUS rv; |
786 | void *intrh; | | 789 | void *intrh; |
787 | | | 790 | |
788 | rv = acpi_resource_parse(self, handle, "_CRS", | | 791 | rv = acpi_resource_parse(self, handle, "_CRS", |
789 | &res, &acpi_resource_parse_ops_default); | | 792 | &res, &acpi_resource_parse_ops_default); |
790 | if (ACPI_FAILURE(rv)) | | 793 | if (ACPI_FAILURE(rv)) |
791 | return; | | 794 | return; |
792 | mem = acpi_res_mem(&res, 0); | | 795 | mem = acpi_res_mem(&res, 0); |
793 | irq = acpi_res_irq(&res, 0); | | 796 | irq = acpi_res_irq(&res, 0); |
794 | if (mem == NULL || irq == NULL || mem->ar_length == 0) { | | 797 | if (mem == NULL || irq == NULL || mem->ar_length == 0) { |
795 | aprint_error(": incomplete crs resources\n"); | | 798 | aprint_error(": incomplete crs resources\n"); |
796 | goto done; | | 799 | goto done; |
797 | } | | 800 | } |
798 | if (bus_space_map(aa->aa_memt, mem->ar_base, mem->ar_length, 0, | | 801 | if (bus_space_map(aa->aa_memt, mem->ar_base, mem->ar_length, 0, |
799 | &bsh) != 0) { | | 802 | &bsh) != 0) { |
800 | aprint_error(": unable to map registers\n"); | | 803 | aprint_error(": unable to map registers\n"); |
801 | goto done; | | 804 | goto done; |
802 | } | | 805 | } |
803 | mem1 = acpi_res_mem(&res, 1); /* EEPROM for MAC address and ucode */ | | 806 | mem1 = acpi_res_mem(&res, 1); /* EEPROM for MAC address and ucode */ |
804 | if (mem1 == NULL || mem1->ar_length == 0) { | | 807 | if (mem1 == NULL || mem1->ar_length == 0) { |
805 | aprint_error(": incomplete eeprom resources\n"); | | 808 | aprint_error(": incomplete eeprom resources\n"); |
806 | goto fail_0; | | 809 | goto fail_0; |
807 | } | | 810 | } |
808 | if (bus_space_map(aa->aa_memt, mem1->ar_base, mem1->ar_length, 0, | | 811 | if (bus_space_map(aa->aa_memt, mem1->ar_base, mem1->ar_length, 0, |
809 | &eebsh)) { | | 812 | &eebsh)) { |
810 | aprint_error(": unable to map device eeprom\n"); | | 813 | aprint_error(": unable to map device eeprom\n"); |
811 | goto fail_0; | | 814 | goto fail_0; |
812 | } | | 815 | } |
813 | rv = acpi_dsd_integer(handle, "max-speed", &max_spd); | | 816 | rv = acpi_dsd_integer(handle, "max-speed", &max_spd); |
814 | if (ACPI_FAILURE(rv)) | | 817 | if (ACPI_FAILURE(rv)) |
815 | max_spd = 1000; | | 818 | max_spd = 1000; |
816 | rv = acpi_dsd_integer(handle, "max-frame-size", &max_frame); | | 819 | rv = acpi_dsd_integer(handle, "max-frame-size", &max_frame); |
817 | if (ACPI_FAILURE(rv)) | | 820 | if (ACPI_FAILURE(rv)) |
818 | max_frame = 2048; | | 821 | max_frame = 2048; |
819 | rv = acpi_dsd_integer(handle, "phy-channel", &phy_id); | | 822 | rv = acpi_dsd_integer(handle, "phy-channel", &phy_id); |
820 | if (ACPI_FAILURE(rv)) | | 823 | if (ACPI_FAILURE(rv)) |
821 | phy_id = MII_PHY_ANY; | | 824 | phy_id = MII_PHY_ANY; |
822 | rv = acpi_dsd_integer(handle, "socionext,phy-clock-frequency", | | 825 | rv = acpi_dsd_integer(handle, "socionext,phy-clock-frequency", |
823 | &phy_freq); | | 826 | &phy_freq); |
824 | if (ACPI_FAILURE(rv)) | | 827 | if (ACPI_FAILURE(rv)) |
825 | phy_freq = 250 * 1000 * 1000; | | 828 | phy_freq = 250 * 1000 * 1000; |
826 | | | 829 | |
827 | #if ATTACH_DEBUG == 1 | | 830 | #if ATTACH_DEBUG == 1 |
828 | aprint_normal_dev(self, | | 831 | aprint_normal_dev(self, |
829 | "[ACPI] max-speed %d, phy id %d, freq %ld\n", | | 832 | "[ACPI] max-speed %d, phy id %d, freq %ld\n", |
830 | (int)max_spd, (int)phy_id, phy_freq); | | 833 | (int)max_spd, (int)phy_id, phy_freq); |
831 | aprint_normal("%s", device_xname(self)); | | 834 | aprint_normal("%s", device_xname(self)); |
832 | #endif | | 835 | #endif |
833 | | | 836 | |
834 | intrh = acpi_intr_establish(self, (uint64_t)(uintptr_t)handle, | | 837 | intrh = acpi_intr_establish(self, (uint64_t)(uintptr_t)handle, |
835 | IPL_NET, NOT_MP_SAFE, scx_intr, sc, device_xname(self)); | | 838 | IPL_NET, NOT_MP_SAFE, scx_intr, sc, device_xname(self)); |
836 | if (intrh == NULL) { | | 839 | if (intrh == NULL) { |
837 | aprint_error(": couldn't establish interrupt\n"); | | 840 | aprint_error(": couldn't establish interrupt\n"); |
838 | goto fail_1; | | 841 | goto fail_1; |
839 | } | | 842 | } |
840 | | | 843 | |
841 | sc->sc_dev = self; | | 844 | sc->sc_dev = self; |
842 | sc->sc_st = aa->aa_memt; | | 845 | sc->sc_st = aa->aa_memt; |
843 | sc->sc_sh = bsh; | | 846 | sc->sc_sh = bsh; |
844 | sc->sc_sz = mem->ar_length; | | 847 | sc->sc_sz = mem->ar_length; |
845 | sc->sc_eesh = eebsh; | | 848 | sc->sc_eesh = eebsh; |
846 | sc->sc_eesz = mem1->ar_length; | | 849 | sc->sc_eesz = mem1->ar_length; |
847 | sc->sc_ih = intrh; | | 850 | sc->sc_ih = intrh; |
848 | sc->sc_dmat = | | 851 | sc->sc_dmat = |
849 | BUS_DMA_TAG_VALID(aa->aa_dmat64) ? aa->aa_dmat64 : aa->aa_dmat; | | 852 | BUS_DMA_TAG_VALID(aa->aa_dmat64) ? aa->aa_dmat64 : aa->aa_dmat; |
850 | sc->sc_phy_id = (int)phy_id; | | 853 | sc->sc_phy_id = (int)phy_id; |
851 | sc->sc_freq = phy_freq; | | 854 | sc->sc_freq = phy_freq; |
852 | sc->sc_maxsize = max_frame; | | 855 | sc->sc_maxsize = max_frame; |
853 | | | 856 | |
854 | scx_attach_i(sc); | | 857 | scx_attach_i(sc); |
855 | done: | | 858 | done: |
856 | acpi_resource_cleanup(&res); | | 859 | acpi_resource_cleanup(&res); |
857 | return; | | 860 | return; |
858 | fail_1: | | 861 | fail_1: |
859 | bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz); | | 862 | bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz); |
860 | fail_0: | | 863 | fail_0: |
861 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); | | 864 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); |
862 | acpi_resource_cleanup(&res); | | 865 | acpi_resource_cleanup(&res); |
863 | return; | | 866 | return; |
864 | } | | 867 | } |
865 | | | 868 | |
866 | static void | | 869 | static void |
867 | scx_attach_i(struct scx_softc *sc) | | 870 | scx_attach_i(struct scx_softc *sc) |
868 | { | | 871 | { |
869 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; | | 872 | struct ifnet * const ifp = &sc->sc_ethercom.ec_if; |
870 | struct mii_data * const mii = &sc->sc_mii; | | 873 | struct mii_data * const mii = &sc->sc_mii; |
871 | struct ifmedia * const ifm = &mii->mii_media; | | 874 | struct ifmedia * const ifm = &mii->mii_media; |
872 | uint32_t which, dwimp, dwfea; | | 875 | uint32_t which, dwimp, dwfea; |
873 | uint8_t enaddr[ETHER_ADDR_LEN]; | | 876 | uint8_t enaddr[ETHER_ADDR_LEN]; |
874 | bus_dma_segment_t seg; | | 877 | bus_dma_segment_t seg; |
875 | paddr_t p, q; | | 878 | paddr_t p, q; |
876 | uint32_t csr; | | 879 | uint32_t csr; |
877 | int i, nseg, error = 0; | | 880 | int i, nseg, error = 0; |
878 | | | 881 | |
879 | which = CSR_READ(sc, HWVER); /* Socionext version 5.xx */ | | 882 | which = CSR_READ(sc, HWVER); /* Socionext version 5.xx */ |
880 | dwimp = mac_read(sc, GMACIMPL); /* DWC implementation XX.YY */ | | 883 | dwimp = mac_read(sc, GMACIMPL); /* DWC implementation XX.YY */ |
881 | dwfea = mac_read(sc, HWFEA); /* DWC feature bits */ | | 884 | dwfea = mac_read(sc, HWFEA); /* DWC feature bits */ |
882 | | | 885 | |
883 | aprint_naive("\n"); | | 886 | aprint_naive("\n"); |
884 | aprint_normal(": Socionext NetSec Gigabit Ethernet controller " | | 887 | aprint_normal(": Socionext NetSec Gigabit Ethernet controller " |
885 | "%x.%x\n", which >> 16, which & 0xffff); | | 888 | "%x.%x\n", which >> 16, which & 0xffff); |
886 | | | 889 | |
887 | aprint_normal_dev(sc->sc_dev, | | 890 | aprint_normal_dev(sc->sc_dev, |
888 | "DesignWare EMAC ver 0x%x (0x%x) hw feature %08x\n", | | 891 | "DesignWare EMAC ver 0x%x (0x%x) hw feature %08x\n", |
889 | dwimp & 0xff, dwimp >> 8, dwfea); | | 892 | dwimp & 0xff, dwimp >> 8, dwfea); |
890 | dump_hwfeature(sc); | | 893 | dump_hwfeature(sc); |
891 | | | 894 | |
892 | /* detected PHY type */ | | 895 | /* detected PHY type */ |
893 | sc->sc_miigmii = ((dwfea & __BITS(30,28) >> 28) == 0); | | 896 | sc->sc_miigmii = ((dwfea & __BITS(30,28) >> 28) == 0); |
894 | | | 897 | |
895 | /* fetch MAC address in flash 0:7, stored in big endian order */ | | 898 | /* fetch MAC address in flash 0:7, stored in big endian order */ |
896 | csr = EE_READ(sc, 0x00); | | 899 | csr = EE_READ(sc, 0x00); |
897 | enaddr[0] = csr >> 24; | | 900 | enaddr[0] = csr >> 24; |
898 | enaddr[1] = csr >> 16; | | 901 | enaddr[1] = csr >> 16; |
899 | enaddr[2] = csr >> 8; | | 902 | enaddr[2] = csr >> 8; |
900 | enaddr[3] = csr; | | 903 | enaddr[3] = csr; |
901 | csr = EE_READ(sc, 0x04); | | 904 | csr = EE_READ(sc, 0x04); |
902 | enaddr[4] = csr >> 24; | | 905 | enaddr[4] = csr >> 24; |
903 | enaddr[5] = csr >> 16; | | 906 | enaddr[5] = csr >> 16; |
904 | aprint_normal_dev(sc->sc_dev, | | 907 | aprint_normal_dev(sc->sc_dev, |
905 | "Ethernet address %s\n", ether_sprintf(enaddr)); | | 908 | "Ethernet address %s\n", ether_sprintf(enaddr)); |
906 | | | 909 | |
907 | sc->sc_mdclk = get_mdioclk(sc->sc_freq) << GAR_CLK; /* 5:2 clk ratio */ | | 910 | sc->sc_mdclk = get_mdioclk(sc->sc_freq) << GAR_CLK; /* 5:2 clk ratio */ |
908 | | | 911 | |
909 | mii->mii_ifp = ifp; | | 912 | mii->mii_ifp = ifp; |
910 | mii->mii_readreg = mii_readreg; | | 913 | mii->mii_readreg = mii_readreg; |
911 | mii->mii_writereg = mii_writereg; | | 914 | mii->mii_writereg = mii_writereg; |
912 | mii->mii_statchg = mii_statchg; | | 915 | mii->mii_statchg = mii_statchg; |
913 | | | 916 | |
914 | sc->sc_ethercom.ec_mii = mii; | | 917 | sc->sc_ethercom.ec_mii = mii; |
915 | ifmedia_init(ifm, 0, ether_mediachange, scx_ifmedia_sts); | | 918 | ifmedia_init(ifm, 0, ether_mediachange, scx_ifmedia_sts); |
916 | mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, | | 919 | mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phy_id, |
917 | MII_OFFSET_ANY, MIIF_DOPAUSE); | | 920 | MII_OFFSET_ANY, MIIF_DOPAUSE); |
918 | if (LIST_FIRST(&mii->mii_phys) == NULL) { | | 921 | if (LIST_FIRST(&mii->mii_phys) == NULL) { |
919 | ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); | | 922 | ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); |
920 | ifmedia_set(ifm, IFM_ETHER | IFM_NONE); | | 923 | ifmedia_set(ifm, IFM_ETHER | IFM_NONE); |
921 | } else | | 924 | } else |
922 | ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); | | 925 | ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); |
923 | ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */ | | 926 | ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */ |
924 | | | 927 | |
925 | /* | | 928 | /* |
926 | * Allocate the control data structures, and create and load the | | 929 | * Allocate the control data structures, and create and load the |
927 | * DMA map for it. | | 930 | * DMA map for it. |
928 | */ | | 931 | */ |
929 | error = bus_dmamem_alloc(sc->sc_dmat, | | 932 | error = bus_dmamem_alloc(sc->sc_dmat, |
930 | sizeof(struct control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0); | | 933 | sizeof(struct control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0); |
931 | if (error != 0) { | | 934 | if (error != 0) { |
932 | aprint_error_dev(sc->sc_dev, | | 935 | aprint_error_dev(sc->sc_dev, |
933 | "unable to allocate control data, error = %d\n", error); | | 936 | "unable to allocate control data, error = %d\n", error); |
934 | goto fail_0; | | 937 | goto fail_0; |
935 | } | | 938 | } |
936 | error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, | | 939 | error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, |
937 | sizeof(struct control_data), (void **)&sc->sc_control_data, | | 940 | sizeof(struct control_data), (void **)&sc->sc_control_data, |
938 | BUS_DMA_COHERENT); | | 941 | BUS_DMA_COHERENT); |
939 | if (error != 0) { | | 942 | if (error != 0) { |
940 | aprint_error_dev(sc->sc_dev, | | 943 | aprint_error_dev(sc->sc_dev, |
941 | "unable to map control data, error = %d\n", error); | | 944 | "unable to map control data, error = %d\n", error); |
942 | goto fail_1; | | 945 | goto fail_1; |
943 | } | | 946 | } |
944 | error = bus_dmamap_create(sc->sc_dmat, | | 947 | error = bus_dmamap_create(sc->sc_dmat, |
945 | sizeof(struct control_data), 1, | | 948 | sizeof(struct control_data), 1, |
946 | sizeof(struct control_data), 0, 0, &sc->sc_cddmamap); | | 949 | sizeof(struct control_data), 0, 0, &sc->sc_cddmamap); |
947 | if (error != 0) { | | 950 | if (error != 0) { |
948 | aprint_error_dev(sc->sc_dev, | | 951 | aprint_error_dev(sc->sc_dev, |
949 | "unable to create control data DMA map, " | | 952 | "unable to create control data DMA map, " |
950 | "error = %d\n", error); | | 953 | "error = %d\n", error); |
951 | goto fail_2; | | 954 | goto fail_2; |
952 | } | | 955 | } |
953 | error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, | | 956 | error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, |
954 | sc->sc_control_data, sizeof(struct control_data), NULL, 0); | | 957 | sc->sc_control_data, sizeof(struct control_data), NULL, 0); |
955 | if (error != 0) { | | 958 | if (error != 0) { |
956 | aprint_error_dev(sc->sc_dev, | | 959 | aprint_error_dev(sc->sc_dev, |
957 | "unable to load control data DMA map, error = %d\n", | | 960 | "unable to load control data DMA map, error = %d\n", |
958 | error); | | 961 | error); |
959 | goto fail_3; | | 962 | goto fail_3; |
960 | } | | 963 | } |
961 | for (i = 0; i < MD_TXQUEUELEN; i++) { | | 964 | for (i = 0; i < MD_TXQUEUELEN; i++) { |
962 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, | | 965 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
963 | MD_NTXSEGS, MCLBYTES, 0, 0, | | 966 | MD_NTXSEGS, MCLBYTES, 0, 0, |
964 | &sc->sc_txsoft[i].txs_dmamap)) != 0) { | | 967 | &sc->sc_txsoft[i].txs_dmamap)) != 0) { |
965 | aprint_error_dev(sc->sc_dev, | | 968 | aprint_error_dev(sc->sc_dev, |
966 | "unable to create tx DMA map %d, error = %d\n", | | 969 | "unable to create tx DMA map %d, error = %d\n", |
967 | i, error); | | 970 | i, error); |
968 | goto fail_4; | | 971 | goto fail_4; |
969 | } | | 972 | } |
970 | } | | 973 | } |
971 | for (i = 0; i < MD_NRXDESC; i++) { | | 974 | for (i = 0; i < MD_NRXDESC; i++) { |
972 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, | | 975 | if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, |
973 | 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { | | 976 | 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { |
974 | aprint_error_dev(sc->sc_dev, | | 977 | aprint_error_dev(sc->sc_dev, |
975 | "unable to create rx DMA map %d, error = %d\n", | | 978 | "unable to create rx DMA map %d, error = %d\n", |
976 | i, error); | | 979 | i, error); |
977 | goto fail_5; | | 980 | goto fail_5; |
978 | } | | 981 | } |
979 | sc->sc_rxsoft[i].rxs_mbuf = NULL; | | 982 | sc->sc_rxsoft[i].rxs_mbuf = NULL; |
980 | } | | 983 | } |
981 | sc->sc_seg = seg; | | 984 | sc->sc_seg = seg; |
982 | sc->sc_nseg = nseg; | | 985 | sc->sc_nseg = nseg; |
983 | #if 0 | | 986 | #if 0 |
984 | aprint_normal_dev(sc->sc_dev, "descriptor ds_addr %lx, ds_len %lx, nseg %d\n", seg.ds_addr, seg.ds_len, nseg); | | 987 | aprint_normal_dev(sc->sc_dev, "descriptor ds_addr %lx, ds_len %lx, nseg %d\n", seg.ds_addr, seg.ds_len, nseg); |
985 | #endif | | 988 | #endif |
986 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); | | 989 | strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); |
987 | ifp->if_softc = sc; | | 990 | ifp->if_softc = sc; |
988 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 991 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
989 | ifp->if_ioctl = scx_ioctl; | | 992 | ifp->if_ioctl = scx_ioctl; |
990 | ifp->if_start = scx_start; | | 993 | ifp->if_start = scx_start; |
991 | ifp->if_watchdog = scx_watchdog; | | 994 | ifp->if_watchdog = scx_watchdog; |
992 | ifp->if_init = scx_init; | | 995 | ifp->if_init = scx_init; |
993 | ifp->if_stop = scx_stop; | | 996 | ifp->if_stop = scx_stop; |
994 | IFQ_SET_READY(&ifp->if_snd); | | 997 | IFQ_SET_READY(&ifp->if_snd); |
995 | | | 998 | |
996 | /* 802.1Q VLAN-sized frames, and 9000 jumbo frame are supported */ | | 999 | /* 802.1Q VLAN-sized frames, and 9000 jumbo frame are supported */ |
997 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; | | 1000 | sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; |
998 | /* sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; not yet */ | | 1001 | /* sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; not yet */ |
999 | | | 1002 | |
1000 | sc->sc_flowflags = 0; /* track PAUSE flow caps */ | | 1003 | sc->sc_flowflags = 0; /* track PAUSE flow caps */ |
1001 | | | 1004 | |
1002 | if_attach(ifp); | | 1005 | if_attach(ifp); |
1003 | if_deferred_start_init(ifp, NULL); | | 1006 | if_deferred_start_init(ifp, NULL); |
1004 | ether_ifattach(ifp, enaddr); | | 1007 | ether_ifattach(ifp, enaddr); |
1005 | | | 1008 | |
1006 | callout_init(&sc->sc_callout, 0); | | 1009 | callout_init(&sc->sc_callout, 0); |
1007 | callout_setfunc(&sc->sc_callout, phy_tick, sc); | | 1010 | callout_setfunc(&sc->sc_callout, phy_tick, sc); |
1008 | | | 1011 | |
1009 | rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), | | 1012 | rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), |
1010 | RND_TYPE_NET, RND_FLAG_DEFAULT); | | 1013 | RND_TYPE_NET, RND_FLAG_DEFAULT); |
1011 | | | 1014 | |
1012 | resetuengine(sc); | | 1015 | resetuengine(sc); |
1013 | loaducode(sc); | | 1016 | loaducode(sc); |
1014 | | | 1017 | |
1015 | /* feed NetSec descriptor array base addresses and timer value */ | | 1018 | /* feed NetSec descriptor array base addresses and timer value */ |
1016 | p = SCX_CDTXADDR(sc, 0); /* tdes array (ring#0) */ | | 1019 | p = SCX_CDTXADDR(sc, 0); /* tdes array (ring#0) */ |
1017 | q = SCX_CDRXADDR(sc, 0); /* rdes array (ring#1) */ | | 1020 | q = SCX_CDRXADDR(sc, 0); /* rdes array (ring#1) */ |
1018 | CSR_WRITE(sc, TDBA_LO, BUS_ADDR_LO32(p)); | | 1021 | CSR_WRITE(sc, TDBA_LO, BUS_ADDR_LO32(p)); |
1019 | CSR_WRITE(sc, TDBA_HI, BUS_ADDR_HI32(p)); | | 1022 | CSR_WRITE(sc, TDBA_HI, BUS_ADDR_HI32(p)); |
1020 | CSR_WRITE(sc, RDBA_LO, BUS_ADDR_LO32(q)); | | 1023 | CSR_WRITE(sc, RDBA_LO, BUS_ADDR_LO32(q)); |
1021 | CSR_WRITE(sc, RDBA_HI, BUS_ADDR_HI32(q)); | | 1024 | CSR_WRITE(sc, RDBA_HI, BUS_ADDR_HI32(q)); |
1022 | CSR_WRITE(sc, TXCONF, DESCNF_LE); /* little endian */ | | 1025 | CSR_WRITE(sc, TXCONF, DESCNF_LE); /* little endian */ |
1023 | CSR_WRITE(sc, RXCONF, DESCNF_LE); /* little endian */ | | 1026 | CSR_WRITE(sc, RXCONF, DESCNF_LE); /* little endian */ |
1024 | CSR_WRITE(sc, DMACTL_TMR, sc->sc_freq / 1000000 - 1); | | 1027 | CSR_WRITE(sc, DMACTL_TMR, sc->sc_freq / 1000000 - 1); |
1025 | | | 1028 | |
1026 | forcephyloopback(sc);/* make PHY loopback mode for uengine init */ | | 1029 | forcephyloopback(sc);/* make PHY loopback mode for uengine init */ |
1027 | | | 1030 | |
1028 | CSR_WRITE(sc, xINTSR, IRQ_UCODE); /* pre-cautional W1C */ | | 1031 | CSR_WRITE(sc, xINTSR, IRQ_UCODE); /* pre-cautional W1C */ |
1029 | CSR_WRITE(sc, CORESTAT, 0); /* start uengine to reprogram */ | | 1032 | CSR_WRITE(sc, CORESTAT, 0); /* start uengine to reprogram */ |
1030 | error = WAIT_FOR_SET(sc, xINTSR, IRQ_UCODE); | | 1033 | error = WAIT_FOR_SET(sc, xINTSR, IRQ_UCODE); |
1031 | if (error) { | | 1034 | if (error) { |
1032 | aprint_error_dev(sc->sc_dev, "uengine start failed\n"); | | 1035 | aprint_error_dev(sc->sc_dev, "uengine start failed\n"); |
1033 | } | | 1036 | } |
1034 | CSR_WRITE(sc, xINTSR, IRQ_UCODE); /* W1C load complete report */ | | 1037 | CSR_WRITE(sc, xINTSR, IRQ_UCODE); /* W1C load complete report */ |
1035 | | | 1038 | |
1036 | resetphytonormal(sc); /* take back PHY to normal mode */ | | 1039 | resetphytonormal(sc); /* take back PHY to normal mode */ |
1037 | | | 1040 | |
1038 | CSR_WRITE(sc, DMACTL_M2H, M2H_MODE_TRANS); | | 1041 | CSR_WRITE(sc, DMACTL_M2H, M2H_MODE_TRANS); |
1039 | CSR_WRITE(sc, PKTCTRL, MODENRM); /* change to use normal mode */ | | 1042 | CSR_WRITE(sc, PKTCTRL, MODENRM); /* change to use normal mode */ |
1040 | error = WAIT_FOR_SET(sc, MODE_TRANS, T2N_DONE); | | 1043 | error = WAIT_FOR_SET(sc, MODE_TRANS, T2N_DONE); |
1041 | if (error) { | | 1044 | if (error) { |
1042 | aprint_error_dev(sc->sc_dev, "uengine mode change failed\n"); | | 1045 | aprint_error_dev(sc->sc_dev, "uengine mode change failed\n"); |
1043 | } | | 1046 | } |
1044 | | | 1047 | |
1045 | CSR_WRITE(sc, TXISR, ~0); /* clear pending emtpry/error irq */ | | 1048 | CSR_WRITE(sc, TXISR, ~0); /* clear pending emtpry/error irq */ |
1046 | CSR_WRITE(sc, xINTAE_CLR, ~0); /* disable tx / rx interrupts */ | | 1049 | CSR_WRITE(sc, xINTAE_CLR, ~0); /* disable tx / rx interrupts */ |
1047 | | | 1050 | |
1048 | return; | | 1051 | return; |
1049 | | | 1052 | |
1050 | fail_5: | | 1053 | fail_5: |
1051 | for (i = 0; i < MD_NRXDESC; i++) { | | 1054 | for (i = 0; i < MD_NRXDESC; i++) { |
1052 | if (sc->sc_rxsoft[i].rxs_dmamap != NULL) | | 1055 | if (sc->sc_rxsoft[i].rxs_dmamap != NULL) |
1053 | bus_dmamap_destroy(sc->sc_dmat, | | 1056 | bus_dmamap_destroy(sc->sc_dmat, |
1054 | sc->sc_rxsoft[i].rxs_dmamap); | | 1057 | sc->sc_rxsoft[i].rxs_dmamap); |
1055 | } | | 1058 | } |
1056 | fail_4: | | 1059 | fail_4: |
1057 | for (i = 0; i < MD_TXQUEUELEN; i++) { | | 1060 | for (i = 0; i < MD_TXQUEUELEN; i++) { |
1058 | if (sc->sc_txsoft[i].txs_dmamap != NULL) | | 1061 | if (sc->sc_txsoft[i].txs_dmamap != NULL) |
1059 | bus_dmamap_destroy(sc->sc_dmat, | | 1062 | bus_dmamap_destroy(sc->sc_dmat, |
1060 | sc->sc_txsoft[i].txs_dmamap); | | 1063 | sc->sc_txsoft[i].txs_dmamap); |
1061 | } | | 1064 | } |
1062 | bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); | | 1065 | bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); |
1063 | fail_3: | | 1066 | fail_3: |
1064 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); | | 1067 | bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); |
1065 | fail_2: | | 1068 | fail_2: |
1066 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, | | 1069 | bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, |
1067 | sizeof(struct control_data)); | | 1070 | sizeof(struct control_data)); |
1068 | fail_1: | | 1071 | fail_1: |
1069 | bus_dmamem_free(sc->sc_dmat, &seg, nseg); | | 1072 | bus_dmamem_free(sc->sc_dmat, &seg, nseg); |
1070 | fail_0: | | 1073 | fail_0: |
1071 | if (sc->sc_phandle) | | 1074 | if (sc->sc_phandle) |
1072 | fdtbus_intr_disestablish(sc->sc_phandle, sc->sc_ih); | | 1075 | fdtbus_intr_disestablish(sc->sc_phandle, sc->sc_ih); |
1073 | else | | 1076 | else |
1074 | acpi_intr_disestablish(sc->sc_ih); | | 1077 | acpi_intr_disestablish(sc->sc_ih); |
1075 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); | | 1078 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz); |
1076 | return; | | 1079 | return; |
1077 | } | | 1080 | } |
1078 | | | 1081 | |
1079 | static void | | 1082 | static void |
1080 | scx_reset(struct scx_softc *sc) | | 1083 | scx_reset(struct scx_softc *sc) |
1081 | { | | 1084 | { |
1082 | int loop = 0, busy; | | 1085 | int loop = 0, busy; |
1083 | | | 1086 | |
1084 | mac_write(sc, GMACOMR, 0); | | 1087 | mac_write(sc, GMACOMR, 0); |
1085 | mac_write(sc, GMACBMR, BMR_RST); | | 1088 | mac_write(sc, GMACBMR, BMR_RST); |
1086 | do { | | 1089 | do { |
1087 | DELAY(1); | | 1090 | DELAY(1); |
1088 | busy = mac_read(sc, GMACBMR) & BMR_RST; | | 1091 | busy = mac_read(sc, GMACBMR) & BMR_RST; |
1089 | } while (++loop < 3000 && busy); | | 1092 | } while (++loop < 3000 && busy); |
1090 | mac_write(sc, GMACBMR, _BMR); | | 1093 | mac_write(sc, GMACBMR, _BMR); |
1091 | mac_write(sc, GMACAFR, 0); | | 1094 | mac_write(sc, GMACAFR, 0); |
1092 | } | | 1095 | } |
1093 | | | 1096 | |
/*
 * ifnet if_stop callback: halt the interface.
 *
 * Stops the periodic tick and the MII layer, marks the interface not
 * running, masks every Tx/Rx interrupt source, acknowledges any
 * pending interrupt status (TXISR/RXISR writes -- presumably
 * write-1-to-clear; confirm against the hardware manual), and finally
 * clears the start-Rx/start-Tx bits in the GMAC operation mode
 * register.  The 'disable' argument required by the if_stop contract
 * is not used in this implementation.
 */
static void
scx_stop(struct ifnet *ifp, int disable)
{
	struct scx_softc *sc = ifp->if_softc;
	uint32_t csr;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	/* Mask all interrupt enables, then ack pending Tx/Rx status. */
	CSR_WRITE(sc, RXIE_CLR, ~0);
	CSR_WRITE(sc, TXIE_CLR, ~0);
	CSR_WRITE(sc, xINTAE_CLR, ~0);
	CSR_WRITE(sc, TXISR, ~0);
	CSR_WRITE(sc, RXISR, ~0);

	/* Stop the GMAC engine: drop the start-Rx/start-Tx bits. */
	csr = mac_read(sc, GMACOMR);
	mac_write(sc, GMACOMR, csr &~ (OMR_SR | OMR_ST));
}
1119 | | | 1122 | |
/*
 * ifnet if_init callback: (re)initialize the hardware and software
 * state, then start the interface.
 *
 * Sequence: stop pending I/O, reset the chip, rebuild the Tx
 * descriptor ring and soft-state queue, (re)load every Rx descriptor
 * with a fresh mbuf, program the station address into perfect-match
 * slot 0, apply the receive filter, select media, reset/initialize the
 * descriptor engine, program flow control and interrupt coalescing,
 * unmask interrupts, and kick the GMAC Rx/Tx engines.
 *
 * Returns 0 on success, or an errno from Rx buffer allocation or
 * media selection (the interface is left stopped in that case).
 */
static int
scx_init(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	const uint8_t *ea = CLLADDR(ifp->if_sadl);
	uint32_t csr;
	int i, error;

	/* Cancel pending I/O. */
	scx_stop(ifp, 0);

	/* Reset the chip to a known state. */
	scx_reset(sc);

	/* build sane Tx; descriptors are little-endian in memory */
	memset(sc->sc_txdescs, 0, sizeof(struct tdes) * MD_NTXDESC);
	sc->sc_txdescs[MD_NTXDESC - 1].t0 = htole32(T0_LD); /* tie off */
	SCX_CDTXSYNC(sc, 0, MD_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = MD_NTXDESC;
	sc->sc_txnext = 0;
	for (i = 0; i < MD_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = MD_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* load Rx descriptors with fresh mbuf */
	for (i = 0; i < MD_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			SCX_INIT_RXDESC(sc, i);
	}
	sc->sc_rxdescs[MD_NRXDESC - 1].r0 = htole32(R0_LD); /* tie off */
	sc->sc_rxptr = 0;

	/* set my address in perfect match slot 0. little endian order */
	csr = (ea[3] << 24) | (ea[2] << 16) | (ea[1] << 8) | ea[0];
	mac_write(sc, GMACMAL0, csr);
	csr = (ea[5] << 8) | ea[4];
	mac_write(sc, GMACMAH0, csr);

	/* accept multicast frame or run promisc mode */
	scx_set_rcvfilt(sc);

	/* set current media */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/* reset, then initialize, the descriptor engine */
	CSR_WRITE(sc, DESC_SRST, 01);
	WAIT_FOR_CLR(sc, DESC_SRST, 01);

	CSR_WRITE(sc, DESC_INIT, 01);
	WAIT_FOR_CLR(sc, DESC_INIT, 01);

	/* feed local memory descriptor array base addresses */
	mac_write(sc, GMACRDLA, _RDLA);		/* GMAC rdes store */
	mac_write(sc, GMACTDLA, _TDLA);		/* GMAC tdes store */

	CSR_WRITE(sc, FLOWTHR, (48<<16) | 36);	/* pause|resume threshold */
	mac_write(sc, GMACFCR, 256 << 16);	/* 31:16 pause value */

	/* select PHY interface flavour */
	CSR_WRITE(sc, INTF_SEL, sc->sc_miigmii ? INTF_GMII : INTF_RGMII);

	CSR_WRITE(sc, RXCOALESC, 8);		/* Rx coalesce bound */
	CSR_WRITE(sc, TXCOALESC, 8);		/* Tx coalesce bound */
	CSR_WRITE(sc, RCLSCTIME, 500);		/* Rx co. guard time usec */
	CSR_WRITE(sc, TCLSCTIME, 500);		/* Tx co. guard time usec */

	/* unmask the interrupt sources this driver services */
	CSR_WRITE(sc, RXIE_SET, RXI_RC_ERR | RXI_PKTCNT | RXI_TMREXP);
	CSR_WRITE(sc, TXIE_SET, TXI_TR_ERR | TXI_TXDONE | TXI_TMREXP);
	CSR_WRITE(sc, xINTAE_SET, IRQ_RX | IRQ_TX);
#if 1
	/* clear event counters, auto-zero after every read */
	mac_write(sc, GMACEVCTL, EVC_CR | EVC_ROR);
#endif
	/* kick to start GMAC engine */
	csr = mac_read(sc, GMACOMR);
	mac_write(sc, GMACOMR, csr | OMR_SR | OMR_ST);

	ifp->if_flags |= IFF_RUNNING;

	/* start one second timer */
	callout_schedule(&sc->sc_callout, hz);
 out:
	return error;
}
1216 | | | 1219 | |
1217 | static int | | 1220 | static int |
1218 | scx_ioctl(struct ifnet *ifp, u_long cmd, void *data) | | 1221 | scx_ioctl(struct ifnet *ifp, u_long cmd, void *data) |
1219 | { | | 1222 | { |
1220 | struct scx_softc *sc = ifp->if_softc; | | 1223 | struct scx_softc *sc = ifp->if_softc; |
1221 | struct ifreq *ifr = (struct ifreq *)data; | | 1224 | struct ifreq *ifr = (struct ifreq *)data; |
1222 | struct ifmedia *ifm = &sc->sc_mii.mii_media; | | 1225 | struct ifmedia *ifm = &sc->sc_mii.mii_media; |
1223 | int s, error; | | 1226 | int s, error; |
1224 | | | 1227 | |
1225 | s = splnet(); | | 1228 | s = splnet(); |
1226 | | | 1229 | |
1227 | switch (cmd) { | | 1230 | switch (cmd) { |
1228 | case SIOCSIFMEDIA: | | 1231 | case SIOCSIFMEDIA: |
1229 | /* Flow control requires full-duplex mode. */ | | 1232 | /* Flow control requires full-duplex mode. */ |
1230 | if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || | | 1233 | if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || |
1231 | (ifr->ifr_media & IFM_FDX) == 0) | | 1234 | (ifr->ifr_media & IFM_FDX) == 0) |
1232 | ifr->ifr_media &= ~IFM_ETH_FMASK; | | 1235 | ifr->ifr_media &= ~IFM_ETH_FMASK; |
1233 | if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { | | 1236 | if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { |
1234 | if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { | | 1237 | if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { |
1235 | /* We can do both TXPAUSE and RXPAUSE. */ | | 1238 | /* We can do both TXPAUSE and RXPAUSE. */ |
1236 | ifr->ifr_media |= | | 1239 | ifr->ifr_media |= |
1237 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; | | 1240 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; |
1238 | } | | 1241 | } |
1239 | sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; | | 1242 | sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; |
1240 | } | | 1243 | } |
1241 | error = ifmedia_ioctl(ifp, ifr, ifm, cmd); | | 1244 | error = ifmedia_ioctl(ifp, ifr, ifm, cmd); |
1242 | break; | | 1245 | break; |
1243 | default: | | 1246 | default: |
1244 | error = ether_ioctl(ifp, cmd, data); | | 1247 | error = ether_ioctl(ifp, cmd, data); |
1245 | if (error != ENETRESET) | | 1248 | if (error != ENETRESET) |
1246 | break; | | 1249 | break; |
1247 | error = 0; | | 1250 | error = 0; |
1248 | if (cmd == SIOCSIFCAP) | | 1251 | if (cmd == SIOCSIFCAP) |
1249 | error = if_init(ifp); | | 1252 | error = if_init(ifp); |
1250 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) | | 1253 | if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) |
1251 | ; | | 1254 | ; |
1252 | else if (ifp->if_flags & IFF_RUNNING) { | | 1255 | else if (ifp->if_flags & IFF_RUNNING) { |
1253 | /* | | 1256 | /* |
1254 | * Multicast list has changed; set the hardware filter | | 1257 | * Multicast list has changed; set the hardware filter |
1255 | * accordingly. | | 1258 | * accordingly. |
1256 | */ | | 1259 | */ |
1257 | scx_set_rcvfilt(sc); | | 1260 | scx_set_rcvfilt(sc); |
1258 | } | | 1261 | } |
1259 | break; | | 1262 | break; |
1260 | } | | 1263 | } |
1261 | | | 1264 | |
1262 | splx(s); | | 1265 | splx(s); |
1263 | return error; | | 1266 | return error; |
1264 | } | | 1267 | } |
1265 | | | 1268 | |
/*
 * Return the 32-bit word with its bit order reversed
 * (bit 0 swaps with bit 31, bit 1 with bit 30, and so on).
 * Used below to derive the multicast hash index.
 */
static uint32_t
bit_reverse_32(uint32_t x)
{
	uint32_t r;
	int n;

	r = 0;
	for (n = 0; n < 32; n++) {
		r = (r << 1) | (x & 1);
		x >>= 1;
	}
	return r;
}
1275 | | | 1278 | |
1276 | #define MCAST_DEBUG 0 | | 1279 | #define MCAST_DEBUG 0 |
1277 | | | 1280 | |
/*
 * Program the GMAC receive address filter from the current interface
 * state.
 *
 * Slot 0 of the perfect-match filter holds the station address (set in
 * scx_init) and is never touched here.  Promiscuous mode sets AFR_PR;
 * a multicast address range sets AFR_PM (accept-all-multicast).
 * Otherwise the first 15 multicast addresses go into perfect-match
 * slots 1-15 and any overflow is folded into the 64-bit hash table,
 * indexed by 6 bits sampled from the bit-reversed inverted CRC32 of
 * the address.
 */
static void
scx_set_rcvfilt(struct scx_softc *sc)
{
	struct ethercom * const ec = &sc->sc_ethercom;
	struct ifnet * const ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t mchash[2]; 	/* 2x 32 = 64 bit */
	uint32_t csr, crc;
	int i;

	/* start from a clean filter mode; keep other AFR bits intact */
	csr = mac_read(sc, GMACAFR);
	csr &= ~(AFR_PR | AFR_PM | AFR_MHTE | AFR_HPF);
	mac_write(sc, GMACAFR, csr);

	/* clear 15 entry supplemental perfect match filter */
	for (i = 1; i < 16; i++)
		 mac_write(sc, GMACMAH(i), 0);
	/* build 64 bit multicast hash filter */
	crc = mchash[1] = mchash[0] = 0;

	ETHER_LOCK(ec);
	if (ifp->if_flags & IFF_PROMISC) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		/* run promisc. mode */
		csr |= AFR_PR;
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	i = 1; /* slot 0 is occupied */
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			/* accept all multi */
			csr |= AFR_PM;
			goto update;
		}
#if MCAST_DEBUG == 1
aprint_normal_dev(sc->sc_dev, "[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
#endif
		if (i < 16) {
			/* use 15 entry perfect match filter */
			uint32_t addr;
			uint8_t *ep = enm->enm_addrlo;
			addr = (ep[3] << 24) | (ep[2] << 16)
			     | (ep[1] <<  8) |  ep[0];
			mac_write(sc, GMACMAL(i), addr);
			addr = (ep[5] << 8) | ep[4];
			/* bit 31 marks the slot valid */
			mac_write(sc, GMACMAH(i), addr | 1U<<31);
		} else {
			/* use hash table when too many */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			crc = bit_reverse_32(~crc);
			/* 1(31) 5(30:26) bit sampling */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);
	/* crc != 0 implies at least one address went to the hash table */
	if (crc)
		csr |= AFR_MHTE; /* use mchash[] */
	csr |= AFR_HPF; /* use perfect match as well */
 update:
	mac_write(sc, GMACMHTH, mchash[1]);
	mac_write(sc, GMACMHTL, mchash[0]);
	mac_write(sc, GMACAFR, csr);
	return;
}
1358 | | | 1361 | |
/*
 * ifnet if_start callback: drain the send queue into the Tx
 * descriptor ring.
 *
 * Each frame is DMA-loaded and described by one descriptor per
 * segment.  The OWN bit of the first descriptor is written last, after
 * every other descriptor of the frame has been filled in, so the
 * hardware never sees a partially built chain.  A watchdog timer is
 * armed whenever new work was queued.
 */
static void
scx_start(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct scx_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		/* Reap finished jobs when soft-state slots run low. */
		if (sc->sc_txsfree < MD_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				if_statinc_ref(ifp, if_oerrors);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.
			 */
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
		lasttx = -1;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = MD_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			bus_addr_t p = dmamap->dm_segs[seg].ds_addr;
			bus_size_t z = dmamap->dm_segs[seg].ds_len;
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t3 = htole32(z);
			tdes->t2 = htole32(BUS_ADDR_LO32(p));
			tdes->t1 = htole32(BUS_ADDR_HI32(p));
			/* preserve only the ring tie-off bit in t0 */
			tdes->t0 &= htole32(T0_LD);
			tdes->t0 |= htole32(tdes0 |
					(15 << T0_TDRID) | T0_PT |
					sc->sc_t0cotso | T0_TRS);
			tdes0 = T0_OWN; /* 2nd and other segments */
			/* NB; t0 DRID field contains zero */
			lasttx = nexttx;
		}

		/* HW lacks of per-frame xmit done interrupt control */

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t0 |= htole32(T0_LS);
		sc->sc_txdescs[sc->sc_txnext].t0 |= htole32(T0_FS | T0_OWN);
		SCX_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* submit one frame to xmit */
		CSR_WRITE(sc, TXSUBMIT, 1);

		/* record the job so txreap() can reclaim it later */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = MD_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1478 | | | 1481 | |
1479 | #define EVENT_DEBUG 1 | | 1482 | #define EVENT_DEBUG 1 |
1480 | | | 1483 | |
1481 | static void | | 1484 | static void |
1482 | scx_watchdog(struct ifnet *ifp) | | 1485 | scx_watchdog(struct ifnet *ifp) |
1483 | { | | 1486 | { |
1484 | struct scx_softc *sc = ifp->if_softc; | | 1487 | struct scx_softc *sc = ifp->if_softc; |
1485 | | | 1488 | |
1486 | /* | | 1489 | /* |
1487 | * Since we're not interrupting every packet, sweep | | 1490 | * Since we're not interrupting every packet, sweep |
1488 | * up before we report an error. | | 1491 | * up before we report an error. |
1489 | */ | | 1492 | */ |
1490 | txreap(sc); | | 1493 | txreap(sc); |
1491 | | | 1494 | |
1492 | if (sc->sc_txfree != MD_NTXDESC) { | | 1495 | if (sc->sc_txfree != MD_NTXDESC) { |
1493 | aprint_error_dev(sc->sc_dev, | | 1496 | aprint_error_dev(sc->sc_dev, |
1494 | "device timeout (txfree %d txsfree %d txnext %d)\n", | | 1497 | "device timeout (txfree %d txsfree %d txnext %d)\n", |
1495 | sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext); | | 1498 | sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext); |
1496 | if_statinc(ifp, if_oerrors); | | 1499 | if_statinc(ifp, if_oerrors); |
1497 | #if EVENT_DEBUG == 1 | | 1500 | #if EVENT_DEBUG == 1 |
1498 | aprint_error_dev(sc->sc_dev, | | 1501 | aprint_error_dev(sc->sc_dev, |
1499 | "tx frames %d, octects %d, bcast %d, mcast %d\n", | | 1502 | "tx frames %d, octects %d, bcast %d, mcast %d\n", |
1500 | mac_read(sc, GMACEVCNT(1)), | | 1503 | mac_read(sc, GMACEVCNT(1)), |
1501 | mac_read(sc, GMACEVCNT(0)), | | 1504 | mac_read(sc, GMACEVCNT(0)), |
1502 | mac_read(sc, GMACEVCNT(2)), | | 1505 | mac_read(sc, GMACEVCNT(2)), |
1503 | mac_read(sc, GMACEVCNT(3))); | | 1506 | mac_read(sc, GMACEVCNT(3))); |
1504 | aprint_error_dev(sc->sc_dev, | | 1507 | aprint_error_dev(sc->sc_dev, |
1505 | "rx frames %d, octects %d, bcast %d, mcast %d\n", | | 1508 | "rx frames %d, octects %d, bcast %d, mcast %d\n", |
1506 | mac_read(sc, GMACEVCNT(27)), | | 1509 | mac_read(sc, GMACEVCNT(27)), |
1507 | mac_read(sc, GMACEVCNT(28)), | | 1510 | mac_read(sc, GMACEVCNT(28)), |
1508 | mac_read(sc, GMACEVCNT(30)), | | 1511 | mac_read(sc, GMACEVCNT(30)), |
1509 | mac_read(sc, GMACEVCNT(31))); | | 1512 | mac_read(sc, GMACEVCNT(31))); |
1510 | aprint_error_dev(sc->sc_dev, | | 1513 | aprint_error_dev(sc->sc_dev, |
1511 | "current tdes addr %x, buf addr %x\n", | | 1514 | "current tdes addr %x, buf addr %x\n", |
1512 | mac_read(sc, 0x1048), mac_read(sc, 0x1050)); | | 1515 | mac_read(sc, 0x1048), mac_read(sc, 0x1050)); |
1513 | aprint_error_dev(sc->sc_dev, | | 1516 | aprint_error_dev(sc->sc_dev, |
1514 | "current rdes addr %x, buf addr %x\n", | | 1517 | "current rdes addr %x, buf addr %x\n", |
1515 | mac_read(sc, 0x104c), mac_read(sc, 0x1054)); | | 1518 | mac_read(sc, 0x104c), mac_read(sc, 0x1054)); |
1516 | #endif | | 1519 | #endif |
1517 | /* Reset the interface. */ | | 1520 | /* Reset the interface. */ |
1518 | scx_init(ifp); | | 1521 | scx_init(ifp); |
1519 | } | | 1522 | } |
1520 | | | 1523 | |
1521 | scx_start(ifp); | | 1524 | scx_start(ifp); |
1522 | } | | 1525 | } |
1523 | | | 1526 | |
/*
 * Primary interrupt handler.  Returns 1 when the interrupt belonged to
 * this device, 0 otherwise (shared interrupt line).
 *
 * The aggregate cause register (xINTSR) is not write-1-to-clear;
 * instead the Tx/Rx sub-cause registers are acknowledged individually.
 * The aggregate IRQ_TX/IRQ_RX enables are masked for the duration of
 * servicing and restored at the end.
 */
static int
scx_intr(void *arg)
{
	struct scx_softc *sc = arg;
	uint32_t enable, status;

	status = CSR_READ(sc, xINTSR);	/* not W1C */
	enable = CSR_READ(sc, xINTAEN);
	if ((status & enable) == 0)
		return 0;	/* not ours */
	if (status & (IRQ_TX | IRQ_RX)) {
		/* mask Tx/Rx while we service them */
		CSR_WRITE(sc, xINTAE_CLR, (IRQ_TX | IRQ_RX));

		/* ack Rx sub-causes; log errors, refill on packet/timer */
		status = CSR_READ(sc, RXISR);
		CSR_WRITE(sc, RXISR, status);
		if (status & RXI_RC_ERR)
			aprint_error_dev(sc->sc_dev, "Rx error\n");
		if (status & (RXI_PKTCNT | RXI_TMREXP)) {
			rxfill(sc);
			(void)CSR_READ(sc, RXAVAILCNT); /* clear IRQ_RX ? */
		}

		/* ack Tx sub-causes; log errors, reap on done/timer */
		status = CSR_READ(sc, TXISR);
		CSR_WRITE(sc, TXISR, status);
		if (status & TXI_TR_ERR)
			aprint_error_dev(sc->sc_dev, "Tx error\n");
		if (status & (TXI_TXDONE | TXI_TMREXP)) {
			txreap(sc);
			(void)CSR_READ(sc, TXDONECNT); /* clear IRQ_TX ? */
		}

		/* re-enable Tx/Rx aggregate interrupts */
		CSR_WRITE(sc, xINTAE_SET, (IRQ_TX | IRQ_RX));
	}
	return 1;
}
1559 | | | 1562 | |
/*
 * Reclaim completed transmit jobs.  Walks the tx soft-state ring from
 * sc_txsdirty and stops at the first job whose last descriptor is still
 * owned by the hardware (T0_OWN).  For each finished job the DMA map is
 * unloaded, the mbuf freed, and the descriptor count returned to
 * sc_txfree.  Clears the watchdog timer once the queue is fully drained.
 */
static void
txreap(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_txsoft *txs;
	uint32_t txstat;
	int i;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != MD_TXQUEUELEN;
	     i = MD_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		/* sync descriptors before inspecting the status word */
		SCX_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(sc->sc_txdescs[txs->txs_lastdesc].t0);
		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	/* all jobs reaped; disarm the watchdog */
	if (sc->sc_txsfree == MD_TXQUEUELEN)
		ifp->if_timer = 0;
}
1594 | | | 1597 | |
/*
 * Harvest received frames and pass them up the stack, replenishing each
 * consumed ring slot with a fresh mbuf cluster.  Stops at the first
 * descriptor still owned by the hardware (R0_OWN).  On mbuf shortage
 * the frame is dropped and the old buffer is re-armed for reuse.
 */
static void
rxfill(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat, rlen;
	int i;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = MD_NEXTRX(i)) {
		SCX_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(sc->sc_rxdescs[i].r0);
		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* received frame length in R3 31:16 */
		rlen = le32toh(sc->sc_rxdescs[i].r3) >> 16;

		/* R0_FS | R0_LS must have been marked for this desc */
		rxs = &sc->sc_rxsoft[i];
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* dispense new storage to receive frame */
		m = rxs->rxs_mbuf;
		if (add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors); /* resource shortage */
			SCX_INIT_RXDESC(sc, i);	     /* then reuse */
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}
		/* complete mbuf */
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = rlen;
		m->m_flags |= M_HASFCS;	/* frame length includes FCS */
		if (rxstat & R0_CSUM) {
			/* hardware validated the IPv4 header checksum */
			uint32_t csum = M_CSUM_IPv4;
			if (rxstat & R0_CERR)
				csum |= M_CSUM_IPv4_BAD;
			m->m_pkthdr.csum_flags |= csum;
		}
		/* and pass to upper layer */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
	sc->sc_rxptr = i;
}
1646 | | | 1649 | |
/*
 * Attach a newly allocated mbuf cluster to rx slot 'i', load and sync
 * its DMA map, and rearm the descriptor.  Returns ENOBUFS when no mbuf
 * or cluster is available.  A bus_dmamap_load() failure is treated as
 * fatal (panic) -- it is not expected for a freshly allocated cluster.
 */
static int
add_rxbuf(struct scx_softc *sc, int i)
{
	struct scx_rxsoft *rxs = &sc->sc_rxsoft[i];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	/* tear down any mapping of the previous buffer first */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
	rxs->rxs_mbuf = m;
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", i, error);
		panic("add_rxbuf");
	}
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	SCX_INIT_RXDESC(sc, i);

	return 0;
}
1678 | | | 1681 | |
1679 | static void | | 1682 | static void |
1680 | rxdrain(struct scx_softc *sc) | | 1683 | rxdrain(struct scx_softc *sc) |
1681 | { | | 1684 | { |
1682 | struct scx_rxsoft *rxs; | | 1685 | struct scx_rxsoft *rxs; |
1683 | int i; | | 1686 | int i; |
1684 | | | 1687 | |
1685 | for (i = 0; i < MD_NRXDESC; i++) { | | 1688 | for (i = 0; i < MD_NRXDESC; i++) { |
1686 | rxs = &sc->sc_rxsoft[i]; | | 1689 | rxs = &sc->sc_rxsoft[i]; |
1687 | if (rxs->rxs_mbuf != NULL) { | | 1690 | if (rxs->rxs_mbuf != NULL) { |
1688 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); | | 1691 | bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); |
1689 | m_freem(rxs->rxs_mbuf); | | 1692 | m_freem(rxs->rxs_mbuf); |
1690 | rxs->rxs_mbuf = NULL; | | 1693 | rxs->rxs_mbuf = NULL; |
1691 | } | | 1694 | } |
1692 | } | | 1695 | } |
1693 | } | | 1696 | } |
1694 | | | 1697 | |
1695 | #define LINK_DEBUG 0 | | 1698 | #define LINK_DEBUG 0 |
1696 | | | 1699 | |
/*
 * MII status-change callback.  Reads the negotiated link parameters
 * from GMACMIISR and reprograms the MAC's speed, duplex, and PAUSE
 * flow-control settings (GMACMCR/GMACFCR) to match.
 */
static void
mii_statchg(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	/* MIISR_SPD is a 2-bit field indexing this speed table */
	const int Mbps[4] = { 10, 100, 1000, 0 };
	uint32_t miisr, mcr, fcr;
	int spd;

	/* decode MIISR register value */
	miisr = mac_read(sc, GMACMIISR);
	spd = Mbps[(miisr & MIISR_SPD) >> 1];
#if LINK_DEBUG == 1
	static uint32_t oldmiisr = 0;
	if (miisr != oldmiisr) {
		printf("MII link status (0x%x) %s",
		    miisr, (miisr & MIISR_LUP) ? "up" : "down");
		if (miisr & MIISR_LUP) {
			printf(" spd%d", spd);
			if (miisr & MIISR_FDX)
				printf(",full-duplex");
		}
		printf("\n");
	}
#endif
	/* Get flow control negotiation result. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/* Adjust speed 1000/100/10. */
	mcr = mac_read(sc, GMACMCR) &~ (MCR_PS | MCR_FES);
	if (sc->sc_miigmii) {
		/* GMII: port-select off only at gigabit speed */
		if (spd != 1000)
			mcr |= MCR_PS;
	} else {
		if (spd == 100)
			mcr |= MCR_FES;
	}
	mcr |= MCR_CST | MCR_JE;
	if (sc->sc_miigmii == 0)
		mcr |= MCR_IBN;

	/* Adjust duplexity and PAUSE flow control. */
	mcr &= ~MCR_USEFDX;
	fcr = mac_read(sc, GMACFCR) & ~(FCR_TFE | FCR_RFE);
	if (miisr & MIISR_FDX) {
		/* PAUSE is meaningful only in full duplex */
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
			fcr |= FCR_TFE;
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			fcr |= FCR_RFE;
		mcr |= MCR_USEFDX;
	}
	mac_write(sc, GMACMCR, mcr);
	mac_write(sc, GMACFCR, fcr);
#if LINK_DEBUG == 1
	if (miisr != oldmiisr) {
		printf("%ctxfe, %crxfe\n",
		    (fcr & FCR_TFE) ? '+' : '-',
		    (fcr & FCR_RFE) ? '+' : '-');
	}
	oldmiisr = miisr;
#endif
}
1761 | | | 1764 | |
1762 | static void | | 1765 | static void |
1763 | scx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) | | 1766 | scx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
1764 | { | | 1767 | { |
1765 | struct scx_softc *sc = ifp->if_softc; | | 1768 | struct scx_softc *sc = ifp->if_softc; |
1766 | struct mii_data *mii = &sc->sc_mii; | | 1769 | struct mii_data *mii = &sc->sc_mii; |
1767 | | | 1770 | |
1768 | mii_pollstat(mii); | | 1771 | mii_pollstat(mii); |
1769 | ifmr->ifm_status = mii->mii_media_status; | | 1772 | ifmr->ifm_status = mii->mii_media_status; |
1770 | ifmr->ifm_active = sc->sc_flowflags | | | 1773 | ifmr->ifm_active = sc->sc_flowflags | |
1771 | (mii->mii_media_active & ~IFM_ETH_FMASK); | | 1774 | (mii->mii_media_active & ~IFM_ETH_FMASK); |
1772 | } | | 1775 | } |
1773 | | | 1776 | |
1774 | static int | | 1777 | static int |
1775 | mii_readreg(device_t self, int phy, int reg, uint16_t *val) | | 1778 | mii_readreg(device_t self, int phy, int reg, uint16_t *val) |
1776 | { | | 1779 | { |
1777 | struct scx_softc *sc = device_private(self); | | 1780 | struct scx_softc *sc = device_private(self); |
1778 | uint32_t miia; | | 1781 | uint32_t miia; |
1779 | int ntries; | | 1782 | int ntries; |
1780 | | | 1783 | |
1781 | miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk; | | 1784 | miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk; |
1782 | mac_write(sc, GMACGAR, miia | GAR_BUSY); | | 1785 | mac_write(sc, GMACGAR, miia | GAR_BUSY); |
1783 | for (ntries = 0; ntries < 1000; ntries++) { | | 1786 | for (ntries = 0; ntries < 1000; ntries++) { |
1784 | if ((mac_read(sc, GMACGAR) & GAR_BUSY) == 0) | | 1787 | if ((mac_read(sc, GMACGAR) & GAR_BUSY) == 0) |
1785 | goto unbusy; | | 1788 | goto unbusy; |
1786 | DELAY(1); | | 1789 | DELAY(1); |
1787 | } | | 1790 | } |
1788 | return ETIMEDOUT; | | 1791 | return ETIMEDOUT; |
1789 | unbusy: | | 1792 | unbusy: |
1790 | *val = mac_read(sc, GMACGDR); | | 1793 | *val = mac_read(sc, GMACGDR); |
1791 | return 0; | | 1794 | return 0; |
1792 | } | | 1795 | } |
1793 | | | 1796 | |
1794 | static int | | 1797 | static int |
1795 | mii_writereg(device_t self, int phy, int reg, uint16_t val) | | 1798 | mii_writereg(device_t self, int phy, int reg, uint16_t val) |
1796 | { | | 1799 | { |
1797 | struct scx_softc *sc = device_private(self); | | 1800 | struct scx_softc *sc = device_private(self); |
1798 | uint32_t miia; | | 1801 | uint32_t miia; |
1799 | uint16_t dummy; | | 1802 | uint16_t dummy; |
1800 | int ntries; | | 1803 | int ntries; |
1801 | | | 1804 | |
1802 | miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk; | | 1805 | miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk; |
1803 | mac_write(sc, GMACGDR, val); | | 1806 | mac_write(sc, GMACGDR, val); |
1804 | mac_write(sc, GMACGAR, miia | GAR_IOWR | GAR_BUSY); | | 1807 | mac_write(sc, GMACGAR, miia | GAR_IOWR | GAR_BUSY); |
1805 | for (ntries = 0; ntries < 1000; ntries++) { | | 1808 | for (ntries = 0; ntries < 1000; ntries++) { |
1806 | if ((mac_read(sc, GMACGAR) & GAR_BUSY) == 0) | | 1809 | if ((mac_read(sc, GMACGAR) & GAR_BUSY) == 0) |
1807 | goto unbusy; | | 1810 | goto unbusy; |
1808 | DELAY(1); | | 1811 | DELAY(1); |
1809 | } | | 1812 | } |
1810 | return ETIMEDOUT; | | 1813 | return ETIMEDOUT; |
1811 | unbusy: | | 1814 | unbusy: |
1812 | mii_readreg(self, phy, MII_PHYIDR1, &dummy); /* dummy read cycle */ | | 1815 | mii_readreg(self, phy, MII_PHYIDR1, &dummy); /* dummy read cycle */ |
1813 | return 0; | | 1816 | return 0; |
1814 | } | | 1817 | } |
1815 | | | 1818 | |
1816 | static void | | 1819 | static void |
1817 | phy_tick(void *arg) | | 1820 | phy_tick(void *arg) |
1818 | { | | 1821 | { |
1819 | struct scx_softc *sc = arg; | | 1822 | struct scx_softc *sc = arg; |
1820 | struct mii_data *mii = &sc->sc_mii; | | 1823 | struct mii_data *mii = &sc->sc_mii; |
1821 | int s; | | 1824 | int s; |
1822 | | | 1825 | |
1823 | s = splnet(); | | 1826 | s = splnet(); |
1824 | mii_tick(mii); | | 1827 | mii_tick(mii); |
1825 | splx(s); | | 1828 | splx(s); |
1826 | #ifdef GMAC_EVENT_COUNTERS | | 1829 | #ifdef GMAC_EVENT_COUNTERS |
1827 | /* 80 event counters exist */ | | 1830 | /* 80 event counters exist */ |
1828 | #endif | | 1831 | #endif |
1829 | callout_schedule(&sc->sc_callout, hz); | | 1832 | callout_schedule(&sc->sc_callout, hz); |
1830 | } | | 1833 | } |
1831 | | | 1834 | |
/*
 * Reset the packet-processing microengine.  When CORESTAT reads 0,
 * both DMA directions are first forced to a stopped state before the
 * SWRESET pulse; COMINIT then re-initializes the engine databases and
 * clusters, and we wait for the hardware to acknowledge completion.
 */
static void
resetuengine(struct scx_softc *sc)
{

	if (CSR_READ(sc, CORESTAT) == 0) {
		/* make sure to stop */
		CSR_WRITE(sc, DMACTL_H2M, DMACTL_STOP);
		CSR_WRITE(sc, DMACTL_M2H, DMACTL_STOP);
		WAIT_FOR_CLR(sc, DMACTL_H2M, DMACTL_STOP);
		WAIT_FOR_CLR(sc, DMACTL_M2H, DMACTL_STOP);
	}
	CSR_WRITE(sc, SWRESET, 0);		/* reset operation */
	CSR_WRITE(sc, SWRESET, SRST_RUN);	/* manifest run */
	CSR_WRITE(sc, COMINIT, INIT_DB | INIT_CLS);
	WAIT_FOR_CLR(sc, COMINIT, (INIT_DB | INIT_CLS));
}
1848 | | | 1851 | |
1849 | #define UCODE_DEBUG 0 | | 1852 | #define UCODE_DEBUG 0 |
1850 | | | 1853 | |
1851 | /* | | 1854 | /* |
1852 | * 3 independent uengines exist to process host2media, media2host and | | 1855 | * 3 independent uengines exist to process host2media, media2host and |
1853 | * packet data flows. | | 1856 | * packet data flows. |
1854 | */ | | 1857 | */ |
1855 | static void | | 1858 | static void |
1856 | loaducode(struct scx_softc *sc) | | 1859 | loaducode(struct scx_softc *sc) |
1857 | { | | 1860 | { |
1858 | uint32_t up, lo, sz; | | 1861 | uint32_t up, lo, sz; |
1859 | uint64_t addr; | | 1862 | uint64_t addr; |
1860 | | | 1863 | |
1861 | up = EE_READ(sc, 0x08); /* H->M ucode addr high */ | | 1864 | up = EE_READ(sc, 0x08); /* H->M ucode addr high */ |
1862 | lo = EE_READ(sc, 0x0c); /* H->M ucode addr low */ | | 1865 | lo = EE_READ(sc, 0x0c); /* H->M ucode addr low */ |
1863 | sz = EE_READ(sc, 0x10); /* H->M ucode size */ | | 1866 | sz = EE_READ(sc, 0x10); /* H->M ucode size */ |
1864 | sz *= 4; | | 1867 | sz *= 4; |
1865 | addr = ((uint64_t)up << 32) | lo; | | 1868 | addr = ((uint64_t)up << 32) | lo; |
1866 | injectucode(sc, UCODE_H2M, (bus_addr_t)addr, (bus_size_t)sz); | | 1869 | injectucode(sc, UCODE_H2M, (bus_addr_t)addr, (bus_size_t)sz); |
1867 | #if UCODE_DEBUG == 1 | | 1870 | #if UCODE_DEBUG == 1 |
1868 | aprint_normal_dev(sc->sc_dev, "0x%x H2M ucode %u\n", lo, sz); | | 1871 | aprint_normal_dev(sc->sc_dev, "0x%x H2M ucode %u\n", lo, sz); |
1869 | #endif | | 1872 | #endif |
1870 | | | 1873 | |
1871 | up = EE_READ(sc, 0x14); /* M->H ucode addr high */ | | 1874 | up = EE_READ(sc, 0x14); /* M->H ucode addr high */ |
1872 | lo = EE_READ(sc, 0x18); /* M->H ucode addr low */ | | 1875 | lo = EE_READ(sc, 0x18); /* M->H ucode addr low */ |
1873 | sz = EE_READ(sc, 0x1c); /* M->H ucode size */ | | 1876 | sz = EE_READ(sc, 0x1c); /* M->H ucode size */ |
1874 | sz *= 4; | | 1877 | sz *= 4; |
1875 | addr = ((uint64_t)up << 32) | lo; | | 1878 | addr = ((uint64_t)up << 32) | lo; |
1876 | injectucode(sc, UCODE_M2H, (bus_addr_t)addr, (bus_size_t)sz); | | 1879 | injectucode(sc, UCODE_M2H, (bus_addr_t)addr, (bus_size_t)sz); |
1877 | #if UCODE_DEBUG == 1 | | 1880 | #if UCODE_DEBUG == 1 |
1878 | aprint_normal_dev(sc->sc_dev, "0x%x M2H ucode %u\n", lo, sz); | | 1881 | aprint_normal_dev(sc->sc_dev, "0x%x M2H ucode %u\n", lo, sz); |
1879 | #endif | | 1882 | #endif |
1880 | | | 1883 | |
1881 | lo = EE_READ(sc, 0x20); /* PKT ucode addr */ | | 1884 | lo = EE_READ(sc, 0x20); /* PKT ucode addr */ |
1882 | sz = EE_READ(sc, 0x24); /* PKT ucode size */ | | 1885 | sz = EE_READ(sc, 0x24); /* PKT ucode size */ |
1883 | sz *= 4; | | 1886 | sz *= 4; |
1884 | injectucode(sc, UCODE_PKT, (bus_addr_t)lo, (bus_size_t)sz); | | 1887 | injectucode(sc, UCODE_PKT, (bus_addr_t)lo, (bus_size_t)sz); |
1885 | #if UCODE_DEBUG == 1 | | 1888 | #if UCODE_DEBUG == 1 |
1886 | aprint_normal_dev(sc->sc_dev, "0x%x PKT ucode %u\n", lo, sz); | | 1889 | aprint_normal_dev(sc->sc_dev, "0x%x PKT ucode %u\n", lo, sz); |
1887 | #endif | | 1890 | #endif |
1888 | } | | 1891 | } |
1889 | | | 1892 | |
1890 | static void | | 1893 | static void |
1891 | injectucode(struct scx_softc *sc, int port, | | 1894 | injectucode(struct scx_softc *sc, int port, |
1892 | bus_addr_t addr, bus_size_t size) | | 1895 | bus_addr_t addr, bus_size_t size) |
1893 | { | | 1896 | { |
1894 | bus_space_handle_t bsh; | | 1897 | bus_space_handle_t bsh; |
1895 | bus_size_t off; | | 1898 | bus_size_t off; |
1896 | uint32_t ucode; | | 1899 | uint32_t ucode; |
1897 | | | 1900 | |
1898 | if (bus_space_map(sc->sc_st, addr, size, 0, &bsh) != 0) { | | 1901 | if (bus_space_map(sc->sc_st, addr, size, 0, &bsh) != 0) { |
1899 | aprint_error_dev(sc->sc_dev, | | 1902 | aprint_error_dev(sc->sc_dev, |
1900 | "eeprom map failure for ucode port 0x%x\n", port); | | 1903 | "eeprom map failure for ucode port 0x%x\n", port); |
1901 | return; | | 1904 | return; |
1902 | } | | 1905 | } |
1903 | for (off = 0; off < size; off += 4) { | | 1906 | for (off = 0; off < size; off += 4) { |
1904 | ucode = bus_space_read_4(sc->sc_st, bsh, off); | | 1907 | ucode = bus_space_read_4(sc->sc_st, bsh, off); |
1905 | CSR_WRITE(sc, port, ucode); | | 1908 | CSR_WRITE(sc, port, ucode); |
1906 | } | | 1909 | } |
1907 | bus_space_unmap(sc->sc_st, bsh, size); | | 1910 | bus_space_unmap(sc->sc_st, bsh, size); |
1908 | } | | 1911 | } |
1909 | | | 1912 | |
1910 | static void | | 1913 | static void |
1911 | forcephyloopback(struct scx_softc *sc) | | 1914 | forcephyloopback(struct scx_softc *sc) |
1912 | { | | 1915 | { |
1913 | struct device *d = sc->sc_dev; | | 1916 | struct device *d = sc->sc_dev; |
1914 | uint16_t val; | | 1917 | uint16_t val; |
1915 | int loop, err; | | 1918 | int loop, err; |
1916 | | | 1919 | |
1917 | err = mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); | | 1920 | err = mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); |
1918 | if (err) { | | 1921 | if (err) { |
1919 | aprint_error_dev(d, "forcephyloopback() failed\n"); | | 1922 | aprint_error_dev(d, "forcephyloopback() failed\n"); |
1920 | return; | | 1923 | return; |
1921 | } | | 1924 | } |
1922 | if (val & BMCR_PDOWN) | | 1925 | if (val & BMCR_PDOWN) |
1923 | val &= ~BMCR_PDOWN; | | 1926 | val &= ~BMCR_PDOWN; |
1924 | val |= BMCR_ISO; | | 1927 | val |= BMCR_ISO; |
1925 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val); | | 1928 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val); |
1926 | loop = 100; | | 1929 | loop = 100; |
1927 | do { | | 1930 | do { |
1928 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); | | 1931 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); |
1929 | } while (loop-- > 0 && (val & (BMCR_PDOWN | BMCR_ISO)) != 0); | | 1932 | } while (loop-- > 0 && (val & (BMCR_PDOWN | BMCR_ISO)) != 0); |
1930 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val | BMCR_LOOP); | | 1933 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val | BMCR_LOOP); |
1931 | loop = 100; | | 1934 | loop = 100; |
1932 | do { | | 1935 | do { |
1933 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMSR, &val); | | 1936 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMSR, &val); |
1934 | } while (loop-- > 0 && (val & BMSR_LINK) != 0); | | 1937 | } while (loop-- > 0 && (val & BMSR_LINK) != 0); |
1935 | } | | 1938 | } |
1936 | | | 1939 | |
1937 | static void | | 1940 | static void |
1938 | resetphytonormal(struct scx_softc *sc) | | 1941 | resetphytonormal(struct scx_softc *sc) |
1939 | { | | 1942 | { |
1940 | struct device *d = sc->sc_dev; | | 1943 | struct device *d = sc->sc_dev; |
1941 | uint16_t val; | | 1944 | uint16_t val; |
1942 | int loop, err; | | 1945 | int loop, err; |
1943 | | | 1946 | |
1944 | err = mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); | | 1947 | err = mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); |
1945 | if (err) { | | 1948 | if (err) { |
1946 | aprint_error_dev(d, "resetphytonormal() failed\n"); | | 1949 | aprint_error_dev(d, "resetphytonormal() failed\n"); |
1947 | } | | 1950 | } |
1948 | val &= ~BMCR_LOOP; | | 1951 | val &= ~BMCR_LOOP; |
1949 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val); | | 1952 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val); |
1950 | loop = 100; | | 1953 | loop = 100; |
1951 | do { | | 1954 | do { |
1952 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); | | 1955 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); |
1953 | } while (loop-- > 0 && (val & BMCR_LOOP) != 0); | | 1956 | } while (loop-- > 0 && (val & BMCR_LOOP) != 0); |
1954 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val | BMCR_RESET); | | 1957 | (void)mii_writereg(d, sc->sc_phy_id, MII_BMCR, val | BMCR_RESET); |
1955 | loop = 100; | | 1958 | loop = 100; |
1956 | do { | | 1959 | do { |
1957 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); | | 1960 | (void)mii_readreg(d, sc->sc_phy_id, MII_BMCR, &val); |
1958 | } while (loop-- > 0 && (val & BMCR_RESET) != 0); | | 1961 | } while (loop-- > 0 && (val & BMCR_RESET) != 0); |
1959 | } | | 1962 | } |
1960 | | | 1963 | |
1961 | /* GAR 5:2 MDIO frequency selection */ | | 1964 | /* GAR 5:2 MDIO frequency selection */ |
1962 | static int | | 1965 | static int |
1963 | get_mdioclk(uint32_t freq) | | 1966 | get_mdioclk(uint32_t freq) |
1964 | { | | 1967 | { |
1965 | | | 1968 | |
1966 | freq /= 1000 * 1000; | | 1969 | freq /= 1000 * 1000; |
1967 | if (freq < 35) | | 1970 | if (freq < 35) |
1968 | return GAR_MDIO_25_35MHZ; | | 1971 | return GAR_MDIO_25_35MHZ; |
1969 | if (freq < 60) | | 1972 | if (freq < 60) |
1970 | return GAR_MDIO_35_60MHZ; | | 1973 | return GAR_MDIO_35_60MHZ; |
1971 | if (freq < 100) | | 1974 | if (freq < 100) |
1972 | return GAR_MDIO_60_100MHZ; | | 1975 | return GAR_MDIO_60_100MHZ; |
1973 | if (freq < 150) | | 1976 | if (freq < 150) |
1974 | return GAR_MDIO_100_150MHZ; | | 1977 | return GAR_MDIO_100_150MHZ; |
1975 | if (freq < 250) | | 1978 | if (freq < 250) |
1976 | return GAR_MDIO_150_250MHZ; | | 1979 | return GAR_MDIO_150_250MHZ; |
1977 | return GAR_MDIO_250_300MHZ; | | 1980 | return GAR_MDIO_250_300MHZ; |
1978 | } | | 1981 | } |
1979 | | | 1982 | |
1980 | #define HWFEA_DEBUG 1 | | 1983 | #define HWFEA_DEBUG 1 |
1981 | | | 1984 | |
/*
 * Decode and pretty-print the GMAC HWFEA (hardware feature) register:
 * one line per feature bit that is set, plus the PHY interface type
 * from bits 30:28 and the extra tx/rx channel counts.  Compiles to an
 * empty function unless HWFEA_DEBUG == 1.
 */
static void
dump_hwfeature(struct scx_softc *sc)
{
#if HWFEA_DEBUG == 1
	/* bit position -> human readable description */
	struct {
		uint32_t bit;
		const char *des;
	} field[] = {
		{ 27, "SA/VLAN insertion replacement enabled" },
		{ 26, "flexible PPS enabled" },
		{ 25, "time stamping with internal system enabled" },
		{ 24, "alternate/enhanced descriptor enabled" },
		{ 19, "rx FIFO >2048 enabled" },
		{ 18, "type 2 IP checksum offload enabled" },
		{ 17, "type 1 IP checksum offload enabled" },
		{ 16, "Tx checksum offload enabled" },
		{ 15, "AV feature enabled" },
		{ 14, "EEE energy save feature enabled" },
		{ 13, "1588-2008 version 2 advanced feature enabled" },
		{ 12, "only 1588-2002 version 1 feature enabled" },
		{ 11, "RMON event counter enabled" },
		{ 10, "PMT magic packet enabled" },
		{ 9, "PMT remote wakeup enabled" },
		{ 8, "MDIO enabled", },
		{ 7, "L3/L4 filter enabled" },
		{ 6, "TBI/SGMII/RTBI support enabled" },
		{ 5, "supplimental MAC address enabled" },
		{ 4, "receive hash filter enabled" },
		{ 3, "hash size is expanded" },
		{ 2, "Half Duplex enabled" },
		{ 1, "1000 Mbps enabled" },
		{ 0, "10/100 Mbps enabled" },
	};
	/* PHY interface type encoded in HWFEA bits 30:28 */
	const char *nameofmii[] = {
		"GMII or MII",
		"RGMII",
		"SGMII",
		"TBI",
		"RMII",
		"RTBI",
		"SMII",
		"RevMII"
	};
	uint32_t hwfea, mtype, txchan, rxchan;

	hwfea = CSR_READ(sc, HWFEA);
	mtype = (hwfea & __BITS(30,28)) >> 28;
	aprint_normal("HWFEA 0x%08x\n", hwfea);
	aprint_normal("%s <30:28>\n", nameofmii[mtype]);
	for (unsigned i = 0; i < __arraycount(field); i++) {
		if ((hwfea & (1U << field[i].bit)) == 0)
			continue;
		aprint_normal("%s <%d>\n", field[i].des, field[i].bit);
	}
	/* additional DMA channel counts beyond the first */
	if ((txchan = (hwfea & __BITS(23,22)) >> 22) != 0)
		aprint_normal("+%d tx channel available <23,22>\n", txchan);
	if ((rxchan = (hwfea & __BITS(21,20)) >> 20) != 0)
		aprint_normal("+%d rx channel available <21:20>\n", rxchan);
	return;
#endif
}