Mon Mar 29 13:14:14 2021 UTC ()
Make VIRTEX_* kernels compile. Not tested.


(rin)
diff -r1.5 -r1.6 src/sys/arch/evbppc/virtex/autoconf.c
diff -r1.3 -r1.4 src/sys/arch/evbppc/virtex/consinit.c
diff -r1.4 -r1.5 src/sys/arch/evbppc/virtex/design_gsrd2.c
diff -r1.25 -r1.26 src/sys/arch/evbppc/virtex/machdep.c
diff -r1.16 -r1.17 src/sys/arch/evbppc/virtex/dev/if_temac.c
diff -r1.4 -r1.5 src/sys/arch/evbppc/virtex/dev/tft_ll.c

cvs diff -r1.5 -r1.6 src/sys/arch/evbppc/virtex/autoconf.c (switch to unified diff)

--- src/sys/arch/evbppc/virtex/autoconf.c 2012/07/29 18:05:42 1.5
+++ src/sys/arch/evbppc/virtex/autoconf.c 2021/03/29 13:14:13 1.6
@@ -1,106 +1,106 @@ @@ -1,106 +1,106 @@
1/* $NetBSD: autoconf.c,v 1.5 2012/07/29 18:05:42 mlelstv Exp $ */ 1/* $NetBSD: autoconf.c,v 1.6 2021/03/29 13:14:13 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Jachym Holecek 4 * Copyright (c) 2006 Jachym Holecek
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written for DFC Design, s.r.o. 7 * Written for DFC Design, s.r.o.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 12 *
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 15 *
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 33 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
34 * Copyright (C) 1995, 1996 TooLs GmbH. 34 * Copyright (C) 1995, 1996 TooLs GmbH.
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions 38 * modification, are permitted provided that the following conditions
39 * are met: 39 * are met:
40 * 1. Redistributions of source code must retain the above copyright 40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer. 41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright 42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the 43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution. 44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software 45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement: 46 * must display the following acknowledgement:
47 * This product includes software developed by TooLs GmbH. 47 * This product includes software developed by TooLs GmbH.
48 * 4. The name of TooLs GmbH may not be used to endorse or promote products 48 * 4. The name of TooLs GmbH may not be used to endorse or promote products
49 * derived from this software without specific prior written permission. 49 * derived from this software without specific prior written permission.
50 * 50 *
51 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 51 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 54 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
56 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 56 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
57 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 57 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
58 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 58 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
59 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 59 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
60 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 */ 61 */
62 62
63#include <sys/cdefs.h> 63#include <sys/cdefs.h>
64__KERNEL_RCSID(0, "$NetBSD: autoconf.c,v 1.5 2012/07/29 18:05:42 mlelstv Exp $"); 64__KERNEL_RCSID(0, "$NetBSD: autoconf.c,v 1.6 2021/03/29 13:14:13 rin Exp $");
65 65
66#include <sys/param.h> 66#include <sys/param.h>
67#include <sys/conf.h> 67#include <sys/conf.h>
68#include <sys/cpu.h> 68#include <sys/cpu.h>
69#include <sys/device_if.h> 69#include <sys/device_if.h>
70#include <sys/systm.h> 70#include <sys/systm.h>
71 71
72#include <powerpc/ibm4xx/spr.h> 72#include <powerpc/ibm4xx/spr.h>
73 73
74#include <powerpc/ibm4xx/cpu.h> 74#include <powerpc/ibm4xx/cpu.h>
75#include <powerpc/ibm4xx/dev/plbvar.h> 75#include <powerpc/ibm4xx/dev/plbvar.h>
76 76
77 77
78/* List of port-specific devices to attach to the processor local bus. */ 78/* List of port-specific devices to attach to the processor local bus. */
79static const struct plb_dev local_plb_devs [] = { 79static struct plb_dev local_plb_devs [] = {
80 { XILVIRTEX, "xcvbus" }, 80 { XILVIRTEX, "xcvbus" },
81 { 0, NULL } 81 { 0, NULL }
82}; 82};
83 83
84/* 84/*
85 * Determine device configuration for a machine. 85 * Determine device configuration for a machine.
86 */ 86 */
87void 87void
88cpu_configure(void) 88cpu_configure(void)
89{ 89{
90 intr_init(); 90 intr_init();
91 calc_delayconst(); 91 calc_delayconst();
92 92
93 if (config_rootfound("plb", &local_plb_devs) == NULL) 93 if (config_rootfound("plb", &local_plb_devs) == NULL)
94 panic("configure: plb not configured"); 94 panic("configure: plb not configured");
95 95
96 (void)spl0(); 96 (void)spl0();
97} 97}
98 98
99/* 99/*
100 * Setup root device, configure swap area. 100 * Setup root device, configure swap area.
101 */ 101 */
102void 102void
103cpu_rootconf(void) 103cpu_rootconf(void)
104{ 104{
105 rootconf(); 105 rootconf();
106} 106}

cvs diff -r1.3 -r1.4 src/sys/arch/evbppc/virtex/consinit.c (switch to unified diff)

--- src/sys/arch/evbppc/virtex/consinit.c 2011/07/01 19:03:50 1.3
+++ src/sys/arch/evbppc/virtex/consinit.c 2021/03/29 13:14:13 1.4
@@ -1,110 +1,109 @@ @@ -1,110 +1,109 @@
1/* $NetBSD: consinit.c,v 1.3 2011/07/01 19:03:50 dyoung Exp $ */ 1/* $NetBSD: consinit.c,v 1.4 2021/03/29 13:14:13 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Jachym Holecek 4 * Copyright (c) 2006 Jachym Holecek
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written for DFC Design, s.r.o. 7 * Written for DFC Design, s.r.o.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 12 *
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 15 *
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include "opt_cons.h" 32#include "opt_cons.h"
33#include "xlcom.h" 33#include "xlcom.h"
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: consinit.c,v 1.3 2011/07/01 19:03:50 dyoung Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: consinit.c,v 1.4 2021/03/29 13:14:13 rin Exp $");
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/systm.h> 39#include <sys/systm.h>
40#include <sys/device.h> 40#include <sys/device.h>
41 41
42#include <sys/bus.h> 42#include <sys/bus.h>
43 43
44#include <evbppc/virtex/virtex.h> 44#include <evbppc/virtex/virtex.h>
45 45
46#include <dev/cons.h> 46#include <dev/cons.h>
47 47
48 48
49#if NXLCOM > 0 49#if NXLCOM > 0
50extern struct consdev consdev_xlcom; 50extern struct consdev consdev_xlcom;
51void xlcom_cninit(struct consdev *, bus_addr_t); 51void xlcom_cninit(struct consdev *, bus_addr_t);
52#if defined(KGDB) 52#if defined(KGDB)
53void xlcom_kgdbinit(void); 53void xlcom_kgdbinit(void);
54#endif 54#endif
55#endif 55#endif
56 56
57struct consdev *cn_tab = NULL; 
58bus_space_tag_t consdev_iot; 57bus_space_tag_t consdev_iot;
59bus_space_handle_t consdev_ioh; 58bus_space_handle_t consdev_ioh;
60 59
61#if defined(KGDB) 60#if defined(KGDB)
62bus_space_tag_t kgdb_iot; 61bus_space_tag_t kgdb_iot;
63bus_space_handle_t kgdb_ioh; 62bus_space_handle_t kgdb_ioh;
64#endif 63#endif
65 64
66 65
67/* 66/*
68 * Initialize the system console (hmm, as if anyone can see those panics). 67 * Initialize the system console (hmm, as if anyone can see those panics).
69 */ 68 */
70void 69void
71consinit(void) 70consinit(void)
72{ 71{
73 static int initted = 0; 72 static int initted = 0;
74 73
75 if (initted) 74 if (initted)
76 return; 75 return;
77 76
78 /* Pick MD knowledge about console. */ 77 /* Pick MD knowledge about console. */
79 if (virtex_bus_space_tag(CONS_NAME, &consdev_iot)) 78 if (virtex_bus_space_tag(CONS_NAME, &consdev_iot))
80 panic("No bus space for %s console", CONS_NAME); 79 panic("No bus space for %s console", CONS_NAME);
81 80
82#if defined(KGDB) 81#if defined(KGDB)
83 if (virtex_bus_space_tag(KGDB_NAME, &kgdb_iot)) 82 if (virtex_bus_space_tag(KGDB_NAME, &kgdb_iot))
84 panic("No bus space for %s kgdb", KGDB_NAME); 83 panic("No bus space for %s kgdb", KGDB_NAME);
85#endif 84#endif
86 85
87#if NXLCOM > 0 86#if NXLCOM > 0
88#if defined(KGDB) 87#if defined(KGDB)
89 if (strncmp("xlcom", KGDB_NAME, 5)) { 88 if (strncmp("xlcom", KGDB_NAME, 5)) {
90 xlcom_kgdbinit(); 89 xlcom_kgdbinit();
91 90
92 /* Overtake console device, we're higher priority. */ 91 /* Overtake console device, we're higher priority. */
93 if (strcmp(KGDB_NAME, CONS_NAME) == 0 && 92 if (strcmp(KGDB_NAME, CONS_NAME) == 0 &&
94 KGDB_ADDR == CONS_ADDR) 93 KGDB_ADDR == CONS_ADDR)
95 goto done; 94 goto done;
96 } 95 }
97#endif 96#endif
98 if (strncmp("xlcom", CONS_NAME, 5) == 0) { 97 if (strncmp("xlcom", CONS_NAME, 5) == 0) {
99 cn_tab = &consdev_xlcom; 98 cn_tab = &consdev_xlcom;
100 xlcom_cninit(cn_tab, CONS_ADDR); 99 xlcom_cninit(cn_tab, CONS_ADDR);
101 100
102 goto done; 101 goto done;
103 } 102 }
104#endif 103#endif
105 104
106 panic("No console"); /* XXX really panic? */ 105 panic("No console"); /* XXX really panic? */
107 done: 106 done:
108 /* If kgdb overtook console, cn_tab is NULL and dev/cons.c deals. */ 107 /* If kgdb overtook console, cn_tab is NULL and dev/cons.c deals. */
109 initted = 1; 108 initted = 1;
110} 109}

cvs diff -r1.4 -r1.5 src/sys/arch/evbppc/virtex/design_gsrd2.c (switch to unified diff)

--- src/sys/arch/evbppc/virtex/design_gsrd2.c 2011/06/18 06:44:27 1.4
+++ src/sys/arch/evbppc/virtex/design_gsrd2.c 2021/03/29 13:14:13 1.5
@@ -1,510 +1,514 @@ @@ -1,510 +1,514 @@
1/* $NetBSD: design_gsrd2.c,v 1.4 2011/06/18 06:44:27 matt Exp $ */ 1/* $NetBSD: design_gsrd2.c,v 1.5 2021/03/29 13:14:13 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Jachym Holecek 4 * Copyright (c) 2006 Jachym Holecek
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written for DFC Design, s.r.o. 7 * Written for DFC Design, s.r.o.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 12 *
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 15 *
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include "opt_virtex.h" 32#include "opt_virtex.h"
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: design_gsrd2.c,v 1.4 2011/06/18 06:44:27 matt Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: design_gsrd2.c,v 1.5 2021/03/29 13:14:13 rin Exp $");
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/systm.h> 38#include <sys/systm.h>
39#include <sys/device.h> 39#include <sys/device.h>
40#include <sys/kernel.h> 40#include <sys/kernel.h>
41#include <sys/malloc.h> 41#include <sys/malloc.h>
42#include <sys/extent.h> 42#include <sys/extent.h>
43#include <sys/cpu.h> 43#include <sys/cpu.h>
44#include <sys/bus.h> 44#include <sys/bus.h>
45#include <sys/intr.h> 45#include <sys/intr.h>
46 46
47#include <machine/powerpc.h> 47#include <machine/powerpc.h>
48#include <machine/tlb.h> 48#include <machine/tlb.h>
49 49
50#include <powerpc/ibm4xx/cpu.h> 50#include <powerpc/ibm4xx/cpu.h>
51#include <powerpc/ibm4xx/dev/plbvar.h> 51#include <powerpc/ibm4xx/dev/plbvar.h>
52 52
53#include <evbppc/virtex/dev/xcvbusvar.h> 53#include <evbppc/virtex/dev/xcvbusvar.h>
54#include <evbppc/virtex/dev/cdmacreg.h> 54#include <evbppc/virtex/dev/cdmacreg.h>
55#include <evbppc/virtex/dev/temacreg.h> 55#include <evbppc/virtex/dev/temacreg.h>
56#include <evbppc/virtex/dev/tftreg.h> 56#include <evbppc/virtex/dev/tftreg.h>
57 57
58#include <evbppc/virtex/virtex.h> 58#include <evbppc/virtex/virtex.h>
59#include <evbppc/virtex/dcr.h> 59#include <evbppc/virtex/dcr.h>
60 60
61 61
62#define DCR_TEMAC_BASE 0x0030 62#define DCR_TEMAC_BASE 0x0030
63#define DCR_TFT0_BASE 0x0082 63#define DCR_TFT0_BASE 0x0082
64#define DCR_TFT1_BASE 0x0086 64#define DCR_TFT1_BASE 0x0086
65#define DCR_CDMAC_BASE 0x0140 65#define DCR_CDMAC_BASE 0x0140
66 66
67#define OPB_BASE 0x80000000 /* below are offsets in opb */ 67#define OPB_BASE 0x80000000 /* below are offsets in opb */
68#define OPB_XLCOM_BASE 0x010000 68#define OPB_XLCOM_BASE 0x010000
69#define OPB_GPIO_BASE 0x020000 69#define OPB_GPIO_BASE 0x020000
70#define OPB_PSTWO0_BASE 0x040000 70#define OPB_PSTWO0_BASE 0x040000
71#define OPB_PSTWO1_BASE 0x041000 71#define OPB_PSTWO1_BASE 0x041000
72#define CDMAC_NCHAN 2 /* cdmac {Tx,Rx} */ 72#define CDMAC_NCHAN 2 /* cdmac {Tx,Rx} */
73#define CDMAC_INTR_LINE 0 73#define CDMAC_INTR_LINE 0
74 74
75#define TFT_FB_BASE 0x3c00000 75#define TFT_FB_BASE 0x3c00000
76#define TFT_FB_SIZE (2*1024*1024) 76#define TFT_FB_SIZE (2*1024*1024)
77 77
78/* 78/*
79 * CDMAC per-channel interrupt handler. CDMAC has one interrupt signal 79 * CDMAC per-channel interrupt handler. CDMAC has one interrupt signal
80 * per two channels on mpmc2, so we have to dispatch channels manually. 80 * per two channels on mpmc2, so we have to dispatch channels manually.
81 * 81 *
82 * Note: we hardwire priority to IPL_NET, temac(4) is the only device that 82 * Note: we hardwire priority to IPL_NET, temac(4) is the only device that
83 * needs to service DMA interrupts anyway. 83 * needs to service DMA interrupts anyway.
84 */ 84 */
85typedef struct cdmac_intrhand { 85typedef struct cdmac_intrhand {
86 void (*cih_func)(void *); 86 void (*cih_func)(void *);
87 void *cih_arg; 87 void *cih_arg;
88} *cdmac_intrhand_t; 88} *cdmac_intrhand_t;
89 89
90/* Two instantiated channels, one logical interrupt per direction. */ 90/* Two instantiated channels, one logical interrupt per direction. */
91static struct cdmac_intrhand cdmacintr[CDMAC_NCHAN]; 91static struct cdmac_intrhand cdmacintr[CDMAC_NCHAN];
92static void *cdmac_ih; 92static void *cdmac_ih;
93 93
94 94
95/* 95/*
96 * DCR bus space leaf access routines. 96 * DCR bus space leaf access routines.
97 */ 97 */
98 98
 99#ifndef DESIGN_DFC
99static void 100static void
100tft0_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr, 101tft0_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr,
101 uint32_t val) 102 uint32_t val)
102{ 103{
103 addr += h; 104 addr += h;
104 105
105 switch (addr) { 106 switch (addr) {
106 WCASE(DCR_TFT0_BASE, TFT_CTRL); 107 WCASE(DCR_TFT0_BASE, TFT_CTRL);
107 WCASE(DCR_TFT0_BASE, TFT_ADDR); 108 WCASE(DCR_TFT0_BASE, TFT_ADDR);
108 WDEAD(addr); 109 WDEAD(addr);
109 } 110 }
110} 111}
111 112
112static uint32_t 113static uint32_t
113tft0_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr) 114tft0_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr)
114{ 115{
115 uint32_t val; 116 uint32_t val;
116 117
117 addr += h; 118 addr += h;
118 119
119 switch (addr) { 120 switch (addr) {
120 RCASE(DCR_TFT0_BASE, TFT_CTRL); 121 RCASE(DCR_TFT0_BASE, TFT_CTRL);
121 RCASE(DCR_TFT0_BASE, TFT_ADDR); 122 RCASE(DCR_TFT0_BASE, TFT_ADDR);
122 RDEAD(addr); 123 RDEAD(addr);
123 } 124 }
124 125
125 return (val); 126 return (val);
126} 127}
 128#endif /* !DESIGN_DFC */
127 129
128static void 130static void
129tft1_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr, 131tft1_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr,
130 uint32_t val) 132 uint32_t val)
131{ 133{
132 addr += h; 134 addr += h;
133 135
134 switch (addr) { 136 switch (addr) {
135 WCASE(DCR_TFT1_BASE, TFT_CTRL); 137 WCASE(DCR_TFT1_BASE, TFT_CTRL);
136 WCASE(DCR_TFT0_BASE, TFT_ADDR); 138 WCASE(DCR_TFT0_BASE, TFT_ADDR);
137 WDEAD(addr); 139 WDEAD(addr);
138 } 140 }
139} 141}
140 142
141static uint32_t 143static uint32_t
142tft1_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr) 144tft1_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr)
143{ 145{
144 uint32_t val; 146 uint32_t val;
145 147
146 addr += h; 148 addr += h;
147 149
148 switch (addr) { 150 switch (addr) {
149 RCASE(DCR_TFT1_BASE, TFT_CTRL); 151 RCASE(DCR_TFT1_BASE, TFT_CTRL);
150 RCASE(DCR_TFT0_BASE, TFT_ADDR); 152 RCASE(DCR_TFT0_BASE, TFT_ADDR);
151 RDEAD(addr); 153 RDEAD(addr);
152 } 154 }
153 155
154 return (val); 156 return (val);
155} 157}
156 158
157#define DOCHAN(op, base, channel) \ 159#define DOCHAN(op, base, channel) \
158 op(base, channel + CDMAC_NEXT); \ 160 op(base, channel + CDMAC_NEXT); \
159 op(base, channel + CDMAC_CURADDR); \ 161 op(base, channel + CDMAC_CURADDR); \
160 op(base, channel + CDMAC_CURSIZE); \ 162 op(base, channel + CDMAC_CURSIZE); \
161 op(base, channel + CDMAC_CURDESC) 163 op(base, channel + CDMAC_CURDESC)
162 164
163static void 165static void
164cdmac_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr, 166cdmac_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr,
165 uint32_t val) 167 uint32_t val)
166{ 168{
167 addr += h; 169 addr += h;
168 170
169 switch (addr) { 171 switch (addr) {
170 WCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(0)); /* Tx engine */ 172 WCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(0)); /* Tx engine */
171 WCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(1)); /* Rx engine */ 173 WCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(1)); /* Rx engine */
172 WCASE(DCR_CDMAC_BASE, CDMAC_INTR); 174 WCASE(DCR_CDMAC_BASE, CDMAC_INTR);
173 DOCHAN(WCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(0)); 175 DOCHAN(WCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(0));
174 DOCHAN(WCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(1)); 176 DOCHAN(WCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(1));
175 WDEAD(addr); 177 WDEAD(addr);
176 } 178 }
177} 179}
178 180
179static uint32_t 181static uint32_t
180cdmac_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr) 182cdmac_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr)
181{ 183{
182 uint32_t val; 184 uint32_t val;
183 185
184 addr += h; 186 addr += h;
185 187
186 switch (addr) { 188 switch (addr) {
187 RCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(0)); /* Tx engine */ 189 RCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(0)); /* Tx engine */
188 RCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(1)); /* Rx engine */ 190 RCASE(DCR_CDMAC_BASE, CDMAC_STAT_BASE(1)); /* Rx engine */
189 RCASE(DCR_CDMAC_BASE, CDMAC_INTR); 191 RCASE(DCR_CDMAC_BASE, CDMAC_INTR);
190 DOCHAN(RCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(0)); 192 DOCHAN(RCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(0));
191 DOCHAN(RCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(1)); 193 DOCHAN(RCASE, DCR_CDMAC_BASE, CDMAC_CTRL_BASE(1));
192 RDEAD(addr); 194 RDEAD(addr);
193 } 195 }
194 196
195 return (val); 197 return (val);
196} 198}
197 199
198#undef DOCHAN 200#undef DOCHAN
199 201
200static void 202static void
201temac_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr, 203temac_write_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr,
202 uint32_t val) 204 uint32_t val)
203{ 205{
204 addr += h; 206 addr += h;
205 207
206 switch (addr) { 208 switch (addr) {
207 WCASE(DCR_TEMAC_BASE, TEMAC_RESET); 209 WCASE(DCR_TEMAC_BASE, TEMAC_RESET);
208 WDEAD(addr); 210 WDEAD(addr);
209 } 211 }
210} 212}
211 213
212static uint32_t 214static uint32_t
213temac_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr) 215temac_read_4(bus_space_tag_t t, bus_space_handle_t h, uint32_t addr)
214{ 216{
215 uint32_t val; 217 uint32_t val;
216 218
217 addr += h; 219 addr += h;
218 220
219 switch (addr) { 221 switch (addr) {
220 RCASE(DCR_TEMAC_BASE, TEMAC_RESET); 222 RCASE(DCR_TEMAC_BASE, TEMAC_RESET);
221 RDEAD(addr); 223 RDEAD(addr);
222 } 224 }
223 225
224 return (val); 226 return (val);
225} 227}
226 228
227static const struct powerpc_bus_space cdmac_bst = { 229static const struct powerpc_bus_space cdmac_bst = {
228 DCR_BST_BODY(DCR_CDMAC_BASE, cdmac_read_4, cdmac_write_4) 230 DCR_BST_BODY(DCR_CDMAC_BASE, cdmac_read_4, cdmac_write_4)
229}; 231};
230 232
231static const struct powerpc_bus_space temac_bst = { 233static const struct powerpc_bus_space temac_bst = {
232 DCR_BST_BODY(DCR_TEMAC_BASE, temac_read_4, temac_write_4) 234 DCR_BST_BODY(DCR_TEMAC_BASE, temac_read_4, temac_write_4)
233}; 235};
234 236
 237#ifndef DESIGN_DFC
235static const struct powerpc_bus_space tft0_bst = { 238static const struct powerpc_bus_space tft0_bst = {
236 DCR_BST_BODY(DCR_TFT0_BASE, tft0_read_4, tft0_write_4) 239 DCR_BST_BODY(DCR_TFT0_BASE, tft0_read_4, tft0_write_4)
237}; 240};
 241#endif
238 242
239static const struct powerpc_bus_space tft1_bst = { 243static const struct powerpc_bus_space tft1_bst = {
240 DCR_BST_BODY(DCR_TFT1_BASE, tft1_read_4, tft1_write_4) 244 DCR_BST_BODY(DCR_TFT1_BASE, tft1_read_4, tft1_write_4)
241}; 245};
242 246
243static struct powerpc_bus_space opb_bst = { 247static struct powerpc_bus_space opb_bst = {
244 .pbs_flags = _BUS_SPACE_BIG_ENDIAN|_BUS_SPACE_MEM_TYPE, 248 .pbs_flags = _BUS_SPACE_BIG_ENDIAN|_BUS_SPACE_MEM_TYPE,
245 .pbs_base = 0 /*OPB_BASE*/, 249 .pbs_base = 0 /*OPB_BASE*/,
246 .pbs_offset = OPB_BASE, 250 .pbs_offset = OPB_BASE,
247}; 251};
248 252
249static char opb_extent_storage[EXTENT_FIXED_STORAGE_SIZE(8)] __aligned(8); 253static char opb_extent_storage[EXTENT_FIXED_STORAGE_SIZE(8)] __aligned(8);
250 254
251/* 255/*
252 * Master device configuration table for GSRD2 design. 256 * Master device configuration table for GSRD2 design.
253 */ 257 */
254static const struct gsrddev { 258static const struct gsrddev {
255 const char *gdv_name; 259 const char *gdv_name;
256 const char *gdv_attr; 260 const char *gdv_attr;
257 bus_space_tag_t gdv_bst; 261 bus_space_tag_t gdv_bst;
258 bus_addr_t gdv_addr; 262 bus_addr_t gdv_addr;
259 int gdv_intr; 263 int gdv_intr;
260 int gdv_rx_dma; 264 int gdv_rx_dma;
261 int gdv_tx_dma; 265 int gdv_tx_dma;
262 int gdv_dcr; /* XXX bst flag */ 266 int gdv_dcr; /* XXX bst flag */
263} gsrd_devices[] = { 267} gsrd_devices[] = {
264 { /* gsrd_devices[0] */ 268 { /* gsrd_devices[0] */
265 .gdv_name = "xlcom", 269 .gdv_name = "xlcom",
266 .gdv_attr = "xcvbus", 270 .gdv_attr = "xcvbus",
267 .gdv_bst = &opb_bst, 271 .gdv_bst = &opb_bst,
268 .gdv_addr = OPB_XLCOM_BASE, 272 .gdv_addr = OPB_XLCOM_BASE,
269 .gdv_intr = 2, 273 .gdv_intr = 2,
270 .gdv_rx_dma = -1, 274 .gdv_rx_dma = -1,
271 .gdv_tx_dma = -1, 275 .gdv_tx_dma = -1,
272 .gdv_dcr = 0, 276 .gdv_dcr = 0,
273 }, 277 },
274 { /* gsrd_devices[1] */ 278 { /* gsrd_devices[1] */
275 .gdv_name = "temac", 279 .gdv_name = "temac",
276 .gdv_attr = "xcvbus", 280 .gdv_attr = "xcvbus",
277 .gdv_bst = &temac_bst, 281 .gdv_bst = &temac_bst,
278 .gdv_addr = 0, 282 .gdv_addr = 0,
279 .gdv_intr = 1, /* unused MII intr */ 283 .gdv_intr = 1, /* unused MII intr */
280 .gdv_rx_dma = 1, /* cdmac Rx */ 284 .gdv_rx_dma = 1, /* cdmac Rx */
281 .gdv_tx_dma = 0, /* cdmac Tx */ 285 .gdv_tx_dma = 0, /* cdmac Tx */
282 .gdv_dcr = 1, 286 .gdv_dcr = 1,
283 }, 287 },
284#ifndef DESIGN_DFC 288#ifndef DESIGN_DFC
285 { /* gsrd_devices[2] */ 289 { /* gsrd_devices[2] */
286 .gdv_name = "tft", 290 .gdv_name = "tft",
287 .gdv_attr = "plbus", 291 .gdv_attr = "plbus",
288 .gdv_bst = &tft0_bst, 292 .gdv_bst = &tft0_bst,
289 .gdv_addr = 0, 293 .gdv_addr = 0,
290 .gdv_intr = -1, 294 .gdv_intr = -1,
291 .gdv_rx_dma = -1, 295 .gdv_rx_dma = -1,
292 .gdv_tx_dma = -1, 296 .gdv_tx_dma = -1,
293 .gdv_dcr = 1, 297 .gdv_dcr = 1,
294 }, 298 },
295#endif 299#endif
296 { /* gsrd_devices[2] */ 300 { /* gsrd_devices[2] */
297 .gdv_name = "tft", 301 .gdv_name = "tft",
298 .gdv_attr = "plbus", 302 .gdv_attr = "plbus",
299 .gdv_bst = &tft1_bst, 303 .gdv_bst = &tft1_bst,
300 .gdv_addr = 0, 304 .gdv_addr = 0,
301 .gdv_intr = -1, 305 .gdv_intr = -1,
302 .gdv_rx_dma = -1, 306 .gdv_rx_dma = -1,
303 .gdv_tx_dma = -1, 307 .gdv_tx_dma = -1,
304 .gdv_dcr = 1, 308 .gdv_dcr = 1,
305 }, 309 },
306#ifdef DESIGN_DFC 310#ifdef DESIGN_DFC
307 { /* gsrd_devices[3] */ 311 { /* gsrd_devices[3] */
308 .gdv_name = "pstwo", 312 .gdv_name = "pstwo",
309 .gdv_attr = "xcvbus", 313 .gdv_attr = "xcvbus",
310 .gdv_bst = &opb_bst, 314 .gdv_bst = &opb_bst,
311 .gdv_addr = OPB_PSTWO0_BASE, 315 .gdv_addr = OPB_PSTWO0_BASE,
312 .gdv_intr = 3, 316 .gdv_intr = 3,
313 .gdv_rx_dma = -1, 317 .gdv_rx_dma = -1,
314 .gdv_tx_dma = -1, 318 .gdv_tx_dma = -1,
315 .gdv_dcr = 0, 319 .gdv_dcr = 0,
316 }, 320 },
317 { /* gsrd_devices[4] */ 321 { /* gsrd_devices[4] */
318 .gdv_name = "pstwo", 322 .gdv_name = "pstwo",
319 .gdv_attr = "xcvbus", 323 .gdv_attr = "xcvbus",
320 .gdv_bst = &opb_bst, 324 .gdv_bst = &opb_bst,
321 .gdv_addr = OPB_PSTWO1_BASE, 325 .gdv_addr = OPB_PSTWO1_BASE,
322 .gdv_intr = 4, 326 .gdv_intr = 4,
323 .gdv_rx_dma = -1, 327 .gdv_rx_dma = -1,
324 .gdv_tx_dma = -1, 328 .gdv_tx_dma = -1,
325 .gdv_dcr = 0, 329 .gdv_dcr = 0,
326 }, 330 },
327#endif 331#endif
328}; 332};
329 333
330static struct ll_dmac * 334static struct ll_dmac *
331virtex_mpmc_mapdma(int idx, struct ll_dmac *chan) 335virtex_mpmc_mapdma(int idx, struct ll_dmac *chan)
332{ 336{
333 if (idx == -1) 337 if (idx == -1)
334 return (NULL); 338 return (NULL);
335 339
336 KASSERT(idx >= 0 && idx < CDMAC_NCHAN); 340 KASSERT(idx >= 0 && idx < CDMAC_NCHAN);
337 341
338 chan->dmac_iot = &cdmac_bst; 342 chan->dmac_iot = &cdmac_bst;
339 chan->dmac_ctrl_addr = CDMAC_CTRL_BASE(idx); 343 chan->dmac_ctrl_addr = CDMAC_CTRL_BASE(idx);
340 chan->dmac_stat_addr = CDMAC_STAT_BASE(idx); 344 chan->dmac_stat_addr = CDMAC_STAT_BASE(idx);
341 chan->dmac_chan = idx; 345 chan->dmac_chan = idx;
342 346
343 return (chan); 347 return (chan);
344} 348}
345 349
346static int 350static int
347cdmac_intr(void *arg) 351cdmac_intr(void *arg)
348{ 352{
349 uint32_t isr; 353 uint32_t isr;
350 int did = 0; 354 int did = 0;
351 355
352 isr = bus_space_read_4(&cdmac_bst, 0, CDMAC_INTR); 356 isr = bus_space_read_4(&cdmac_bst, 0, CDMAC_INTR);
353 357
354 if (ISSET(isr, CDMAC_INTR_TX0) && cdmacintr[0].cih_func) { 358 if (ISSET(isr, CDMAC_INTR_TX0) && cdmacintr[0].cih_func) {
355 (cdmacintr[0].cih_func)(cdmacintr[0].cih_arg); 359 (cdmacintr[0].cih_func)(cdmacintr[0].cih_arg);
356 did++; 360 did++;
357 } 361 }
358 if (ISSET(isr, CDMAC_INTR_RX0) && cdmacintr[1].cih_func) { 362 if (ISSET(isr, CDMAC_INTR_RX0) && cdmacintr[1].cih_func) {
359 (cdmacintr[1].cih_func)(cdmacintr[1].cih_arg); 363 (cdmacintr[1].cih_func)(cdmacintr[1].cih_arg);
360 did++; 364 did++;
361 } 365 }
362 366
363 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, isr); /* ack */ 367 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, isr); /* ack */
364 368
365 /* XXX This still happens all the time under load. */ 369 /* XXX This still happens all the time under load. */
366#if 0 370#if 0
367 if (did == 0) 371 if (did == 0)
368 aprint_normal("WARNING: stray cdmac isr 0x%x\n", isr); 372 aprint_normal("WARNING: stray cdmac isr 0x%x\n", isr);
369#endif 373#endif
370 return (0); 374 return (0);
371} 375}
372 376
373/* 377/*
374 * Public interface. 378 * Public interface.
375 */ 379 */
376 380
377void 381void
378virtex_autoconf(device_t self, struct plb_attach_args *paa) 382virtex_autoconf(device_t self, struct plb_attach_args *paa)
379{ 383{
380 384
381 struct xcvbus_attach_args vaa; 385 struct xcvbus_attach_args vaa;
382 struct ll_dmac rx, tx; 386 struct ll_dmac rx, tx;
383 int i; 387 int i;
384 388
385 /* Reset DMA channels. */ 389 /* Reset DMA channels. */
386 bus_space_write_4(&cdmac_bst, 0, CDMAC_STAT_BASE(0), CDMAC_STAT_RESET); 390 bus_space_write_4(&cdmac_bst, 0, CDMAC_STAT_BASE(0), CDMAC_STAT_RESET);
387 bus_space_write_4(&cdmac_bst, 0, CDMAC_STAT_BASE(1), CDMAC_STAT_RESET); 391 bus_space_write_4(&cdmac_bst, 0, CDMAC_STAT_BASE(1), CDMAC_STAT_RESET);
388 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, 0); 392 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, 0);
389 393
390 vaa.vaa_dmat = paa->plb_dmat; 394 vaa.vaa_dmat = paa->plb_dmat;
391 395
392 for (i = 0; i < __arraycount(gsrd_devices); i++) { 396 for (i = 0; i < __arraycount(gsrd_devices); i++) {
393 const struct gsrddev *g = &gsrd_devices[i]; 397 const struct gsrddev *g = &gsrd_devices[i];
394 398
395 vaa._vaa_is_dcr = g->gdv_dcr; /* XXX bst flag */ 399 vaa._vaa_is_dcr = g->gdv_dcr; /* XXX bst flag */
396 vaa.vaa_name = g->gdv_name; 400 vaa.vaa_name = g->gdv_name;
397 vaa.vaa_addr = g->gdv_addr; 401 vaa.vaa_addr = g->gdv_addr;
398 vaa.vaa_intr = g->gdv_intr; 402 vaa.vaa_intr = g->gdv_intr;
399 vaa.vaa_iot = g->gdv_bst; 403 vaa.vaa_iot = g->gdv_bst;
400 404
401 vaa.vaa_rx_dmac = virtex_mpmc_mapdma(g->gdv_rx_dma, &rx); 405 vaa.vaa_rx_dmac = virtex_mpmc_mapdma(g->gdv_rx_dma, &rx);
402 vaa.vaa_tx_dmac = virtex_mpmc_mapdma(g->gdv_tx_dma, &tx); 406 vaa.vaa_tx_dmac = virtex_mpmc_mapdma(g->gdv_tx_dma, &tx);
403 407
404 config_found_ia(self, g->gdv_attr, &vaa, xcvbus_print); 408 config_found_ia(self, g->gdv_attr, &vaa, xcvbus_print);
405 } 409 }
406 410
407 /* Setup the dispatch handler. */ 411 /* Setup the dispatch handler. */
408 cdmac_ih = intr_establish(CDMAC_INTR_LINE, IST_LEVEL, IPL_NET, 412 cdmac_ih = intr_establish(CDMAC_INTR_LINE, IST_LEVEL, IPL_NET,
409 cdmac_intr, NULL); 413 cdmac_intr, NULL);
410 if (cdmac_ih == NULL) 414 if (cdmac_ih == NULL)
411 panic("virtex_mpmc_done: could not establish cdmac intr"); 415 panic("virtex_mpmc_done: could not establish cdmac intr");
412 416
413 /* Clear (XXX?) and enable interrupts. */ 417 /* Clear (XXX?) and enable interrupts. */
414 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, ~CDMAC_INTR_MIE); 418 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, ~CDMAC_INTR_MIE);
415 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, CDMAC_INTR_MIE); 419 bus_space_write_4(&cdmac_bst, 0, CDMAC_INTR, CDMAC_INTR_MIE);
416} 420}
417 421
418void * 422void *
419ll_dmac_intr_establish(int chan, void (*handler)(void *), void *arg) 423ll_dmac_intr_establish(int chan, void (*handler)(void *), void *arg)
420{ 424{
421 KASSERT(chan >= 0 && chan < CDMAC_NCHAN); 425 KASSERT(chan >= 0 && chan < CDMAC_NCHAN);
422 KASSERT(cdmacintr[chan].cih_func == NULL); 426 KASSERT(cdmacintr[chan].cih_func == NULL);
423 KASSERT(cdmacintr[chan].cih_arg == NULL); 427 KASSERT(cdmacintr[chan].cih_arg == NULL);
424 428
425 cdmacintr[chan].cih_func = handler; 429 cdmacintr[chan].cih_func = handler;
426 cdmacintr[chan].cih_arg = arg; 430 cdmacintr[chan].cih_arg = arg;
427 431
428 return (&cdmacintr[chan]); 432 return (&cdmacintr[chan]);
429} 433}
430 434
431void 435void
432ll_dmac_intr_disestablish(int chan, void *handle) 436ll_dmac_intr_disestablish(int chan, void *handle)
433{ 437{
434 int s; 438 int s;
435 439
436 KASSERT(chan >= 0 && chan < CDMAC_NCHAN); 440 KASSERT(chan >= 0 && chan < CDMAC_NCHAN);
437 KASSERT(&cdmacintr[chan] == handle); 441 KASSERT(&cdmacintr[chan] == handle);
438 442
439 s = splnet(); 443 s = splnet();
440 cdmacintr[chan].cih_func = NULL; 444 cdmacintr[chan].cih_func = NULL;
441 cdmacintr[chan].cih_arg = NULL; 445 cdmacintr[chan].cih_arg = NULL;
442 splx(s); 446 splx(s);
443} 447}
444 448
445int 449int
446virtex_bus_space_tag(const char *xname, bus_space_tag_t *bst) 450virtex_bus_space_tag(const char *xname, bus_space_tag_t *bst)
447{ 451{
448 if (strncmp(xname, "xlcom", 5) == 0) { 452 if (strncmp(xname, "xlcom", 5) == 0) {
449 *bst = &opb_bst; 453 *bst = &opb_bst;
450 return (0); 454 return (0);
451 } 455 }
452 456
453 return (ENODEV); 457 return (ENODEV);
454} 458}
455 459
456void 460void
457virtex_machdep_init(vaddr_t endva, vsize_t maxsz, struct mem_region *phys, 461virtex_machdep_init(vaddr_t endva, vsize_t maxsz, struct mem_region *phys,
458 struct mem_region *avail) 462 struct mem_region *avail)
459{ 463{
460 ppc4xx_tlb_reserve(OPB_BASE, endva, maxsz, TLB_I | TLB_G); 464 ppc4xx_tlb_reserve(OPB_BASE, endva, maxsz, TLB_I | TLB_G);
461 endva += maxsz; 465 endva += maxsz;
462 466
463 opb_bst.pbs_limit = maxsz; 467 opb_bst.pbs_limit = maxsz;
464 468
465 if (bus_space_init(&opb_bst, "opbtag", opb_extent_storage, 469 if (bus_space_init(&opb_bst, "opbtag", opb_extent_storage,
466 sizeof(opb_extent_storage))) 470 sizeof(opb_extent_storage)))
467 panic("virtex_machdep_init: failed to initialize opb_bst"); 471 panic("virtex_machdep_init: failed to initialize opb_bst");
468 472
469 /* 473 /*
470 * The TFT controller is broken, we can't change FB address. 474 * The TFT controller is broken, we can't change FB address.
471 * Hardwire it at predefined base address, create uncached 475 * Hardwire it at predefined base address, create uncached
472 * mapping. 476 * mapping.
473 */ 477 */
474 478
475 avail[0].size = TFT_FB_BASE - avail[0].start; 479 avail[0].size = TFT_FB_BASE - avail[0].start;
476 ppc4xx_tlb_reserve(TFT_FB_BASE, endva, TFT_FB_SIZE, TLB_I | TLB_G); 480 ppc4xx_tlb_reserve(TFT_FB_BASE, endva, TFT_FB_SIZE, TLB_I | TLB_G);
477} 481}
478 482
479void 483void
480device_register(device_t dev, void *aux) 484device_register(device_t dev, void *aux)
481{ 485{
482 prop_number_t pn; 486 prop_number_t pn;
483 void *fb; 487 void *fb;
484 488
485 if (strncmp(device_xname(dev), "tft0", 4) == 0) { 489 if (strncmp(device_xname(dev), "tft0", 4) == 0) {
486 fb = ppc4xx_tlb_mapiodev(TFT_FB_BASE, TFT_FB_SIZE); 490 fb = ppc4xx_tlb_mapiodev(TFT_FB_BASE, TFT_FB_SIZE);
487 if (fb == NULL) 491 if (fb == NULL)
488 panic("device_register: framebuffer mapping gone!\n"); 492 panic("device_register: framebuffer mapping gone!\n");
489 493
490 pn = prop_number_create_unsigned_integer(TFT_FB_BASE); 494 pn = prop_number_create_unsigned_integer(TFT_FB_BASE);
491 if (pn == NULL) { 495 if (pn == NULL) {
492 printf("WARNING: could not allocate virtex-tft-pa\n"); 496 printf("WARNING: could not allocate virtex-tft-pa\n");
493 return ; 497 return ;
494 } 498 }
495 if (prop_dictionary_set(device_properties(dev), 499 if (prop_dictionary_set(device_properties(dev),
496 "virtex-tft-pa", pn) != true) 500 "virtex-tft-pa", pn) != true)
497 printf("WARNING: could not set virtex-tft-pa\n"); 501 printf("WARNING: could not set virtex-tft-pa\n");
498 prop_object_release(pn); 502 prop_object_release(pn);
499 503
500 pn = prop_number_create_unsigned_integer((uintptr_t)fb); 504 pn = prop_number_create_unsigned_integer((uintptr_t)fb);
501 if (pn == NULL) { 505 if (pn == NULL) {
502 printf("WARNING: could not allocate virtex-tft-va\n"); 506 printf("WARNING: could not allocate virtex-tft-va\n");
503 return ; 507 return ;
504 } 508 }
505 if (prop_dictionary_set(device_properties(dev), 509 if (prop_dictionary_set(device_properties(dev),
506 "virtex-tft-va", pn) != true) 510 "virtex-tft-va", pn) != true)
507 printf("WARNING: could not set virtex-tft-va\n"); 511 printf("WARNING: could not set virtex-tft-va\n");
508 prop_object_release(pn); 512 prop_object_release(pn);
509 } 513 }
510} 514}

cvs diff -r1.25 -r1.26 src/sys/arch/evbppc/virtex/machdep.c (switch to unified diff)

--- src/sys/arch/evbppc/virtex/machdep.c 2020/06/11 19:20:43 1.25
+++ src/sys/arch/evbppc/virtex/machdep.c 2021/03/29 13:14:13 1.26
@@ -1,332 +1,331 @@ @@ -1,332 +1,331 @@
1/* $NetBSD: machdep.c,v 1.25 2020/06/11 19:20:43 ad Exp $ */ 1/* $NetBSD: machdep.c,v 1.26 2021/03/29 13:14:13 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Jachym Holecek 4 * Copyright (c) 2006 Jachym Holecek
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written for DFC Design, s.r.o. 7 * Written for DFC Design, s.r.o.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 12 *
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 15 *
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Based on Walnut and Explora. 33 * Based on Walnut and Explora.
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.25 2020/06/11 19:20:43 ad Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.26 2021/03/29 13:14:13 rin Exp $");
38 38
39#include "opt_compat_netbsd.h" 39#include "opt_compat_netbsd.h"
40#include "opt_ddb.h" 40#include "opt_ddb.h"
41#include "opt_virtex.h" 41#include "opt_virtex.h"
42#include "opt_kgdb.h" 42#include "opt_kgdb.h"
43 43
44#include <sys/param.h> 44#include <sys/param.h>
45#include <sys/boot_flag.h> 45#include <sys/boot_flag.h>
46#include <sys/buf.h> 46#include <sys/buf.h>
47#include <sys/bus.h> 47#include <sys/bus.h>
48#include <sys/device.h> 48#include <sys/device.h>
49#include <sys/exec.h> 49#include <sys/exec.h>
50#include <sys/malloc.h> 50#include <sys/malloc.h>
51#include <sys/mbuf.h> 51#include <sys/mbuf.h>
52#include <sys/module.h> 52#include <sys/module.h>
53#include <sys/mount.h> 53#include <sys/mount.h>
54#include <sys/msgbuf.h> 54#include <sys/msgbuf.h>
55#include <sys/kernel.h> 55#include <sys/kernel.h>
56#include <sys/ksyms.h> 56#include <sys/ksyms.h>
57#include <sys/proc.h> 57#include <sys/proc.h>
58#include <sys/reboot.h> 58#include <sys/reboot.h>
59#include <sys/syscallargs.h> 59#include <sys/syscallargs.h>
60#include <sys/syslog.h> 60#include <sys/syslog.h>
61#include <sys/systm.h> 61#include <sys/systm.h>
62 62
63#include <uvm/uvm_extern.h> 63#include <uvm/uvm_extern.h>
64 64
65#include <dev/cons.h> 65#include <dev/cons.h>
66 66
67#include <machine/powerpc.h> 67#include <machine/powerpc.h>
68 68
69#include <powerpc/trap.h> 69#include <powerpc/trap.h>
70#include <powerpc/pcb.h> 70#include <powerpc/pcb.h>
71 71
72#include <powerpc/spr.h> 72#include <powerpc/spr.h>
73#include <powerpc/ibm4xx/spr.h> 73#include <powerpc/ibm4xx/spr.h>
74 74
75#include <powerpc/ibm4xx/cpu.h> 75#include <powerpc/ibm4xx/cpu.h>
76 76
77#include <evbppc/virtex/dcr.h> 77#include <evbppc/virtex/dcr.h>
78#include <evbppc/virtex/virtex.h> 78#include <evbppc/virtex/virtex.h>
79 79
80#include "ksyms.h" 80#include "ksyms.h"
81 81
82#if defined(DDB) 82#if defined(DDB)
83#include <powerpc/db_machdep.h> 83#include <powerpc/db_machdep.h>
84#include <ddb/db_extern.h> 84#include <ddb/db_extern.h>
85#endif 85#endif
86 86
87#if defined(KGDB) 87#if defined(KGDB)
88#include <sys/kgdb.h> 88#include <sys/kgdb.h>
89#endif 89#endif
90 90
91/* 91/*
92 * Global variables used here and there 92 * Global variables used here and there
93 */ 93 */
94struct vm_map *phys_map = NULL; 94struct vm_map *phys_map = NULL;
95 95
96/* 96/*
97 * This should probably be in autoconf! XXX 97 * This should probably be in autoconf! XXX
98 */ 98 */
99char machine[] = MACHINE; /* from <machine/param.h> */ 99char machine[] = MACHINE; /* from <machine/param.h> */
100char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */ 100char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
101 101
102char bootpath[256]; 102char bootpath[256];
103vaddr_t msgbuf_vaddr; 
104 103
105void initppc(vaddr_t, vaddr_t); 104void initppc(vaddr_t, vaddr_t);
106 105
107static void dumpsys(void); 106static void dumpsys(void);
108 107
109/* BSS segment start & end. */ 108/* BSS segment start & end. */
110extern char edata[], end[]; 109extern char edata[], end[];
111 110
112/* One region holds all memory, the other is terminator expected by 405 pmap. */ 111/* One region holds all memory, the other is terminator expected by 405 pmap. */
113#define MEMREGIONS 2 112#define MEMREGIONS 2
114struct mem_region physmemr[MEMREGIONS]; 113struct mem_region physmemr[MEMREGIONS];
115struct mem_region availmemr[MEMREGIONS]; 114struct mem_region availmemr[MEMREGIONS];
116 115
117/* Maximum TLB page size. */ 116/* Maximum TLB page size. */
118#define TLB_PG_SIZE (16*1024*1024) 117#define TLB_PG_SIZE (16*1024*1024)
119 118
120void 119void
121initppc(vaddr_t startkernel, vaddr_t endkernel) 120initppc(vaddr_t startkernel, vaddr_t endkernel)
122{ 121{
123 paddr_t addr, memsize; 122 paddr_t addr, memsize;
124 123
125 /* Initialize cache info for memcpy, memset, etc. */ 124 /* Initialize cache info for memcpy, memset, etc. */
126 cpu_probe_cache(); 125 cpu_probe_cache();
127 126
128 memset(physmemr, 0, sizeof(physmemr)); /* [1].size = 0 */ 127 memset(physmemr, 0, sizeof(physmemr)); /* [1].size = 0 */
129 memset(availmemr, 0, sizeof(availmemr)); /* [1].size = 0 */ 128 memset(availmemr, 0, sizeof(availmemr)); /* [1].size = 0 */
130 129
131 memsize = (PHYSMEM * 1024 * 1024) & ~PGOFSET; 130 memsize = (PHYSMEM * 1024 * 1024) & ~PGOFSET;
132 131
133 physmemr[0].start = 0; 132 physmemr[0].start = 0;
134 physmemr[0].size = memsize; 133 physmemr[0].size = memsize;
135 134
136 availmemr[0].start = startkernel; 135 availmemr[0].start = startkernel;
137 availmemr[0].size = memsize - availmemr[0].start; 136 availmemr[0].size = memsize - availmemr[0].start;
138 137
139 /* Map kernel memory linearly. */ 138 /* Map kernel memory linearly. */
140 for (addr = 0; addr < endkernel; addr += TLB_PG_SIZE) 139 for (addr = 0; addr < endkernel; addr += TLB_PG_SIZE)
141 ppc4xx_tlb_reserve(addr, addr, TLB_PG_SIZE, TLB_EX); 140 ppc4xx_tlb_reserve(addr, addr, TLB_PG_SIZE, TLB_EX);
142 141
143 /* Give design-specific code a hint for reserved mappings. */ 142 /* Give design-specific code a hint for reserved mappings. */
144 virtex_machdep_init(roundup(memsize, TLB_PG_SIZE), TLB_PG_SIZE, 143 virtex_machdep_init(roundup(memsize, TLB_PG_SIZE), TLB_PG_SIZE,
145 physmemr, availmemr); 144 physmemr, availmemr);
146 145
147 ibm4xx_init(startkernel, endkernel, pic_ext_intr); 146 ibm4xx_init(startkernel, endkernel, pic_ext_intr);
148 147
149#ifdef DDB 148#ifdef DDB
150 if (boothowto & RB_KDB) 149 if (boothowto & RB_KDB)
151 Debugger(); 150 Debugger();
152#endif 151#endif
153 152
154#ifdef KGDB 153#ifdef KGDB
155 /* 154 /*
156 * Now trap to KGDB 155 * Now trap to KGDB
157 */ 156 */
158 kgdb_connect(1); 157 kgdb_connect(1);
159#endif /* KGDB */ 158#endif /* KGDB */
160 159
161 /* 160 /*
162 * Look for the ibm4xx modules in the right place. 161 * Look for the ibm4xx modules in the right place.
163 */ 162 */
164 module_machine = module_machine_ibm4xx; 163 module_machine = module_machine_ibm4xx;
165} 164}
166 165
167/* 166/*
168 * Machine dependent startup code. 167 * Machine dependent startup code.
169 */ 168 */
170 169
171void 170void
172cpu_startup(void) 171cpu_startup(void)
173{ 172{
174 /* For use by propdb. */ 173 /* For use by propdb. */
175 static u_int memsize = PHYSMEM * 1024 * 1024; 174 static u_int memsize = PHYSMEM * 1024 * 1024;
176 static u_int cpuspeed = CPUFREQ * 1000 * 1000; 175 static u_int cpuspeed = CPUFREQ * 1000 * 1000;
177 prop_number_t pn; 176 prop_number_t pn;
178 177
179 vaddr_t minaddr, maxaddr; 178 vaddr_t minaddr, maxaddr;
180 char pbuf[9]; 179 char pbuf[9];
181 180
182 curcpu()->ci_khz = cpuspeed / 1000; 181 curcpu()->ci_khz = cpuspeed / 1000;
183 182
184 /* Initialize error message buffer. */ 183 /* Initialize error message buffer. */
185 initmsgbuf((void *)msgbuf, round_page(MSGBUFSIZE)); 184 initmsgbuf((void *)msgbuf, round_page(MSGBUFSIZE));
186 185
187 printf("%s%s", copyright, version); 186 printf("%s%s", copyright, version);
188 187
189 format_bytes(pbuf, sizeof(pbuf), ctob(physmem)); 188 format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
190 printf("total memory = %s\n", pbuf); 189 printf("total memory = %s\n", pbuf);
191 190
192 minaddr = 0; 191 minaddr = 0;
193 /* 192 /*
194 * Allocate a submap for physio 193 * Allocate a submap for physio
195 */ 194 */
196 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 195 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
197 VM_PHYS_SIZE, 0, false, NULL); 196 VM_PHYS_SIZE, 0, false, NULL);
198 197
199 /* 198 /*
200 * No need to allocate an mbuf cluster submap. Mbuf clusters 199 * No need to allocate an mbuf cluster submap. Mbuf clusters
201 * are allocated via the pool allocator, and we use direct-mapped 200 * are allocated via the pool allocator, and we use direct-mapped
202 * pool pages. 201 * pool pages.
203 */ 202 */
204 203
205 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false))); 204 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
206 printf("avail memory = %s\n", pbuf); 205 printf("avail memory = %s\n", pbuf);
207 206
208 /* 207 /*
209 * Set up the board properties database. 208 * Set up the board properties database.
210 */ 209 */
211 board_info_init(); 210 board_info_init();
212 211
213 pn = prop_number_create_integer(memsize); 212 pn = prop_number_create_integer(memsize);
214 KASSERT(pn != NULL); 213 KASSERT(pn != NULL);
215 if (prop_dictionary_set(board_properties, "mem-size", pn) == false) 214 if (prop_dictionary_set(board_properties, "mem-size", pn) == false)
216 panic("setting mem-size"); 215 panic("setting mem-size");
217 prop_object_release(pn); 216 prop_object_release(pn);
218 217
219 pn = prop_number_create_integer(cpuspeed); 218 pn = prop_number_create_integer(cpuspeed);
220 KASSERT(pn != NULL); 219 KASSERT(pn != NULL);
221 if (prop_dictionary_set(board_properties, "processor-frequency", 220 if (prop_dictionary_set(board_properties, "processor-frequency",
222 pn) == false) 221 pn) == false)
223 panic("setting processor-frequency"); 222 panic("setting processor-frequency");
224 prop_object_release(pn); 223 prop_object_release(pn);
225 224
226 /* 225 /*
227 * Now that we have VM, malloc()s are OK in bus_space. 226 * Now that we have VM, malloc()s are OK in bus_space.
228 */ 227 */
229 bus_space_mallocok(); 228 bus_space_mallocok();
230 fake_mapiodev = 0; 229 fake_mapiodev = 0;
231} 230}
232 231
233 232
234static void 233static void
235dumpsys(void) 234dumpsys(void)
236{ 235{
237 printf("dumpsys: TBD\n"); 236 printf("dumpsys: TBD\n");
238} 237}
239 238
240/* Hook used by 405 pmap module. */ 239/* Hook used by 405 pmap module. */
241void 240void
242mem_regions(struct mem_region **mem, struct mem_region **avail) 241mem_regions(struct mem_region **mem, struct mem_region **avail)
243{ 242{
244 *mem = physmemr; 243 *mem = physmemr;
245 *avail = availmemr; 244 *avail = availmemr;
246} 245}
247 246
248/* 247/*
249 * Halt or reboot the machine after syncing/dumping according to howto. 248 * Halt or reboot the machine after syncing/dumping according to howto.
250 */ 249 */
251void 250void
252cpu_reboot(int howto, char *what) 251cpu_reboot(int howto, char *what)
253{ 252{
254 static int syncing; 253 static int syncing;
255 static char str[256]; 254 static char str[256];
256 char *ap = str, *ap1 = ap; 255 char *ap = str, *ap1 = ap;
257 256
258 boothowto = howto; 257 boothowto = howto;
259 if (!cold && !(howto & RB_NOSYNC) && !syncing) { 258 if (!cold && !(howto & RB_NOSYNC) && !syncing) {
260 syncing = 1; 259 syncing = 1;
261 vfs_shutdown(); /* sync */ 260 vfs_shutdown(); /* sync */
262 resettodr(); /* set wall clock */ 261 resettodr(); /* set wall clock */
263 } 262 }
264 263
265 splhigh(); 264 splhigh();
266 265
267 if (!cold && (howto & RB_DUMP)) 266 if (!cold && (howto & RB_DUMP))
268 dumpsys(); 267 dumpsys();
269 268
270 doshutdownhooks(); 269 doshutdownhooks();
271 270
272 pmf_system_shutdown(boothowto); 271 pmf_system_shutdown(boothowto);
273 272
274 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) { 273 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
275 /* Power off here if we know how...*/ 274 /* Power off here if we know how...*/
276 } 275 }
277 276
278 if (howto & RB_HALT) { 277 if (howto & RB_HALT) {
279 printf("halted\n\n"); 278 printf("halted\n\n");
280 279
281 goto reboot; /* XXX for now... */ 280 goto reboot; /* XXX for now... */
282 281
283#ifdef DDB 282#ifdef DDB
284 printf("dropping to debugger\n"); 283 printf("dropping to debugger\n");
285 while(1) 284 while(1)
286 Debugger(); 285 Debugger();
287#endif 286#endif
288#ifdef KGDB 287#ifdef KGDB
289 printf("dropping to kgdb\n"); 288 printf("dropping to kgdb\n");
290 while(1) 289 while(1)
291 kgdb_connect(1); 290 kgdb_connect(1);
292#endif 291#endif
293 } 292 }
294 293
295 printf("rebooting\n\n"); 294 printf("rebooting\n\n");
296 if (what && *what) { 295 if (what && *what) {
297 if (strlen(what) > sizeof str - 5) 296 if (strlen(what) > sizeof str - 5)
298 printf("boot string too large, ignored\n"); 297 printf("boot string too large, ignored\n");
299 else { 298 else {
300 strcpy(str, what); 299 strcpy(str, what);
301 ap1 = ap = str + strlen(str); 300 ap1 = ap = str + strlen(str);
302 *ap++ = ' '; 301 *ap++ = ' ';
303 } 302 }
304 } 303 }
305 *ap++ = '-'; 304 *ap++ = '-';
306 if (howto & RB_SINGLE) 305 if (howto & RB_SINGLE)
307 *ap++ = 's'; 306 *ap++ = 's';
308 if (howto & RB_KDB) 307 if (howto & RB_KDB)
309 *ap++ = 'd'; 308 *ap++ = 'd';
310 *ap++ = 0; 309 *ap++ = 0;
311 if (ap[-2] == '-') 310 if (ap[-2] == '-')
312 *ap1 = 0; 311 *ap1 = 0;
313 312
314 /* flush cache for msgbuf */ 313 /* flush cache for msgbuf */
315 __syncicache((void *)msgbuf_paddr, round_page(MSGBUFSIZE)); 314 __syncicache((void *)msgbuf_paddr, round_page(MSGBUFSIZE));
316 315
317 reboot: 316 reboot:
318 ppc4xx_reset(); 317 ppc4xx_reset();
319 318
320 printf("ppc4xx_reset() failed!\n"); 319 printf("ppc4xx_reset() failed!\n");
321#ifdef DDB 320#ifdef DDB
322 while(1) 321 while(1)
323 Debugger(); 322 Debugger();
324#endif 323#endif
325#ifdef KGDB 324#ifdef KGDB
326 while(1) 325 while(1)
327 kgdb_connect(1); 326 kgdb_connect(1);
328#else 327#else
329 while (1) 328 while (1)
330 /* nothing */; 329 /* nothing */;
331#endif 330#endif
332} 331}

cvs diff -r1.16 -r1.17 src/sys/arch/evbppc/virtex/dev/if_temac.c (switch to unified diff)

--- src/sys/arch/evbppc/virtex/dev/if_temac.c 2020/02/04 07:36:04 1.16
+++ src/sys/arch/evbppc/virtex/dev/if_temac.c 2021/03/29 13:14:13 1.17
@@ -1,1304 +1,1304 @@ @@ -1,1304 +1,1304 @@
1/* $NetBSD: if_temac.c,v 1.16 2020/02/04 07:36:04 skrll Exp $ */ 1/* $NetBSD: if_temac.c,v 1.17 2021/03/29 13:14:13 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Jachym Holecek 4 * Copyright (c) 2006 Jachym Holecek
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written for DFC Design, s.r.o. 7 * Written for DFC Design, s.r.o.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 12 *
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 15 *
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Driver for Xilinx LocalLink TEMAC as wired on the GSRD platform. 33 * Driver for Xilinx LocalLink TEMAC as wired on the GSRD platform.
34 * 34 *
35 * TODO: 35 * TODO:
36 * - Optimize 36 * - Optimize
37 * - Checksum offload 37 * - Checksum offload
38 * - Address filters 38 * - Address filters
39 * - Support jumbo frames 39 * - Support jumbo frames
40 */ 40 */
41 41
42#include <sys/cdefs.h> 42#include <sys/cdefs.h>
43__KERNEL_RCSID(0, "$NetBSD: if_temac.c,v 1.16 2020/02/04 07:36:04 skrll Exp $"); 43__KERNEL_RCSID(0, "$NetBSD: if_temac.c,v 1.17 2021/03/29 13:14:13 rin Exp $");
44 44
45 45
46#include <sys/param.h> 46#include <sys/param.h>
47#include <sys/systm.h> 47#include <sys/systm.h>
48#include <sys/mbuf.h> 48#include <sys/mbuf.h>
49#include <sys/kernel.h> 49#include <sys/kernel.h>
50#include <sys/socket.h> 50#include <sys/socket.h>
51#include <sys/ioctl.h> 51#include <sys/ioctl.h>
52#include <sys/device.h> 52#include <sys/device.h>
53#include <sys/bus.h> 53#include <sys/bus.h>
54#include <sys/cpu.h> 54#include <sys/cpu.h>
55 55
56#include <uvm/uvm_extern.h> 56#include <uvm/uvm_extern.h>
57 57
58#include <net/if.h> 58#include <net/if.h>
59#include <net/if_dl.h> 59#include <net/if_dl.h>
60#include <net/if_media.h> 60#include <net/if_media.h>
61#include <net/if_ether.h> 61#include <net/if_ether.h>
62 62
63#include <net/bpf.h> 63#include <net/bpf.h>
64 64
65#include <powerpc/ibm4xx/cpu.h> 65#include <powerpc/ibm4xx/cpu.h>
66 66
67#include <evbppc/virtex/idcr.h> 67#include <evbppc/virtex/idcr.h>
68#include <evbppc/virtex/dev/xcvbusvar.h> 68#include <evbppc/virtex/dev/xcvbusvar.h>
69#include <evbppc/virtex/dev/cdmacreg.h> 69#include <evbppc/virtex/dev/cdmacreg.h>
70#include <evbppc/virtex/dev/temacreg.h> 70#include <evbppc/virtex/dev/temacreg.h>
71#include <evbppc/virtex/dev/temacvar.h> 71#include <evbppc/virtex/dev/temacvar.h>
72 72
73#include <dev/mii/miivar.h> 73#include <dev/mii/miivar.h>
74 74
75 75
76/* This is outside of TEMAC's DCR window, we have to hardcode it... */ 76/* This is outside of TEMAC's DCR window, we have to hardcode it... */
77#define DCR_ETH_BASE 0x0030 77#define DCR_ETH_BASE 0x0030
78 78
79#define TEMAC_REGDEBUG 0 79#define TEMAC_REGDEBUG 0
80#define TEMAC_RXDEBUG 0 80#define TEMAC_RXDEBUG 0
81#define TEMAC_TXDEBUG 0 81#define TEMAC_TXDEBUG 0
82 82
83#if TEMAC_RXDEBUG > 0 || TEMAC_TXDEBUG > 0 83#if TEMAC_RXDEBUG > 0 || TEMAC_TXDEBUG > 0
84#define TEMAC_DEBUG 1 84#define TEMAC_DEBUG 1
85#else 85#else
86#define TEMAC_DEBUG 0 86#define TEMAC_DEBUG 0
87#endif 87#endif
88 88
89#if TEMAC_REGDEBUG > 0 89#if TEMAC_REGDEBUG > 0
90#define TRACEREG(arg) printf arg 90#define TRACEREG(arg) printf arg
91#else 91#else
92#define TRACEREG(arg) /* nop */ 92#define TRACEREG(arg) /* nop */
93#endif 93#endif
94 94
95/* DMA control chains take up one (16KB) page. */ 95/* DMA control chains take up one (16KB) page. */
96#define TEMAC_NTXDESC 256 96#define TEMAC_NTXDESC 256
97#define TEMAC_NRXDESC 256 97#define TEMAC_NRXDESC 256
98 98
99#define TEMAC_TXQLEN 64 /* Software Tx queue length */ 99#define TEMAC_TXQLEN 64 /* Software Tx queue length */
100#define TEMAC_NTXSEG 16 /* Maximum Tx segments per packet */ 100#define TEMAC_NTXSEG 16 /* Maximum Tx segments per packet */
101 101
102#define TEMAC_NRXSEG 1 /* Maximum Rx segments per packet */ 102#define TEMAC_NRXSEG 1 /* Maximum Rx segments per packet */
103#define TEMAC_RXPERIOD 1 /* Interrupt every N descriptors. */ 103#define TEMAC_RXPERIOD 1 /* Interrupt every N descriptors. */
104#define TEMAC_RXTIMO_HZ 100 /* Rx reaper frequency */ 104#define TEMAC_RXTIMO_HZ 100 /* Rx reaper frequency */
105 105
106/* Next Tx descriptor and descriptor's offset WRT sc_cdaddr. */ 106/* Next Tx descriptor and descriptor's offset WRT sc_cdaddr. */
107#define TEMAC_TXSINC(n, i) (((n) + TEMAC_TXQLEN + (i)) % TEMAC_TXQLEN) 107#define TEMAC_TXSINC(n, i) (((n) + TEMAC_TXQLEN + (i)) % TEMAC_TXQLEN)
108#define TEMAC_TXINC(n, i) (((n) + TEMAC_NTXDESC + (i)) % TEMAC_NTXDESC) 108#define TEMAC_TXINC(n, i) (((n) + TEMAC_NTXDESC + (i)) % TEMAC_NTXDESC)
109 109
110#define TEMAC_TXSNEXT(n) TEMAC_TXSINC((n), 1) 110#define TEMAC_TXSNEXT(n) TEMAC_TXSINC((n), 1)
111#define TEMAC_TXNEXT(n) TEMAC_TXINC((n), 1) 111#define TEMAC_TXNEXT(n) TEMAC_TXINC((n), 1)
112#define TEMAC_TXDOFF(n) (offsetof(struct temac_control, cd_txdesc) + \ 112#define TEMAC_TXDOFF(n) (offsetof(struct temac_control, cd_txdesc) + \
113 (n) * sizeof(struct cdmac_descr)) 113 (n) * sizeof(struct cdmac_descr))
114 114
115/* Next Rx descriptor and descriptor's offset WRT sc_cdaddr. */ 115/* Next Rx descriptor and descriptor's offset WRT sc_cdaddr. */
116#define TEMAC_RXINC(n, i) (((n) + TEMAC_NRXDESC + (i)) % TEMAC_NRXDESC) 116#define TEMAC_RXINC(n, i) (((n) + TEMAC_NRXDESC + (i)) % TEMAC_NRXDESC)
117#define TEMAC_RXNEXT(n) TEMAC_RXINC((n), 1) 117#define TEMAC_RXNEXT(n) TEMAC_RXINC((n), 1)
118#define TEMAC_RXDOFF(n) (offsetof(struct temac_control, cd_rxdesc) + \ 118#define TEMAC_RXDOFF(n) (offsetof(struct temac_control, cd_rxdesc) + \
119 (n) * sizeof(struct cdmac_descr)) 119 (n) * sizeof(struct cdmac_descr))
120#define TEMAC_ISINTR(i) (((i) % TEMAC_RXPERIOD) == 0) 120#define TEMAC_ISINTR(i) (((i) % TEMAC_RXPERIOD) == 0)
121#define TEMAC_ISLAST(i) ((i) == (TEMAC_NRXDESC - 1)) 121#define TEMAC_ISLAST(i) ((i) == (TEMAC_NRXDESC - 1))
122 122
123 123
124struct temac_control { 124struct temac_control {
125 struct cdmac_descr cd_txdesc[TEMAC_NTXDESC]; 125 struct cdmac_descr cd_txdesc[TEMAC_NTXDESC];
126 struct cdmac_descr cd_rxdesc[TEMAC_NRXDESC]; 126 struct cdmac_descr cd_rxdesc[TEMAC_NRXDESC];
127}; 127};
128 128
129struct temac_txsoft { 129struct temac_txsoft {
130 bus_dmamap_t txs_dmap; 130 bus_dmamap_t txs_dmap;
131 struct mbuf *txs_mbuf; 131 struct mbuf *txs_mbuf;
132 int txs_last; 132 int txs_last;
133}; 133};
134 134
135struct temac_rxsoft { 135struct temac_rxsoft {
136 bus_dmamap_t rxs_dmap; 136 bus_dmamap_t rxs_dmap;
137 struct mbuf *rxs_mbuf; 137 struct mbuf *rxs_mbuf;
138}; 138};
139 139
140struct temac_softc { 140struct temac_softc {
141 device_t sc_dev; 141 device_t sc_dev;
142 struct ethercom sc_ec; 142 struct ethercom sc_ec;
143#define sc_if sc_ec.ec_if 143#define sc_if sc_ec.ec_if
144 144
145 /* Peripheral registers */ 145 /* Peripheral registers */
146 bus_space_tag_t sc_iot; 146 bus_space_tag_t sc_iot;
147 bus_space_handle_t sc_ioh; 147 bus_space_handle_t sc_ioh;
148 148
149 /* CDMAC channel registers */ 149 /* CDMAC channel registers */
150 bus_space_tag_t sc_dma_rxt; 150 bus_space_tag_t sc_dma_rxt;
151 bus_space_handle_t sc_dma_rxh; /* Rx channel */ 151 bus_space_handle_t sc_dma_rxh; /* Rx channel */
152 bus_space_handle_t sc_dma_rsh; /* Rx status */ 152 bus_space_handle_t sc_dma_rsh; /* Rx status */
153 153
154 bus_space_tag_t sc_dma_txt; 154 bus_space_tag_t sc_dma_txt;
155 bus_space_handle_t sc_dma_txh; /* Tx channel */ 155 bus_space_handle_t sc_dma_txh; /* Tx channel */
156 bus_space_handle_t sc_dma_tsh; /* Tx status */ 156 bus_space_handle_t sc_dma_tsh; /* Tx status */
157 157
158 struct temac_txsoft sc_txsoft[TEMAC_TXQLEN]; 158 struct temac_txsoft sc_txsoft[TEMAC_TXQLEN];
159 struct temac_rxsoft sc_rxsoft[TEMAC_NRXDESC]; 159 struct temac_rxsoft sc_rxsoft[TEMAC_NRXDESC];
160 160
161 struct callout sc_rx_timo; 161 struct callout sc_rx_timo;
162 struct callout sc_mii_tick; 162 struct callout sc_mii_tick;
163 struct mii_data sc_mii; 163 struct mii_data sc_mii;
164 164
165 bus_dmamap_t sc_control_dmap; 165 bus_dmamap_t sc_control_dmap;
166#define sc_cdaddr sc_control_dmap->dm_segs[0].ds_addr 166#define sc_cdaddr sc_control_dmap->dm_segs[0].ds_addr
167 167
168 struct temac_control *sc_control_data; 168 struct temac_control *sc_control_data;
169#define sc_rxdescs sc_control_data->cd_rxdesc 169#define sc_rxdescs sc_control_data->cd_rxdesc
170#define sc_txdescs sc_control_data->cd_txdesc 170#define sc_txdescs sc_control_data->cd_txdesc
171 171
172 int sc_txbusy; 172 int sc_txbusy;
173 173
174 int sc_txfree; 174 int sc_txfree;
175 int sc_txcur; 175 int sc_txcur;
176 int sc_txreap; 176 int sc_txreap;
177 177
178 int sc_rxreap; 178 int sc_rxreap;
179 179
180 int sc_txsfree; 180 int sc_txsfree;
181 int sc_txscur; 181 int sc_txscur;
182 int sc_txsreap; 182 int sc_txsreap;
183 183
184 int sc_dead; /* Rx/Tx DMA error (fatal) */ 184 int sc_dead; /* Rx/Tx DMA error (fatal) */
185 int sc_rx_drained; 185 int sc_rx_drained;
186 186
187 int sc_rx_chan; 187 int sc_rx_chan;
188 int sc_tx_chan; 188 int sc_tx_chan;
189 189
190 void *sc_sdhook; 190 void *sc_sdhook;
191 void *sc_rx_ih; 191 void *sc_rx_ih;
192 void *sc_tx_ih; 192 void *sc_tx_ih;
193 193
194 bus_dma_tag_t sc_dmat; 194 bus_dma_tag_t sc_dmat;
195}; 195};
196 196
197/* Device interface. */ 197/* Device interface. */
198static void temac_attach(device_t, device_t, void *); 198static void temac_attach(device_t, device_t, void *);
199 199
200/* Ifnet interface. */ 200/* Ifnet interface. */
201static int temac_init(struct ifnet *); 201static int temac_init(struct ifnet *);
202static int temac_ioctl(struct ifnet *, u_long, void *); 202static int temac_ioctl(struct ifnet *, u_long, void *);
203static void temac_start(struct ifnet *); 203static void temac_start(struct ifnet *);
204static void temac_stop(struct ifnet *, int); 204static void temac_stop(struct ifnet *, int);
205 205
206/* Media management. */ 206/* Media management. */
207static int temac_mii_readreg(device_t, int, int, uint16_t *); 207static int temac_mii_readreg(device_t, int, int, uint16_t *);
208static void temac_mii_statchg(struct ifnet *); 208static void temac_mii_statchg(struct ifnet *);
209static void temac_mii_tick(void *); 209static void temac_mii_tick(void *);
210static int temac_mii_writereg(device_t, int, int, uint16_t); 210static int temac_mii_writereg(device_t, int, int, uint16_t);
211 211
212/* Indirect hooks. */ 212/* Indirect hooks. */
213static void temac_shutdown(void *); 213static void temac_shutdown(void *);
214static void temac_rx_intr(void *); 214static void temac_rx_intr(void *);
215static void temac_tx_intr(void *); 215static void temac_tx_intr(void *);
216 216
217/* Tools. */ 217/* Tools. */
218static inline void temac_rxcdsync(struct temac_softc *, int, int, int); 218static inline void temac_rxcdsync(struct temac_softc *, int, int, int);
219static inline void temac_txcdsync(struct temac_softc *, int, int, int); 219static inline void temac_txcdsync(struct temac_softc *, int, int, int);
220static void temac_txreap(struct temac_softc *); 220static void temac_txreap(struct temac_softc *);
221static void temac_rxreap(struct temac_softc *); 221static void temac_rxreap(struct temac_softc *);
222static int temac_rxalloc(struct temac_softc *, int, int); 222static int temac_rxalloc(struct temac_softc *, int, int);
223static void temac_rxtimo(void *); 223static void temac_rxtimo(void *);
224static void temac_rxdrain(struct temac_softc *); 224static void temac_rxdrain(struct temac_softc *);
225static void temac_reset(struct temac_softc *); 225static void temac_reset(struct temac_softc *);
226static void temac_txkick(struct temac_softc *); 226static void temac_txkick(struct temac_softc *);
227 227
228/* Register access. */ 228/* Register access. */
229static inline void gmi_write_8(uint32_t, uint32_t, uint32_t); 229static inline void gmi_write_8(uint32_t, uint32_t, uint32_t);
230static inline void gmi_write_4(uint32_t, uint32_t); 230static inline void gmi_write_4(uint32_t, uint32_t);
231static inline void gmi_read_8(uint32_t, uint32_t *, uint32_t *); 231static inline void gmi_read_8(uint32_t, uint32_t *, uint32_t *);
232static inline uint32_t gmi_read_4(uint32_t); 232static inline uint32_t gmi_read_4(uint32_t);
233static inline void hif_wait_stat(uint32_t); 233static inline int hif_wait_stat(uint32_t);
234 234
235#define cdmac_rx_stat(sc) \ 235#define cdmac_rx_stat(sc) \
236 bus_space_read_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0 /* XXX hack */) 236 bus_space_read_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0 /* XXX hack */)
237 237
238#define cdmac_rx_reset(sc) \ 238#define cdmac_rx_reset(sc) \
239 bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0, CDMAC_STAT_RESET) 239 bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rsh, 0, CDMAC_STAT_RESET)
240 240
241#define cdmac_rx_start(sc, val) \ 241#define cdmac_rx_start(sc, val) \
242 bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rxh, CDMAC_CURDESC, (val)) 242 bus_space_write_4((sc)->sc_dma_rxt, (sc)->sc_dma_rxh, CDMAC_CURDESC, (val))
243 243
244#define cdmac_tx_stat(sc) \ 244#define cdmac_tx_stat(sc) \
245 bus_space_read_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0 /* XXX hack */) 245 bus_space_read_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0 /* XXX hack */)
246 246
247#define cdmac_tx_reset(sc) \ 247#define cdmac_tx_reset(sc) \
248 bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0, CDMAC_STAT_RESET) 248 bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_tsh, 0, CDMAC_STAT_RESET)
249 249
250#define cdmac_tx_start(sc, val) \ 250#define cdmac_tx_start(sc, val) \
251 bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_txh, CDMAC_CURDESC, (val)) 251 bus_space_write_4((sc)->sc_dma_txt, (sc)->sc_dma_txh, CDMAC_CURDESC, (val))
252 252
253 253
254CFATTACH_DECL_NEW(temac, sizeof(struct temac_softc), 254CFATTACH_DECL_NEW(temac, sizeof(struct temac_softc),
255 xcvbus_child_match, temac_attach, NULL, NULL); 255 xcvbus_child_match, temac_attach, NULL, NULL);
256 256
257 257
258/* 258/*
259 * Private bus utilities. 259 * Private bus utilities.
260 */ 260 */
261static inline int 261static inline int
262hif_wait_stat(uint32_t mask) 262hif_wait_stat(uint32_t mask)
263{ 263{
264 int i = 0; 264 int i = 0;
265 int rv = 0; 265 int rv = 0;
266 266
267 while (mask != (mfidcr(IDCR_HIF_STAT) & mask)) { 267 while (mask != (mfidcr(IDCR_HIF_STAT) & mask)) {
268 if (i++ > 100) { 268 if (i++ > 100) {
269 printf("%s: timeout waiting for 0x%08x\n", 269 printf("%s: timeout waiting for 0x%08x\n",
270 __func__, mask); 270 __func__, mask);
271 rv = ETIMEDOUT; 271 rv = ETIMEDOUT;
272 break; 272 break;
273 } 273 }
274 delay(5); 274 delay(5);
275 } 275 }
276 276
277 TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i)); 277 TRACEREG(("%s: stat %#08x loops %d\n", __func__, mask, i));
278 return rv; 278 return rv;
279} 279}
280 280
281static inline void 281static inline void
282gmi_write_4(uint32_t addr, uint32_t lo) 282gmi_write_4(uint32_t addr, uint32_t lo)
283{ 283{
284 mtidcr(IDCR_HIF_ARG0, lo); 284 mtidcr(IDCR_HIF_ARG0, lo);
285 mtidcr(IDCR_HIF_CTRL, (addr & HIF_CTRL_GMIADDR) | HIF_CTRL_WRITE); 285 mtidcr(IDCR_HIF_CTRL, (addr & HIF_CTRL_GMIADDR) | HIF_CTRL_WRITE);
286 hif_wait_stat(HIF_STAT_GMIWR); 286 hif_wait_stat(HIF_STAT_GMIWR);
287 287
288 TRACEREG(("%s: %#08x <- %#08x\n", __func__, addr, lo)); 288 TRACEREG(("%s: %#08x <- %#08x\n", __func__, addr, lo));
289} 289}
290 290
291static inline void 291static inline void
292gmi_write_8(uint32_t addr, uint32_t lo, uint32_t hi) 292gmi_write_8(uint32_t addr, uint32_t lo, uint32_t hi)
293{ 293{
294 mtidcr(IDCR_HIF_ARG1, hi); 294 mtidcr(IDCR_HIF_ARG1, hi);
295 gmi_write_4(addr, lo); 295 gmi_write_4(addr, lo);
296} 296}
297 297
298static inline void 298static inline void
299gmi_read_8(uint32_t addr, uint32_t *lo, uint32_t *hi) 299gmi_read_8(uint32_t addr, uint32_t *lo, uint32_t *hi)
300{ 300{
301 *lo = gmi_read_4(addr); 301 *lo = gmi_read_4(addr);
302 *hi = mfidcr(IDCR_HIF_ARG1); 302 *hi = mfidcr(IDCR_HIF_ARG1);
303} 303}
304 304
305static inline uint32_t 305static inline uint32_t
306gmi_read_4(uint32_t addr) 306gmi_read_4(uint32_t addr)
307{ 307{
308 uint32_t res; 308 uint32_t res;
309 309
310 mtidcr(IDCR_HIF_CTRL, addr & HIF_CTRL_GMIADDR); 310 mtidcr(IDCR_HIF_CTRL, addr & HIF_CTRL_GMIADDR);
311 hif_wait_stat(HIF_STAT_GMIRR); 311 hif_wait_stat(HIF_STAT_GMIRR);
312 312
313 res = mfidcr(IDCR_HIF_ARG0); 313 res = mfidcr(IDCR_HIF_ARG0);
314 TRACEREG(("%s: %#08x -> %#08x\n", __func__, addr, res)); 314 TRACEREG(("%s: %#08x -> %#08x\n", __func__, addr, res));
315 return (res); 315 return (res);
316} 316}
317 317
318/* 318/*
319 * Generic device. 319 * Generic device.
320 */ 320 */
321static void 321static void
322temac_attach(device_t parent, device_t self, void *aux) 322temac_attach(device_t parent, device_t self, void *aux)
323{ 323{
324 struct xcvbus_attach_args *vaa = aux; 324 struct xcvbus_attach_args *vaa = aux;
325 struct ll_dmac *rx = vaa->vaa_rx_dmac; 325 struct ll_dmac *rx = vaa->vaa_rx_dmac;
326 struct ll_dmac *tx = vaa->vaa_tx_dmac; 326 struct ll_dmac *tx = vaa->vaa_tx_dmac;
327 struct temac_softc *sc = device_private(self); 327 struct temac_softc *sc = device_private(self);
328 struct ifnet *ifp = &sc->sc_if; 328 struct ifnet *ifp = &sc->sc_if;
329 struct mii_data *mii = &sc->sc_mii; 329 struct mii_data *mii = &sc->sc_mii;
330 uint8_t enaddr[ETHER_ADDR_LEN]; 330 uint8_t enaddr[ETHER_ADDR_LEN];
331 bus_dma_segment_t seg; 331 bus_dma_segment_t seg;
332 int error, nseg, i; 332 int error, nseg, i;
333 const char * const xname = device_xname(self); 333 const char * const xname = device_xname(self);
334 334
335 aprint_normal(": TEMAC\n"); /* XXX will be LL_TEMAC, PLB_TEMAC */ 335 aprint_normal(": TEMAC\n"); /* XXX will be LL_TEMAC, PLB_TEMAC */
336 336
337 KASSERT(rx); 337 KASSERT(rx);
338 KASSERT(tx); 338 KASSERT(tx);
339 339
340 sc->sc_dev = self; 340 sc->sc_dev = self;
341 sc->sc_dmat = vaa->vaa_dmat; 341 sc->sc_dmat = vaa->vaa_dmat;
342 sc->sc_dead = 0; 342 sc->sc_dead = 0;
343 sc->sc_rx_drained = 1; 343 sc->sc_rx_drained = 1;
344 sc->sc_txbusy = 0; 344 sc->sc_txbusy = 0;
345 sc->sc_iot = vaa->vaa_iot; 345 sc->sc_iot = vaa->vaa_iot;
346 sc->sc_dma_rxt = rx->dmac_iot; 346 sc->sc_dma_rxt = rx->dmac_iot;
347 sc->sc_dma_txt = tx->dmac_iot; 347 sc->sc_dma_txt = tx->dmac_iot;
348 348
349 /* 349 /*
350 * Map HIF and receive/transmit dmac registers. 350 * Map HIF and receive/transmit dmac registers.
351 */ 351 */
352 if ((error = bus_space_map(vaa->vaa_iot, vaa->vaa_addr, TEMAC_SIZE, 0, 352 if ((error = bus_space_map(vaa->vaa_iot, vaa->vaa_addr, TEMAC_SIZE, 0,
353 &sc->sc_ioh)) != 0) { 353 &sc->sc_ioh)) != 0) {
354 aprint_error_dev(self, "could not map registers\n"); 354 aprint_error_dev(self, "could not map registers\n");
355 goto fail_0; 355 goto fail_0;
356 } 356 }
357 357
358 if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_ctrl_addr, 358 if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_ctrl_addr,
359 CDMAC_CTRL_SIZE, 0, &sc->sc_dma_rxh)) != 0) { 359 CDMAC_CTRL_SIZE, 0, &sc->sc_dma_rxh)) != 0) {
360 aprint_error_dev(self, "could not map Rx control registers\n"); 360 aprint_error_dev(self, "could not map Rx control registers\n");
361 goto fail_0; 361 goto fail_0;
362 } 362 }
363 if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_stat_addr, 363 if ((error = bus_space_map(sc->sc_dma_rxt, rx->dmac_stat_addr,
364 CDMAC_STAT_SIZE, 0, &sc->sc_dma_rsh)) != 0) { 364 CDMAC_STAT_SIZE, 0, &sc->sc_dma_rsh)) != 0) {
365 aprint_error_dev(self, "could not map Rx status register\n"); 365 aprint_error_dev(self, "could not map Rx status register\n");
366 goto fail_0; 366 goto fail_0;
367 } 367 }
368 368
369 if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_ctrl_addr, 369 if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_ctrl_addr,
370 CDMAC_CTRL_SIZE, 0, &sc->sc_dma_txh)) != 0) { 370 CDMAC_CTRL_SIZE, 0, &sc->sc_dma_txh)) != 0) {
371 aprint_error_dev(self, "could not map Tx control registers\n"); 371 aprint_error_dev(self, "could not map Tx control registers\n");
372 goto fail_0; 372 goto fail_0;
373 } 373 }
374 if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_stat_addr, 374 if ((error = bus_space_map(sc->sc_dma_txt, tx->dmac_stat_addr,
375 CDMAC_STAT_SIZE, 0, &sc->sc_dma_tsh)) != 0) { 375 CDMAC_STAT_SIZE, 0, &sc->sc_dma_tsh)) != 0) {
376 aprint_error_dev(self, "could not map Tx status register\n"); 376 aprint_error_dev(self, "could not map Tx status register\n");
377 goto fail_0; 377 goto fail_0;
378 } 378 }
379 379
380 /* 380 /*
381 * Allocate and initialize DMA control chains. 381 * Allocate and initialize DMA control chains.
382 */ 382 */
383 if ((error = bus_dmamem_alloc(sc->sc_dmat, 383 if ((error = bus_dmamem_alloc(sc->sc_dmat,
384 sizeof(struct temac_control), 8, 0, &seg, 1, &nseg, 0)) != 0) { 384 sizeof(struct temac_control), 8, 0, &seg, 1, &nseg, 0)) != 0) {
385 aprint_error_dev(self, "could not allocate control data\n"); 385 aprint_error_dev(self, "could not allocate control data\n");
386 goto fail_0; 386 goto fail_0;
387 } 387 }
388 388
389 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, 389 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
390 sizeof(struct temac_control), 390 sizeof(struct temac_control),
391 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) { 391 (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
392 aprint_error_dev(self, "could not map control data\n"); 392 aprint_error_dev(self, "could not map control data\n");
393 goto fail_1; 393 goto fail_1;
394 } 394 }
395 395
396 if ((error = bus_dmamap_create(sc->sc_dmat, 396 if ((error = bus_dmamap_create(sc->sc_dmat,
397 sizeof(struct temac_control), 1, 397 sizeof(struct temac_control), 1,
398 sizeof(struct temac_control), 0, 0, &sc->sc_control_dmap)) != 0) { 398 sizeof(struct temac_control), 0, 0, &sc->sc_control_dmap)) != 0) {
399 aprint_error_dev(self, 399 aprint_error_dev(self,
400 "could not create control data DMA map\n"); 400 "could not create control data DMA map\n");
401 goto fail_2; 401 goto fail_2;
402 } 402 }
403 403
404 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_control_dmap, 404 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_control_dmap,
405 sc->sc_control_data, sizeof(struct temac_control), NULL, 0)) != 0) { 405 sc->sc_control_data, sizeof(struct temac_control), NULL, 0)) != 0) {
406 aprint_error_dev(self, "could not load control data DMA map\n"); 406 aprint_error_dev(self, "could not load control data DMA map\n");
407 goto fail_3; 407 goto fail_3;
408 } 408 }
409 409
410 /* 410 /*
411 * Link descriptor chains. 411 * Link descriptor chains.
412 */ 412 */
413 memset(sc->sc_control_data, 0, sizeof(struct temac_control)); 413 memset(sc->sc_control_data, 0, sizeof(struct temac_control));
414 414
415 for (i = 0; i < TEMAC_NTXDESC; i++) { 415 for (i = 0; i < TEMAC_NTXDESC; i++) {
416 sc->sc_txdescs[i].desc_next = sc->sc_cdaddr + 416 sc->sc_txdescs[i].desc_next = sc->sc_cdaddr +
417 TEMAC_TXDOFF(TEMAC_TXNEXT(i)); 417 TEMAC_TXDOFF(TEMAC_TXNEXT(i));
418 sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE; 418 sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE;
419 } 419 }
420 for (i = 0; i < TEMAC_NRXDESC; i++) { 420 for (i = 0; i < TEMAC_NRXDESC; i++) {
421 sc->sc_rxdescs[i].desc_next = sc->sc_cdaddr + 421 sc->sc_rxdescs[i].desc_next = sc->sc_cdaddr +
422 TEMAC_RXDOFF(TEMAC_RXNEXT(i)); 422 TEMAC_RXDOFF(TEMAC_RXNEXT(i));
423 sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE; 423 sc->sc_txdescs[i].desc_stat = CDMAC_STAT_DONE;
424 } 424 }
425 425
426 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 0, 426 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 0,
427 sizeof(struct temac_control), 427 sizeof(struct temac_control),
428 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 428 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
429 429
430 /* 430 /*
431 * Initialize software state for transmit/receive jobs. 431 * Initialize software state for transmit/receive jobs.
432 */ 432 */
433 for (i = 0; i < TEMAC_TXQLEN; i++) { 433 for (i = 0; i < TEMAC_TXQLEN; i++) {
434 if ((error = bus_dmamap_create(sc->sc_dmat, 434 if ((error = bus_dmamap_create(sc->sc_dmat,
435 ETHER_MAX_LEN_JUMBO, TEMAC_NTXSEG, ETHER_MAX_LEN_JUMBO, 435 ETHER_MAX_LEN_JUMBO, TEMAC_NTXSEG, ETHER_MAX_LEN_JUMBO,
436 0, 0, &sc->sc_txsoft[i].txs_dmap)) != 0) { 436 0, 0, &sc->sc_txsoft[i].txs_dmap)) != 0) {
437 aprint_error_dev(self, 437 aprint_error_dev(self,
438 "could not create Tx DMA map %d\n", 438 "could not create Tx DMA map %d\n",
439 i); 439 i);
440 goto fail_4; 440 goto fail_4;
441 } 441 }
442 sc->sc_txsoft[i].txs_mbuf = NULL; 442 sc->sc_txsoft[i].txs_mbuf = NULL;
443 sc->sc_txsoft[i].txs_last = 0; 443 sc->sc_txsoft[i].txs_last = 0;
444 } 444 }
445 445
446 for (i = 0; i < TEMAC_NRXDESC; i++) { 446 for (i = 0; i < TEMAC_NRXDESC; i++) {
447 if ((error = bus_dmamap_create(sc->sc_dmat, 447 if ((error = bus_dmamap_create(sc->sc_dmat,
448 MCLBYTES, TEMAC_NRXSEG, MCLBYTES, 0, 0, 448 MCLBYTES, TEMAC_NRXSEG, MCLBYTES, 0, 0,
449 &sc->sc_rxsoft[i].rxs_dmap)) != 0) { 449 &sc->sc_rxsoft[i].rxs_dmap)) != 0) {
450 aprint_error_dev(self, 450 aprint_error_dev(self,
451 "could not create Rx DMA map %d\n", i); 451 "could not create Rx DMA map %d\n", i);
452 goto fail_5; 452 goto fail_5;
453 } 453 }
454 sc->sc_rxsoft[i].rxs_mbuf = NULL; 454 sc->sc_rxsoft[i].rxs_mbuf = NULL;
455 } 455 }
456 456
457 /* 457 /*
458 * Setup transfer interrupt handlers. 458 * Setup transfer interrupt handlers.
459 */ 459 */
460 error = ENOMEM; 460 error = ENOMEM;
461 461
462 sc->sc_rx_ih = ll_dmac_intr_establish(rx->dmac_chan, 462 sc->sc_rx_ih = ll_dmac_intr_establish(rx->dmac_chan,
463 temac_rx_intr, sc); 463 temac_rx_intr, sc);
464 if (sc->sc_rx_ih == NULL) { 464 if (sc->sc_rx_ih == NULL) {
465 aprint_error_dev(self, "could not establish Rx interrupt\n"); 465 aprint_error_dev(self, "could not establish Rx interrupt\n");
466 goto fail_5; 466 goto fail_5;
467 } 467 }
468 468
469 sc->sc_tx_ih = ll_dmac_intr_establish(tx->dmac_chan, 469 sc->sc_tx_ih = ll_dmac_intr_establish(tx->dmac_chan,
470 temac_tx_intr, sc); 470 temac_tx_intr, sc);
471 if (sc->sc_tx_ih == NULL) { 471 if (sc->sc_tx_ih == NULL) {
472 aprint_error_dev(self, "could not establish Tx interrupt\n"); 472 aprint_error_dev(self, "could not establish Tx interrupt\n");
473 goto fail_6; 473 goto fail_6;
474 } 474 }
475 475
476 /* XXXFreza: faked, should read unicast address filter. */ 476 /* XXXFreza: faked, should read unicast address filter. */
477 enaddr[0] = 0x00; 477 enaddr[0] = 0x00;
478 enaddr[1] = 0x11; 478 enaddr[1] = 0x11;
479 enaddr[2] = 0x17; 479 enaddr[2] = 0x17;
480 enaddr[3] = 0xff; 480 enaddr[3] = 0xff;
481 enaddr[4] = 0xff; 481 enaddr[4] = 0xff;
482 enaddr[5] = 0x01; 482 enaddr[5] = 0x01;
483 483
484 /* 484 /*
485 * Initialize the TEMAC. 485 * Initialize the TEMAC.
486 */ 486 */
487 temac_reset(sc); 487 temac_reset(sc);
488 488
489 /* Configure MDIO link. */ 489 /* Configure MDIO link. */
490 gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO); 490 gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
491 491
492 /* Initialize PHY. */ 492 /* Initialize PHY. */
493 mii->mii_ifp = ifp; 493 mii->mii_ifp = ifp;
494 mii->mii_readreg = temac_mii_readreg; 494 mii->mii_readreg = temac_mii_readreg;
495 mii->mii_writereg = temac_mii_writereg; 495 mii->mii_writereg = temac_mii_writereg;
496 mii->mii_statchg = temac_mii_statchg; 496 mii->mii_statchg = temac_mii_statchg;
497 sc->sc_ec.ec_mii = mii; 497 sc->sc_ec.ec_mii = mii;
498 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 498 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
499 499
500 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 500 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
501 MII_OFFSET_ANY, 0); 501 MII_OFFSET_ANY, 0);
502 if (LIST_FIRST(&mii->mii_phys) == NULL) { 502 if (LIST_FIRST(&mii->mii_phys) == NULL) {
503 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 503 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
504 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 504 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
505 } else { 505 } else {
506 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 506 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
507 } 507 }
508 508
509 /* Hold PHY in reset. */ 509 /* Hold PHY in reset. */
510 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, TEMAC_RESET_PHY); 510 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, TEMAC_RESET_PHY);
511 511
512 /* Reset EMAC. */ 512 /* Reset EMAC. */
513 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, 513 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
514 TEMAC_RESET_EMAC); 514 TEMAC_RESET_EMAC);
515 delay(10000); 515 delay(10000);
516 516
517 /* Reset peripheral, awakes PHY and EMAC. */ 517 /* Reset peripheral, awakes PHY and EMAC. */
518 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET, 518 bus_space_write_4(sc->sc_iot, sc->sc_ioh, TEMAC_RESET,
519 TEMAC_RESET_PERIPH); 519 TEMAC_RESET_PERIPH);
520 delay(40000); 520 delay(40000);
521 521
522 /* (Re-)Configure MDIO link. */ 522 /* (Re-)Configure MDIO link. */
523 gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO); 523 gmi_write_4(TEMAC_GMI_MGMTCF, GMI_MGMT_CLKDIV_100MHz | GMI_MGMT_MDIO);
524 524
525 /* 525 /*
526 * Hook up with network stack. 526 * Hook up with network stack.
527 */ 527 */
528 strcpy(ifp->if_xname, xname); 528 strcpy(ifp->if_xname, xname);
529 ifp->if_softc = sc; 529 ifp->if_softc = sc;
530 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 530 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
531 ifp->if_ioctl = temac_ioctl; 531 ifp->if_ioctl = temac_ioctl;
532 ifp->if_start = temac_start; 532 ifp->if_start = temac_start;
533 ifp->if_init = temac_init; 533 ifp->if_init = temac_init;
534 ifp->if_stop = temac_stop; 534 ifp->if_stop = temac_stop;
535 ifp->if_watchdog = NULL; 535 ifp->if_watchdog = NULL;
536 IFQ_SET_READY(&ifp->if_snd); 536 IFQ_SET_READY(&ifp->if_snd);
537 IFQ_SET_MAXLEN(&ifp->if_snd, TEMAC_TXQLEN); 537 IFQ_SET_MAXLEN(&ifp->if_snd, TEMAC_TXQLEN);
538 538
539 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; 539 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
540 540
541 if_attach(ifp); 541 if_attach(ifp);
542 ether_ifattach(ifp, enaddr); 542 ether_ifattach(ifp, enaddr);
543 543
544 sc->sc_sdhook = shutdownhook_establish(temac_shutdown, sc); 544 sc->sc_sdhook = shutdownhook_establish(temac_shutdown, sc);
545 if (sc->sc_sdhook == NULL) 545 if (sc->sc_sdhook == NULL)
546 aprint_error_dev(self, 546 aprint_error_dev(self,
547 "WARNING: unable to establish shutdown hook\n"); 547 "WARNING: unable to establish shutdown hook\n");
548 548
549 callout_setfunc(&sc->sc_mii_tick, temac_mii_tick, sc); 549 callout_setfunc(&sc->sc_mii_tick, temac_mii_tick, sc);
550 callout_setfunc(&sc->sc_rx_timo, temac_rxtimo, sc); 550 callout_setfunc(&sc->sc_rx_timo, temac_rxtimo, sc);
551 551
552 return ; 552 return ;
553 553
554 fail_6: 554 fail_6:
555 ll_dmac_intr_disestablish(rx->dmac_chan, sc->sc_rx_ih); 555 ll_dmac_intr_disestablish(rx->dmac_chan, sc->sc_rx_ih);
556 i = TEMAC_NRXDESC; 556 i = TEMAC_NRXDESC;
557 fail_5: 557 fail_5:
558 for (--i; i >= 0; i--) 558 for (--i; i >= 0; i--)
559 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].rxs_dmap); 559 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxsoft[i].rxs_dmap);
560 i = TEMAC_TXQLEN; 560 i = TEMAC_TXQLEN;
561 fail_4: 561 fail_4:
562 for (--i; i >= 0; i--) 562 for (--i; i >= 0; i--)
563 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].txs_dmap); 563 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txsoft[i].txs_dmap);
564 fail_3: 564 fail_3:
565 bus_dmamap_destroy(sc->sc_dmat, sc->sc_control_dmap); 565 bus_dmamap_destroy(sc->sc_dmat, sc->sc_control_dmap);
566 fail_2: 566 fail_2:
567 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 567 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
568 sizeof(struct temac_control)); 568 sizeof(struct temac_control));
569 fail_1: 569 fail_1:
570 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 570 bus_dmamem_free(sc->sc_dmat, &seg, nseg);
571 fail_0: 571 fail_0:
572 aprint_error_dev(self, "error = %d\n", error); 572 aprint_error_dev(self, "error = %d\n", error);
573} 573}
574 574
/*
 * Network device.
 */
/*
 * Bring the interface up: reset both CDMAC channels, program the EMAC
 * for the current media, force promiscuous mode (address filtering is
 * not implemented yet -- see the XXXFreza note below), reset the Tx/Rx
 * software ring state and, if the Rx ring had been drained, repopulate
 * it and restart the Rx DMA channel.
 *
 * Returns 0 on success, or an errno from ether_mediachange() or from
 * Rx buffer allocation.
 */
static int
temac_init(struct ifnet *ifp)
{
	struct temac_softc *sc = (struct temac_softc *)ifp->if_softc;
	uint32_t rcr, tcr;
	int i, error;

	/* Reset DMA channels. */
	cdmac_tx_reset(sc);
	cdmac_rx_reset(sc);

	/* Set current media. */
	if ((error = ether_mediachange(ifp)) != 0)
		return error;

	/* Start the 1 Hz PHY poll. */
	callout_schedule(&sc->sc_mii_tick, hz);

	/*
	 * Enable EMAC engine: set the Rx/Tx enable bits and clear the
	 * jumbo-frame and in-band-FCS options.
	 */
	rcr = (gmi_read_4(TEMAC_GMI_RXCF1) | GMI_RX_ENABLE) &
	    ~(GMI_RX_JUMBO | GMI_RX_FCS);
	gmi_write_4(TEMAC_GMI_RXCF1, rcr);

	tcr = (gmi_read_4(TEMAC_GMI_TXCF) | GMI_TX_ENABLE) &
	    ~(GMI_TX_JUMBO | GMI_TX_FCS);
	gmi_write_4(TEMAC_GMI_TXCF, tcr);

	/* XXXFreza: Force promiscuous mode, for now. */
	gmi_write_4(TEMAC_GMI_AFM, GMI_AFM_PROMISC);
	ifp->if_flags |= IFF_PROMISC;

	/* Rx/Tx queues are drained -- either from attach() or stop(). */
	sc->sc_txsfree = TEMAC_TXQLEN;
	sc->sc_txsreap = 0;
	sc->sc_txscur = 0;

	sc->sc_txfree = TEMAC_NTXDESC;
	sc->sc_txreap = 0;
	sc->sc_txcur = 0;

	sc->sc_rxreap = 0;

	/* Allocate and map receive buffers. */
	if (sc->sc_rx_drained) {
		for (i = 0; i < TEMAC_NRXDESC; i++) {
			if ((error = temac_rxalloc(sc, i, 1)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "failed to allocate Rx descriptor %d\n",
				    i);
				temac_rxdrain(sc);
				return (error);
			}
		}
		sc->sc_rx_drained = 0;

		/* Push the freshly written descriptors and kick Rx DMA. */
		temac_rxcdsync(sc, 0, TEMAC_NRXDESC,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}
642 642
643static int 643static int
644temac_ioctl(struct ifnet *ifp, u_long cmd, void *data) 644temac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
645{ 645{
646 struct temac_softc *sc = (struct temac_softc *)ifp->if_softc; 646 struct temac_softc *sc = (struct temac_softc *)ifp->if_softc;
647 int s, ret; 647 int s, ret;
648 648
649 s = splnet(); 649 s = splnet();
650 if (sc->sc_dead) 650 if (sc->sc_dead)
651 ret = EIO; 651 ret = EIO;
652 else 652 else
653 ret = ether_ioctl(ifp, cmd, data); 653 ret = ether_ioctl(ifp, cmd, data);
654 splx(s); 654 splx(s);
655 return (ret); 655 return (ret);
656} 656}
657 657
/*
 * if_start handler: drain the interface send queue into the Tx
 * descriptor ring and kick the CDMAC channel.
 *
 * Called with the send queue non-empty; must not block.  Sets
 * IFF_OACTIVE and bails out when either software Tx slots or DMA
 * descriptors run short; temac_txreap() clears the flag again.
 */
static void
temac_start(struct ifnet *ifp)
{
	struct temac_softc *sc = (struct temac_softc *)ifp->if_softc;
	struct temac_txsoft *txs;
	struct mbuf *m;
	bus_dmamap_t dmap;
	int error, head, nsegs, i;

	nsegs = 0;			/* descriptors queued this batch */
	head = sc->sc_txcur;		/* first descriptor of this batch */
	txs = NULL; /* gcc */

	if (sc->sc_dead)
		return;

	KASSERT(sc->sc_txfree >= 0);
	KASSERT(sc->sc_txsfree >= 0);

	/*
	 * Push mbufs into descriptor chain until we drain the interface
	 * queue or run out of descriptors. We'll mark the first segment
	 * as "done" in hope that we might put CDMAC interrupt above IPL_NET
	 * and have it start jobs & mark packets for GC preemtively for
	 * us -- creativity due to limitations in CDMAC transfer engine
	 * (it really consumes lists, not circular queues, AFAICS).
	 *
	 * We schedule one interrupt per Tx batch.
	 */
	while (1) {
		/* Peek only; dequeue once we know we can map the packet. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->sc_txsfree == 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txscur];
		dmap = txs->txs_dmap;

		/* Soft-state slot should be clean; diagnose if it is not. */
		if (txs->txs_mbuf != NULL)
			printf("FOO\n");
		if (txs->txs_last)
			printf("BAR\n");

		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0) {
			if (error == EFBIG) {
				/* Too fragmented for the map: drop it. */
				aprint_error_dev(sc->sc_dev,
				    "Tx consumes too many segments, dropped\n");
				IFQ_DEQUEUE(&ifp->if_snd, m);
				m_freem(m);
				continue;
			} else {
				/* Transient shortage: retry later. */
				aprint_debug_dev(sc->sc_dev,
				    "Tx stall due to resource shortage\n");
				break;
			}
		}

		/*
		 * If we're short on DMA descriptors, notify upper layers
		 * and leave this packet for later.
		 */
		if (dmap->dm_nsegs > sc->sc_txfree) {
			bus_dmamap_unload(sc->sc_dmat, dmap);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		txs->txs_mbuf = m;

		/*
		 * Map the packet into descriptor chain. XXX We'll want
		 * to fill checksum offload commands here.
		 *
		 * We would be in a race if we weren't blocking CDMAC intr
		 * at this point -- we need to be locked against txreap()
		 * because of dmasync ops.
		 */

		temac_txcdsync(sc, sc->sc_txcur, dmap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* One CDMAC descriptor per DMA segment; flag SOP/EOP. */
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sc->sc_txdescs[sc->sc_txcur].desc_addr =
			    dmap->dm_segs[i].ds_addr;
			sc->sc_txdescs[sc->sc_txcur].desc_size =
			    dmap->dm_segs[i].ds_len;
			sc->sc_txdescs[sc->sc_txcur].desc_stat =
			    (i == 0 ? CDMAC_STAT_SOP : 0) |
			    (i == (dmap->dm_nsegs - 1) ? CDMAC_STAT_EOP : 0);

			sc->sc_txcur = TEMAC_TXNEXT(sc->sc_txcur);
		}

		sc->sc_txfree -= dmap->dm_nsegs;
		nsegs += dmap->dm_nsegs;

		sc->sc_txscur = TEMAC_TXSNEXT(sc->sc_txscur);
		sc->sc_txsfree--;
	}

	/* Get data running if we queued any. */
	if (nsegs > 0) {
		int tail = TEMAC_TXINC(sc->sc_txcur, -1);

		/* Mark the last packet in this job. */
		txs->txs_last = 1;

		/* Mark the last descriptor in this job. */
		sc->sc_txdescs[tail].desc_stat |= CDMAC_STAT_STOP |
		    CDMAC_STAT_INTR;
		temac_txcdsync(sc, head, nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		temac_txkick(sc);
#if TEMAC_TXDEBUG > 0
		aprint_debug_dev(sc->sc_dev,
		    "start: txcur %03d -> %03d, nseg %03d\n",
		    head, sc->sc_txcur, nsegs);
#endif
	}
}
788 788
/*
 * if_stop handler: stop the MII poll, reset the hardware, and release
 * all Rx and Tx buffers.  The 'disable' argument is currently unused;
 * teardown is unconditional.
 */
static void
temac_stop(struct ifnet *ifp, int disable)
{
	struct temac_softc *sc = (struct temac_softc *)ifp->if_softc;
	struct temac_txsoft *txs;
	int i;

#if TEMAC_DEBUG > 0
	aprint_debug_dev(sc->sc_dev, "stop\n");
#endif

	/* Down the MII. */
	callout_stop(&sc->sc_mii_tick);
	mii_down(&sc->sc_mii);

	/* Stop the engine. */
	temac_reset(sc);

	/* Drain buffers queues (unconditionally). */
	temac_rxdrain(sc);

	/* Unload and free any mbufs still held in the Tx soft ring. */
	for (i = 0; i < TEMAC_TXQLEN; i++) {
		txs = &sc->sc_txsoft[i];

		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
			txs->txs_last = 0;
		}
	}
	sc->sc_txbusy = 0;

	/* Acknowledge we're down. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
825 825
/*
 * MII read via the host interface (HIF) DCR registers: latch the
 * { phy, reg } address, start a read cycle, and wait for completion.
 *
 * Returns 0 on success with the 16-bit register value in *val, or the
 * (non-zero) hif_wait_stat() error on timeout.
 */
static int
temac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	int rv;

	/* PHY address in bits [9:5], register number in bits [4:0]. */
	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR);

	if ((rv = hif_wait_stat(HIF_STAT_MIIRR)) != 0)
		return rv;

	*val = mfidcr(IDCR_HIF_ARG0) & 0xffff;
	return 0;
}
840 840
/*
 * MII write via the HIF DCRs: stage the 16-bit value first, then latch
 * the { phy, reg } address to start the write cycle, and wait for the
 * MII-write-ready status.  Returns 0 on success, non-zero on timeout.
 */
static int
temac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	/* Stage the data word. */
	mtidcr(IDCR_HIF_ARG0, val);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_WRVAL | HIF_CTRL_WRITE);
	/* Latch the address; this triggers the actual MDIO write. */
	mtidcr(IDCR_HIF_ARG0, (phy << 5) | reg);
	mtidcr(IDCR_HIF_CTRL, TEMAC_GMI_MII_ADDR | HIF_CTRL_WRITE);
	return hif_wait_stat(HIF_STAT_MIIWR);
}
850 850
851static void 851static void
852temac_mii_statchg(struct ifnet *ifp) 852temac_mii_statchg(struct ifnet *ifp)
853{ 853{
854 struct temac_softc *sc = ifp->if_softc; 854 struct temac_softc *sc = ifp->if_softc;
855 uint32_t rcf, tcf, mmc; 855 uint32_t rcf, tcf, mmc;
856 856
857 /* Full/half duplex link. */ 857 /* Full/half duplex link. */
858 rcf = gmi_read_4(TEMAC_GMI_RXCF1); 858 rcf = gmi_read_4(TEMAC_GMI_RXCF1);
859 tcf = gmi_read_4(TEMAC_GMI_TXCF); 859 tcf = gmi_read_4(TEMAC_GMI_TXCF);
860 860
861 if (sc->sc_mii.mii_media_active & IFM_FDX) { 861 if (sc->sc_mii.mii_media_active & IFM_FDX) {
862 gmi_write_4(TEMAC_GMI_RXCF1, rcf & ~GMI_RX_HDX); 862 gmi_write_4(TEMAC_GMI_RXCF1, rcf & ~GMI_RX_HDX);
863 gmi_write_4(TEMAC_GMI_TXCF, tcf & ~GMI_TX_HDX); 863 gmi_write_4(TEMAC_GMI_TXCF, tcf & ~GMI_TX_HDX);
864 } else { 864 } else {
865 gmi_write_4(TEMAC_GMI_RXCF1, rcf | GMI_RX_HDX); 865 gmi_write_4(TEMAC_GMI_RXCF1, rcf | GMI_RX_HDX);
866 gmi_write_4(TEMAC_GMI_TXCF, tcf | GMI_TX_HDX); 866 gmi_write_4(TEMAC_GMI_TXCF, tcf | GMI_TX_HDX);
867 } 867 }
868 868
869 /* Link speed. */ 869 /* Link speed. */
870 mmc = gmi_read_4(TEMAC_GMI_MMC) & ~GMI_MMC_SPEED_MASK; 870 mmc = gmi_read_4(TEMAC_GMI_MMC) & ~GMI_MMC_SPEED_MASK;
871 871
872 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 872 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
873 case IFM_10_T: 873 case IFM_10_T:
874 /* 874 /*
875 * XXXFreza: the GMAC is not happy with 10Mbit ethernet, 875 * XXXFreza: the GMAC is not happy with 10Mbit ethernet,
876 * although the documentation claims it's supported. Maybe 876 * although the documentation claims it's supported. Maybe
877 * it's just my equipment... 877 * it's just my equipment...
878 */ 878 */
879 mmc |= GMI_MMC_SPEED_10; 879 mmc |= GMI_MMC_SPEED_10;
880 break; 880 break;
881 case IFM_100_TX: 881 case IFM_100_TX:
882 mmc |= GMI_MMC_SPEED_100; 882 mmc |= GMI_MMC_SPEED_100;
883 break; 883 break;
884 case IFM_1000_T: 884 case IFM_1000_T:
885 mmc |= GMI_MMC_SPEED_1000; 885 mmc |= GMI_MMC_SPEED_1000;
886 break; 886 break;
887 } 887 }
888 888
889 gmi_write_4(TEMAC_GMI_MMC, mmc); 889 gmi_write_4(TEMAC_GMI_MMC, mmc);
890} 890}
891 891
892static void 892static void
893temac_mii_tick(void *arg) 893temac_mii_tick(void *arg)
894{ 894{
895 struct temac_softc *sc = (struct temac_softc *)arg; 895 struct temac_softc *sc = (struct temac_softc *)arg;
896 int s; 896 int s;
897 897
898 if (!device_is_active(sc->sc_dev)) 898 if (!device_is_active(sc->sc_dev))
899 return; 899 return;
900 900
901 s = splnet(); 901 s = splnet();
902 mii_tick(&sc->sc_mii); 902 mii_tick(&sc->sc_mii);
903 splx(s); 903 splx(s);
904 904
905 callout_schedule(&sc->sc_mii_tick, hz); 905 callout_schedule(&sc->sc_mii_tick, hz);
906} 906}
907 907
/*
 * External hooks.
 */
/*
 * Shutdown hook: quiesce the device so it cannot DMA into memory while
 * the system reboots.
 */
static void
temac_shutdown(void *arg)
{
	struct temac_softc *sc = arg;

	temac_reset(sc);
}
918 918
919static void 919static void
920temac_tx_intr(void *arg) 920temac_tx_intr(void *arg)
921{ 921{
922 struct temac_softc *sc = (struct temac_softc *)arg; 922 struct temac_softc *sc = (struct temac_softc *)arg;
923 uint32_t stat; 923 uint32_t stat;
924 924
925 /* XXX: We may need to splnet() here if cdmac(4) changes. */ 925 /* XXX: We may need to splnet() here if cdmac(4) changes. */
926 926
927 if ((stat = cdmac_tx_stat(sc)) & CDMAC_STAT_ERROR) { 927 if ((stat = cdmac_tx_stat(sc)) & CDMAC_STAT_ERROR) {
928 aprint_error_dev(sc->sc_dev, 928 aprint_error_dev(sc->sc_dev,
929 "transmit DMA is toast (%#08x), halted!\n", 929 "transmit DMA is toast (%#08x), halted!\n",
930 stat); 930 stat);
931 931
932 /* XXXFreza: how to signal this upstream? */ 932 /* XXXFreza: how to signal this upstream? */
933 temac_stop(&sc->sc_if, 1); 933 temac_stop(&sc->sc_if, 1);
934 sc->sc_dead = 1; 934 sc->sc_dead = 1;
935 } 935 }
936 936
937#if TEMAC_DEBUG > 0 937#if TEMAC_DEBUG > 0
938 aprint_debug_dev(sc->sc_dev, "tx intr 0x%08x\n", stat); 938 aprint_debug_dev(sc->sc_dev, "tx intr 0x%08x\n", stat);
939#endif 939#endif
940 temac_txreap(sc); 940 temac_txreap(sc);
941} 941}
942 942
943static void 943static void
944temac_rx_intr(void *arg) 944temac_rx_intr(void *arg)
945{ 945{
946 struct temac_softc *sc = (struct temac_softc *)arg; 946 struct temac_softc *sc = (struct temac_softc *)arg;
947 uint32_t stat; 947 uint32_t stat;
948 948
949 /* XXX: We may need to splnet() here if cdmac(4) changes. */ 949 /* XXX: We may need to splnet() here if cdmac(4) changes. */
950 950
951 if ((stat = cdmac_rx_stat(sc)) & CDMAC_STAT_ERROR) { 951 if ((stat = cdmac_rx_stat(sc)) & CDMAC_STAT_ERROR) {
952 aprint_error_dev(sc->sc_dev, 952 aprint_error_dev(sc->sc_dev,
953 "receive DMA is toast (%#08x), halted!\n", 953 "receive DMA is toast (%#08x), halted!\n",
954 stat); 954 stat);
955 955
956 /* XXXFreza: how to signal this upstream? */ 956 /* XXXFreza: how to signal this upstream? */
957 temac_stop(&sc->sc_if, 1); 957 temac_stop(&sc->sc_if, 1);
958 sc->sc_dead = 1; 958 sc->sc_dead = 1;
959 } 959 }
960 960
961#if TEMAC_DEBUG > 0 961#if TEMAC_DEBUG > 0
962 aprint_debug_dev(sc->sc_dev, "rx intr 0x%08x\n", stat); 962 aprint_debug_dev(sc->sc_dev, "rx intr 0x%08x\n", stat);
963#endif 963#endif
964 temac_rxreap(sc); 964 temac_rxreap(sc);
965} 965}
966 966
/*
 * Utils.
 */
970static inline void 970static inline void
971temac_txcdsync(struct temac_softc *sc, int first, int cnt, int flag) 971temac_txcdsync(struct temac_softc *sc, int first, int cnt, int flag)
972{ 972{
973 if ((first + cnt) > TEMAC_NTXDESC) { 973 if ((first + cnt) > TEMAC_NTXDESC) {
974 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 974 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
975 TEMAC_TXDOFF(first), 975 TEMAC_TXDOFF(first),
976 sizeof(struct cdmac_descr) * (TEMAC_NTXDESC - first), 976 sizeof(struct cdmac_descr) * (TEMAC_NTXDESC - first),
977 flag); 977 flag);
978 cnt = (first + cnt) % TEMAC_NTXDESC; 978 cnt = (first + cnt) % TEMAC_NTXDESC;
979 first = 0; 979 first = 0;
980 } 980 }
981 981
982 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 982 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
983 TEMAC_TXDOFF(first), 983 TEMAC_TXDOFF(first),
984 sizeof(struct cdmac_descr) * cnt, 984 sizeof(struct cdmac_descr) * cnt,
985 flag); 985 flag);
986} 986}
987 987
988static inline void 988static inline void
989temac_rxcdsync(struct temac_softc *sc, int first, int cnt, int flag) 989temac_rxcdsync(struct temac_softc *sc, int first, int cnt, int flag)
990{ 990{
991 if ((first + cnt) > TEMAC_NRXDESC) { 991 if ((first + cnt) > TEMAC_NRXDESC) {
992 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 992 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
993 TEMAC_RXDOFF(first), 993 TEMAC_RXDOFF(first),
994 sizeof(struct cdmac_descr) * (TEMAC_NRXDESC - first), 994 sizeof(struct cdmac_descr) * (TEMAC_NRXDESC - first),
995 flag); 995 flag);
996 cnt = (first + cnt) % TEMAC_NRXDESC; 996 cnt = (first + cnt) % TEMAC_NRXDESC;
997 first = 0; 997 first = 0;
998 } 998 }
999 999
1000 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap, 1000 bus_dmamap_sync(sc->sc_dmat, sc->sc_control_dmap,
1001 TEMAC_RXDOFF(first), 1001 TEMAC_RXDOFF(first),
1002 sizeof(struct cdmac_descr) * cnt, 1002 sizeof(struct cdmac_descr) * cnt,
1003 flag); 1003 flag);
1004} 1004}
1005 1005
/*
 * Reap completed transmit jobs: free mbufs and reclaim descriptors up
 * to and including the packet marked last-in-batch, then restart the
 * channel if more work was queued meanwhile.  Called from tx_intr only.
 */
static void
temac_txreap(struct temac_softc *sc)
{
	struct temac_txsoft *txs;
	bus_dmamap_t dmap;
	int sent = 0;

	/*
	 * Transmit interrupts happen on the last descriptor of Tx jobs.
	 * Hence, every time we're called (and we assume txintr is our
	 * only caller!), we reap packets upto and including the one
	 * marked as last-in-batch.
	 *
	 * XXX we rely on that we make EXACTLY one batch per intr, no more
	 */
	while (sc->sc_txsfree != TEMAC_TXQLEN) {
		txs = &sc->sc_txsoft[sc->sc_txsreap];
		dmap = txs->txs_dmap;

		/* Return this packet's descriptors to the free pool. */
		sc->sc_txreap = TEMAC_TXINC(sc->sc_txreap, dmap->dm_nsegs);
		sc->sc_txfree += dmap->dm_nsegs;

		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		if_statinc(&sc->sc_if, if_opackets);
		sent = 1;

		sc->sc_txsreap = TEMAC_TXSNEXT(sc->sc_txsreap);
		sc->sc_txsfree++;

		/* End of the batch: channel is idle, maybe restart it. */
		if (txs->txs_last) {
			txs->txs_last = 0;
			sc->sc_txbusy = 0;	/* channel stopped now */

			temac_txkick(sc);
			break;
		}
	}

	/* We made room; let if_start() queue more packets. */
	if (sent && (sc->sc_if.if_flags & IFF_OACTIVE))
		sc->sc_if.if_flags &= ~IFF_OACTIVE;
}
1050 1050
/*
 * (Re)initialize Rx descriptor 'which': release any previous DMA
 * mapping, attach a fresh cluster mbuf, load it into the descriptor,
 * and set the per-descriptor INTR/STOP flags.
 *
 * 'verbose' selects whether allocation failures are logged (init path)
 * or silent (reap path, where a retry will happen later).
 *
 * Returns 0 on success or ENOBUFS / a bus_dma error.  Descriptor
 * pre-/post-sync is the caller's responsibility (see comments below).
 */
static int
temac_rxalloc(struct temac_softc *sc, int which, int verbose)
{
	struct temac_rxsoft *rxs;
	struct mbuf *m;
	uint32_t stat;
	int error;

	rxs = &sc->sc_rxsoft[which];

	/* The mbuf itself is not our problem, just clear DMA related stuff. */
	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
		rxs->rxs_mbuf = NULL;
	}

	/*
	 * We would like to store mbuf and dmap in application specific
	 * fields of the descriptor, but that doesn't work for Rx. Shame
	 * on Xilinx for this (and for the useless timer architecture).
	 *
	 * Hence each descriptor needs its own soft state. We may want
	 * to merge multiple rxs's into a monster mbuf when we support
	 * jumbo frames though. Also, we use single set of indexing
	 * variables for both sc_rxdescs[] and sc_rxsoft[].
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (verbose)
			aprint_debug_dev(sc->sc_dev,
			    "out of Rx header mbufs\n");
		return (ENOBUFS);
	}
	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		if (verbose)
			aprint_debug_dev(sc->sc_dev,
			    "out of Rx cluster mbufs\n");
		m_freem(m);
		return (ENOBUFS);
	}

	rxs->rxs_mbuf = m;
	m->m_pkthdr.len = m->m_len = MCLBYTES;

	/* Make sure the payload after ethernet header is 4-aligned. */
	m_adj(m, 2);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmap, m,
	    BUS_DMA_NOWAIT);
	if (error) {
		if (verbose)
			aprint_debug_dev(sc->sc_dev,
			    "could not map Rx descriptor %d, error = %d\n",
			    which, error);

		rxs->rxs_mbuf = NULL;
		m_freem(m);

		return (error);
	}

	/* Interrupt on designated descriptors; stop at the ring's end. */
	stat =
	    (TEMAC_ISINTR(which) ? CDMAC_STAT_INTR : 0) |
	    (TEMAC_ISLAST(which) ? CDMAC_STAT_STOP : 0);

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmap, 0,
	    rxs->rxs_dmap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Descriptor post-sync, if needed, left to the caller. */

	sc->sc_rxdescs[which].desc_addr = rxs->rxs_dmap->dm_segs[0].ds_addr;
	sc->sc_rxdescs[which].desc_size = rxs->rxs_dmap->dm_segs[0].ds_len;
	sc->sc_rxdescs[which].desc_stat = stat;

	/* Descriptor pre-sync, if needed, left to the caller. */

	return (0);
}
1132 1132
/*
 * Reap completed Rx descriptors: hand good single-descriptor frames to
 * the network stack, recycle bad ones in place, and restart DMA if the
 * reap passed the end of the ring.  Called from interrupt context and
 * from the Rx watchdog callout (temac_rxtimo) at splnet.
 */
static void
temac_rxreap(struct temac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	uint32_t stat, rxstat, rxsize;
	struct mbuf *m;
	int nseg, head, tail;

	head = sc->sc_rxreap;		/* first descriptor we touch */
	tail = 0; /* gcc */		/* quiet maybe-uninitialized warning */
	nseg = 0;			/* descriptors collected this pass */

	/*
	 * Collect finished entries on the Rx list, kick DMA if we hit
	 * the end. DMA will always stop on the last descriptor in chain,
	 * so it will never hit a reap-in-progress descriptor.
	 */
	while (1) {
		/* Maybe we previously failed to refresh this one? */
		if (sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf == NULL) {
			if (temac_rxalloc(sc, sc->sc_rxreap, 0) != 0)
				break;

			sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);
			continue;
		}
		/* Post-sync so we read the descriptor the device wrote. */
		temac_rxcdsync(sc, sc->sc_rxreap, 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		stat = sc->sc_rxdescs[sc->sc_rxreap].desc_stat;
		m = NULL;

		/* Not completed by DMA yet: stop reaping here. */
		if ((stat & CDMAC_STAT_DONE) == 0)
			break;

		/* Count any descriptor we've collected, regardless of status. */
		nseg ++;

		/* XXXFreza: This won't work for jumbo frames. */

		/* A frame must fit one descriptor (SOP and EOP both set). */
		if ((stat & (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) !=
		    (CDMAC_STAT_EOP | CDMAC_STAT_SOP)) {
			aprint_error_dev(sc->sc_dev,
			    "Rx packet doesn't fit in one descriptor, "
			    "stat = %#08x\n", stat);
			goto badframe;
		}

		/* Dissect TEMAC footer if this is end of packet. */
		rxstat = sc->sc_rxdescs[sc->sc_rxreap].desc_rxstat;
		rxsize = sc->sc_rxdescs[sc->sc_rxreap].desc_rxsize &
		    RXSIZE_MASK;

		if ((rxstat & RXSTAT_GOOD) == 0 ||
		    (rxstat & RXSTAT_SICK) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "corrupt Rx packet, rxstat = %#08x\n",
			    rxstat);
			goto badframe;
		}

		/* We are now bound to succeed. */
		bus_dmamap_sync(sc->sc_dmat,
		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap, 0,
		    sc->sc_rxsoft[sc->sc_rxreap].rxs_dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m = sc->sc_rxsoft[sc->sc_rxreap].rxs_mbuf;
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = rxsize;

 badframe:
		/* Get ready for more work. */
		tail = sc->sc_rxreap;
		sc->sc_rxreap = TEMAC_RXNEXT(sc->sc_rxreap);

		/* On failures we reuse the descriptor and go ahead. */
		if (m == NULL) {
			sc->sc_rxdescs[tail].desc_stat =
			    (TEMAC_ISINTR(tail) ? CDMAC_STAT_INTR : 0) |
			    (TEMAC_ISLAST(tail) ? CDMAC_STAT_STOP : 0);

			if_statinc(ifp, if_ierrors);
			continue;
		}

		if_percpuq_enqueue(ifp->if_percpuq, m);

		/* Refresh descriptor, bail out if we're out of buffers. */
		if (temac_rxalloc(sc, tail, 1) != 0) {
			sc->sc_rxreap = TEMAC_RXINC(sc->sc_rxreap, -1);
			aprint_error_dev(sc->sc_dev, "Rx give up for now\n");
			break;
		}
	}

	/* We may now have a contiguous ready-to-go chunk of descriptors. */
	if (nseg > 0) {
#if TEMAC_RXDEBUG > 0
		aprint_debug_dev(sc->sc_dev,
		    "rxreap: rxreap %03d -> %03d, nseg %03d\n",
		    head, sc->sc_rxreap, nseg);
#endif
		/* Pre-sync the refreshed descriptors before DMA sees them. */
		temac_rxcdsync(sc, head, nseg,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Reaped past the ring's end: restart DMA at descriptor 0. */
		if (TEMAC_ISLAST(tail))
			cdmac_rx_start(sc, sc->sc_cdaddr + TEMAC_RXDOFF(0));
	}

	/* Ensure maximum Rx latency is kept under control. */
	callout_schedule(&sc->sc_rx_timo, hz / TEMAC_RXTIMO_HZ);
}
1246 1246
/*
 * Rx watchdog callout, fired TEMAC_RXTIMO_HZ times per second so the
 * receive ring is reaped even when interrupts are sparse.
 */
static void
temac_rxtimo(void *arg)
{
	struct temac_softc * const sc = arg;
	int spl;

	/* temac_rxreap() expects to run at splnet. */
	spl = splnet();
	temac_rxreap(sc);
	splx(spl);
}
1258 1258
1259static void 1259static void
1260temac_reset(struct temac_softc *sc) 1260temac_reset(struct temac_softc *sc)
1261{ 1261{
1262 uint32_t rcr, tcr; 1262 uint32_t rcr, tcr;
1263 1263
1264 /* Kill CDMAC channels. */ 1264 /* Kill CDMAC channels. */
1265 cdmac_tx_reset(sc); 1265 cdmac_tx_reset(sc);
1266 cdmac_rx_reset(sc); 1266 cdmac_rx_reset(sc);
1267 1267
1268 /* Disable receiver. */ 1268 /* Disable receiver. */
1269 rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE; 1269 rcr = gmi_read_4(TEMAC_GMI_RXCF1) & ~GMI_RX_ENABLE;
1270 gmi_write_4(TEMAC_GMI_RXCF1, rcr); 1270 gmi_write_4(TEMAC_GMI_RXCF1, rcr);
1271 1271
1272 /* Disable transmitter. */ 1272 /* Disable transmitter. */
1273 tcr = gmi_read_4(TEMAC_GMI_TXCF) & ~GMI_TX_ENABLE; 1273 tcr = gmi_read_4(TEMAC_GMI_TXCF) & ~GMI_TX_ENABLE;
1274 gmi_write_4(TEMAC_GMI_TXCF, tcr); 1274 gmi_write_4(TEMAC_GMI_TXCF, tcr);
1275} 1275}
1276 1276
1277static void 1277static void
1278temac_rxdrain(struct temac_softc *sc) 1278temac_rxdrain(struct temac_softc *sc)
1279{ 1279{
1280 struct temac_rxsoft *rxs; 1280 struct temac_rxsoft *rxs;
1281 int i; 1281 int i;
1282 1282
1283 for (i = 0; i < TEMAC_NRXDESC; i++) { 1283 for (i = 0; i < TEMAC_NRXDESC; i++) {
1284 rxs = &sc->sc_rxsoft[i]; 1284 rxs = &sc->sc_rxsoft[i];
1285 1285
1286 if (rxs->rxs_mbuf != NULL) { 1286 if (rxs->rxs_mbuf != NULL) {
1287 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap); 1287 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmap);
1288 m_freem(rxs->rxs_mbuf); 1288 m_freem(rxs->rxs_mbuf);
1289 rxs->rxs_mbuf = NULL; 1289 rxs->rxs_mbuf = NULL;
1290 } 1290 }
1291 } 1291 }
1292 1292
1293 sc->sc_rx_drained = 1; 1293 sc->sc_rx_drained = 1;
1294} 1294}
1295 1295
1296static void 1296static void
1297temac_txkick(struct temac_softc *sc) 1297temac_txkick(struct temac_softc *sc)
1298{ 1298{
1299 if (sc->sc_txsoft[sc->sc_txsreap].txs_mbuf != NULL && 1299 if (sc->sc_txsoft[sc->sc_txsreap].txs_mbuf != NULL &&
1300 sc->sc_txbusy == 0) { 1300 sc->sc_txbusy == 0) {
1301 cdmac_tx_start(sc, sc->sc_cdaddr + TEMAC_TXDOFF(sc->sc_txreap)); 1301 cdmac_tx_start(sc, sc->sc_cdaddr + TEMAC_TXDOFF(sc->sc_txreap));
1302 sc->sc_txbusy = 1; 1302 sc->sc_txbusy = 1;
1303 } 1303 }
1304} 1304}

cvs diff -r1.4 -r1.5 src/sys/arch/evbppc/virtex/dev/tft_ll.c (switch to unified diff)

--- src/sys/arch/evbppc/virtex/dev/tft_ll.c 2011/07/01 19:03:50 1.4
+++ src/sys/arch/evbppc/virtex/dev/tft_ll.c 2021/03/29 13:14:13 1.5
@@ -1,222 +1,224 @@ @@ -1,222 +1,224 @@
1/* $NetBSD: tft_ll.c,v 1.4 2011/07/01 19:03:50 dyoung Exp $ */ 1/* $NetBSD: tft_ll.c,v 1.5 2021/03/29 13:14:13 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Jachym Holecek 4 * Copyright (c) 2006 Jachym Holecek
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written for DFC Design, s.r.o. 7 * Written for DFC Design, s.r.o.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 12 *
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 15 *
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: tft_ll.c,v 1.4 2011/07/01 19:03:50 dyoung Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: tft_ll.c,v 1.5 2021/03/29 13:14:13 rin Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/mbuf.h> 37#include <sys/mbuf.h>
38#include <sys/kernel.h> 38#include <sys/kernel.h>
39#include <sys/socket.h> 39#include <sys/socket.h>
40#include <sys/ioctl.h> 40#include <sys/ioctl.h>
41#include <sys/device.h> 41#include <sys/device.h>
42#include <sys/queue.h> 42#include <sys/queue.h>
43 43
44#include <uvm/uvm_extern.h> 44#include <uvm/uvm_extern.h>
45 45
46#include <sys/bus.h> 46#include <sys/bus.h>
47 47
48/* XXX needed? */ 48/* XXX needed? */
49#include <dev/wscons/wsdisplayvar.h> 49#include <dev/wscons/wsdisplayvar.h>
50#include <dev/wscons/wsconsio.h> 50#include <dev/wscons/wsconsio.h>
51#include <dev/rasops/rasops.h> 51#include <dev/rasops/rasops.h>
52#include <dev/wscons/wsdisplay_vconsvar.h> 52#include <dev/wscons/wsdisplay_vconsvar.h>
53 53
54#include <evbppc/virtex/dev/xcvbusvar.h> 54#include <evbppc/virtex/dev/xcvbusvar.h>
55#include <evbppc/virtex/dev/cdmacreg.h> 55#include <evbppc/virtex/dev/cdmacreg.h>
56#include <evbppc/virtex/dev/tftreg.h> 56#include <evbppc/virtex/dev/tftreg.h>
57#include <evbppc/virtex/dev/tftvar.h> 57#include <evbppc/virtex/dev/tftvar.h>
58 58
59 59
/*
 * Shared DMA area: a single CDMAC descriptor immediately followed by
 * the framebuffer image.  cd_img is a C99 flexible array member, so
 * sizeof(struct ll_tft_control) covers the descriptor only; the image
 * bytes are accounted for separately (sc_size) at allocation time.
 */
struct ll_tft_control {
	struct cdmac_descr 	cd_dsc;		/* DMA descriptor, scanned out forever */
	u_char 			cd_img[];	/* framebuffer pixels */
};

/*
 * desc_addr is computed as base + offsetof(..., cd_img); assert the
 * compiler inserted no padding between the descriptor and the image
 * (this replaced the old __packed attribute).
 */
CTASSERT(offsetof(struct ll_tft_control, cd_img) == sizeof(struct cdmac_descr));
64 66
/*
 * Per-device state for the LocalLink TFT frontend.  lsc_sc MUST remain
 * the first member: the shutdown hook is registered with a pointer to
 * lsc_sc and cast back to struct ll_tft_softc in ll_tft_shutdown().
 */
struct ll_tft_softc {
	struct tft_softc 	lsc_sc;		/* generic TFT state, must be first */

	bus_space_tag_t 	lsc_dma_iot;	/* CDMAC control register space */
	bus_space_handle_t 	lsc_dma_ioh;

	bus_dma_tag_t 		lsc_dmat;	/* DMA tag for the framebuffer */
	bus_dmamap_t 		lsc_dmap;	/* map for descriptor + image */

	struct ll_tft_control 	*lsc_cd;	/* KVA of descriptor + image */
	bus_dma_segment_t 	lsc_seg;	/* single contiguous segment */
};
77 79
78static void ll_tft_attach(device_t, device_t, void *); 80static void ll_tft_attach(device_t, device_t, void *);
79static paddr_t ll_tft_mmap(void *, void *, off_t, int); 81static paddr_t ll_tft_mmap(void *, void *, off_t, int);
80static void ll_tft_shutdown(void *); 82static void ll_tft_shutdown(void *);
81 83
82CFATTACH_DECL_NEW(ll_tft, sizeof(struct ll_tft_softc), 84CFATTACH_DECL_NEW(ll_tft, sizeof(struct ll_tft_softc),
83 xcvbus_child_match, ll_tft_attach, NULL, NULL); 85 xcvbus_child_match, ll_tft_attach, NULL, NULL);
84 86
85 87
/* wsdisplay access ops: only mmap is device-specific here. */
static struct wsdisplay_accessops ll_tft_accessops = {
	.mmap = ll_tft_mmap,
};
89 91
90 92
/*
 * Attach the LocalLink TFT controller: map device and DMAC registers,
 * allocate one DMA-coherent area holding the CDMAC descriptor plus the
 * framebuffer, point the descriptor at itself (an endless one-entry
 * chain the DMAC scans out forever), and finally reset/enable the
 * controller and start DMA.  Errors unwind via the fail_* chain in
 * reverse order of acquisition.
 */
static void
ll_tft_attach(device_t parent, device_t self, void *aux)
{
	struct xcvbus_attach_args *vaa = aux;
	struct ll_dmac *tx = vaa->vaa_tx_dmac;
	struct ll_tft_softc *lsc = device_private(self);
	struct tft_softc *sc = &lsc->lsc_sc;
	int nseg, error;

	/* Display output needs the Tx DMA channel; config must supply it. */
	KASSERT(tx);

	lsc->lsc_dma_iot = tx->dmac_iot;
	lsc->lsc_dmat = vaa->vaa_dmat;
	sc->sc_iot = vaa->vaa_iot;
	sc->sc_dev = self;

	aprint_normal(": LL_TFT\n");

	if ((error = bus_space_map(sc->sc_iot, vaa->vaa_addr, TFT_SIZE,
	    0, &sc->sc_ioh)) != 0) {
		aprint_error_dev(self, "could not map device registers\n");
		goto fail_0;
	}
	if ((error = bus_space_map(lsc->lsc_dma_iot, tx->dmac_ctrl_addr,
	    CDMAC_CTRL_SIZE, 0, &lsc->lsc_dma_ioh)) != 0) {
		aprint_error_dev(self, "could not map dmac registers\n");
		goto fail_1;
	}

	/* Fill in resolution, depth, size. */
	tft_mode(sc->sc_dev);

	/* Allocate and map framebuffer control data. */
	if ((error = bus_dmamem_alloc(lsc->lsc_dmat,
	    sizeof(struct ll_tft_control) + sc->sc_size, 8, 0,
	    &lsc->lsc_seg, 1, &nseg, 0)) != 0) {
		aprint_error_dev(self, "could not allocate framebuffer\n");
		goto fail_2;
	}
	if ((error = bus_dmamem_map(lsc->lsc_dmat, &lsc->lsc_seg, nseg,
	    sizeof(struct ll_tft_control) + sc->sc_size,
	    (void **)&lsc->lsc_cd, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "could not map framebuffer\n");
		goto fail_3;
	}
	if ((error = bus_dmamap_create(lsc->lsc_dmat,
	    sizeof(struct ll_tft_control) + sc->sc_size, 1,
	    sizeof(struct ll_tft_control) + sc->sc_size, 0, 0,
	    &lsc->lsc_dmap)) != 0) {
		aprint_error_dev(self, "could not create framebuffer DMA map\n");
		goto fail_4;
	}
	if ((error = bus_dmamap_load(lsc->lsc_dmat, lsc->lsc_dmap, lsc->lsc_cd,
	    sizeof(struct ll_tft_control) + sc->sc_size, NULL, 0)) != 0) {
		aprint_error_dev(self, "could not load framebuffer DMA map\n");
		goto fail_5;
	}

	/*
	 * Clear screen, setup descriptor.
	 * NOTE(review): with cd_img a flexible array member, this memset
	 * covers the descriptor only, not the sc_size image bytes —
	 * presumably the screen is cleared elsewhere; verify.
	 */
	memset(lsc->lsc_cd, 0x00, sizeof(struct ll_tft_control));
	sc->sc_image = lsc->lsc_cd->cd_img;

	/* Descriptor chains to itself: DMA loops over the frame forever. */
	lsc->lsc_cd->cd_dsc.desc_next = lsc->lsc_dmap->dm_segs[0].ds_addr;
	lsc->lsc_cd->cd_dsc.desc_addr = lsc->lsc_dmap->dm_segs[0].ds_addr +
	    offsetof(struct ll_tft_control, cd_img);
	lsc->lsc_cd->cd_dsc.desc_size = sc->sc_size;
	lsc->lsc_cd->cd_dsc.desc_stat = CDMAC_STAT_SOP;

	bus_dmamap_sync(lsc->lsc_dmat, lsc->lsc_dmap, 0,
	    sizeof(struct ll_tft_control) + sc->sc_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * The hook receives sc (== lsc, since lsc_sc is the first member
	 * of struct ll_tft_softc) and casts it back in ll_tft_shutdown.
	 */
	sc->sc_sdhook = shutdownhook_establish(ll_tft_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");

	tft_attach(self, &ll_tft_accessops);

	aprint_normal_dev(self, "video memory pa 0x%08x\n",
	    (uint32_t)lsc->lsc_cd->cd_dsc.desc_addr);

	/* Timing sensitive... */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_RESET);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, TFT_CTRL, CTRL_ENABLE);
	bus_space_write_4(lsc->lsc_dma_iot, lsc->lsc_dma_ioh, CDMAC_CURDESC,
	    lsc->lsc_dmap->dm_segs[0].ds_addr);

	return ;

 fail_5:
	bus_dmamap_destroy(lsc->lsc_dmat, lsc->lsc_dmap);
 fail_4:
	bus_dmamem_unmap(lsc->lsc_dmat, (void *)lsc->lsc_cd,
	    sizeof(struct ll_tft_control) + sc->sc_size);
 fail_3:
	bus_dmamem_free(lsc->lsc_dmat, &lsc->lsc_seg, nseg);
 fail_2:
	bus_space_unmap(lsc->lsc_dma_iot, lsc->lsc_dma_ioh, CDMAC_CTRL_SIZE);
 fail_1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, TFT_SIZE);
 fail_0:
	aprint_error_dev(self, "error %d\n", error);
}
195 197
196static paddr_t 198static paddr_t
197ll_tft_mmap(void *arg, void *scr, off_t offs, int prot) 199ll_tft_mmap(void *arg, void *scr, off_t offs, int prot)
198{ 200{
199 struct ll_tft_softc *lsc = arg; 201 struct ll_tft_softc *lsc = arg;
200 paddr_t pa; 202 paddr_t pa;
201 203
202 if (offs < lsc->lsc_sc.sc_size) { 204 if (offs < lsc->lsc_sc.sc_size) {
203 pa = bus_dmamem_mmap(lsc->lsc_dmat, &lsc->lsc_seg, 1, 205 pa = bus_dmamem_mmap(lsc->lsc_dmat, &lsc->lsc_seg, 1,
204 offs + offsetof(struct ll_tft_control, cd_img), 206 offs + offsetof(struct ll_tft_control, cd_img),
205 prot, BUS_DMA_WAITOK | BUS_DMA_COHERENT); 207 prot, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
206 208
207 return (pa); 209 return (pa);
208 } 210 }
209 211
210 return (-1); 212 return (-1);
211}  213}
212 214
213static void 215static void
214ll_tft_shutdown(void *arg) 216ll_tft_shutdown(void *arg)
215{ 217{
216 struct ll_tft_softc *lsc = arg; 218 struct ll_tft_softc *lsc = arg;
217 219
218 bus_space_write_4(lsc->lsc_dma_iot, lsc->lsc_dma_ioh, 0, 220 bus_space_write_4(lsc->lsc_dma_iot, lsc->lsc_dma_ioh, 0,
219 CDMAC_STAT_RESET); 221 CDMAC_STAT_RESET);
220 222
221 tft_shutdown(&lsc->lsc_sc); 223 tft_shutdown(&lsc->lsc_sc);
222} 224}