Remove duplicate "when" word in comments.
diff -r1.71 -r1.72 src/sys/arch/mac68k/dev/adb_direct.c
(andvar)
--- src/sys/arch/mac68k/dev/adb_direct.c 2024/02/28 13:05:39 1.71
+++ src/sys/arch/mac68k/dev/adb_direct.c 2024/03/05 20:58:05 1.72
@@ -1,2243 +1,2243 @@ | @@ -1,2243 +1,2243 @@ | |||
1 | /* $NetBSD: adb_direct.c,v 1.71 2024/02/28 13:05:39 thorpej Exp $ */ | 1 | /* $NetBSD: adb_direct.c,v 1.72 2024/03/05 20:58:05 andvar Exp $ */ | |
2 | 2 | |||
3 | /* From: adb_direct.c 2.02 4/18/97 jpw */ | 3 | /* From: adb_direct.c 2.02 4/18/97 jpw */ | |
4 | 4 | |||
5 | /* | 5 | /* | |
6 | * Copyright (C) 1996, 1997 John P. Wittkoski | 6 | * Copyright (C) 1996, 1997 John P. Wittkoski | |
7 | * All rights reserved. | 7 | * All rights reserved. | |
8 | * | 8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | 9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | 10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | 11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | 12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | 13 | * notice, this list of conditions and the following disclaimer. | |
14 | * 2. Redistributions in binary form must reproduce the above copyright | 14 | * 2. Redistributions in binary form must reproduce the above copyright | |
15 | * notice, this list of conditions and the following disclaimer in the | 15 | * notice, this list of conditions and the following disclaimer in the | |
16 | * documentation and/or other materials provided with the distribution. | 16 | * documentation and/or other materials provided with the distribution. | |
17 | * 3. All advertising materials mentioning features or use of this software | 17 | * 3. All advertising materials mentioning features or use of this software | |
18 | * must display the following acknowledgement: | 18 | * must display the following acknowledgement: | |
19 | * This product includes software developed by John P. Wittkoski. | 19 | * This product includes software developed by John P. Wittkoski. | |
20 | * 4. The name of the author may not be used to endorse or promote products | 20 | * 4. The name of the author may not be used to endorse or promote products | |
21 | * derived from this software without specific prior written permission. | 21 | * derived from this software without specific prior written permission. | |
22 | * | 22 | * | |
23 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | 23 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
24 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | 24 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
25 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | 25 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
26 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | 26 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | 27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
29 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 29 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
30 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 30 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
32 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 32 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
33 | */ | 33 | */ | |
34 | 34 | |||
35 | /* | 35 | /* | |
36 | * This code is rather messy, but I don't have time right now | 36 | * This code is rather messy, but I don't have time right now | |
37 | * to clean it up as much as I would like. | 37 | * to clean it up as much as I would like. | |
38 | * But it works, so I'm happy. :-) jpw | 38 | * But it works, so I'm happy. :-) jpw | |
39 | */ | 39 | */ | |
40 | 40 | |||
41 | /* | 41 | /* | |
42 | * TO DO: | 42 | * TO DO: | |
43 | * - We could reduce the time spent in the adb_intr_* routines | 43 | * - We could reduce the time spent in the adb_intr_* routines | |
44 | * by having them save the incoming and outgoing data directly | 44 | * by having them save the incoming and outgoing data directly | |
45 | * in the adbInbound and adbOutbound queues, as it would reduce | 45 | * in the adbInbound and adbOutbound queues, as it would reduce | |
46 | * the number of times we need to copy the data around. It | 46 | * the number of times we need to copy the data around. It | |
47 | * would also make the code more readable and easier to follow. | 47 | * would also make the code more readable and easier to follow. | |
48 | * - (Related to above) Use the header part of adbCommand to | 48 | * - (Related to above) Use the header part of adbCommand to | |
49 | * reduce the number of copies we have to do of the data. | 49 | * reduce the number of copies we have to do of the data. | |
50 | * - (Related to above) Actually implement the adbOutbound queue. | 50 | * - (Related to above) Actually implement the adbOutbound queue. | |
51 | * This is fairly easy once you switch all the intr routines | 51 | * This is fairly easy once you switch all the intr routines | |
52 | * over to using adbCommand structs directly. | 52 | * over to using adbCommand structs directly. | |
53 | * - There is a bug in the state machine of adb_intr_cuda | 53 | * - There is a bug in the state machine of adb_intr_cuda | |
54 | * code that causes hangs, especially on 030 machines, probably | 54 | * code that causes hangs, especially on 030 machines, probably | |
55 | * because of some timing issues. Because I have been unable to | 55 | * because of some timing issues. Because I have been unable to | |
56 | * determine the exact cause of this bug, I used the timeout function | 56 | * determine the exact cause of this bug, I used the timeout function | |
57 | * to check for and recover from this condition. If anyone finds | 57 | * to check for and recover from this condition. If anyone finds | |
58 | * the actual cause of this bug, the calls to timeout and the | 58 | * the actual cause of this bug, the calls to timeout and the | |
59 | * adb_cuda_tickle routine can be removed. | 59 | * adb_cuda_tickle routine can be removed. | |
60 | */ | 60 | */ | |
61 | 61 | |||
62 | #ifdef __NetBSD__ | 62 | #ifdef __NetBSD__ | |
63 | 63 | |||
64 | #include <sys/cdefs.h> | 64 | #include <sys/cdefs.h> | |
65 | __KERNEL_RCSID(0, "$NetBSD: adb_direct.c,v 1.71 2024/02/28 13:05:39 thorpej Exp $"); | 65 | __KERNEL_RCSID(0, "$NetBSD: adb_direct.c,v 1.72 2024/03/05 20:58:05 andvar Exp $"); | |
66 | 66 | |||
67 | #include "opt_adb.h" | 67 | #include "opt_adb.h" | |
68 | 68 | |||
69 | #include <sys/param.h> | 69 | #include <sys/param.h> | |
70 | #include <sys/pool.h> | 70 | #include <sys/pool.h> | |
71 | #include <sys/queue.h> | 71 | #include <sys/queue.h> | |
72 | #include <sys/systm.h> | 72 | #include <sys/systm.h> | |
73 | #include <sys/callout.h> | 73 | #include <sys/callout.h> | |
74 | #include <sys/cpu.h> | 74 | #include <sys/cpu.h> | |
75 | #include <sys/intr.h> | 75 | #include <sys/intr.h> | |
76 | 76 | |||
77 | #include <machine/viareg.h> | 77 | #include <machine/viareg.h> | |
78 | #include <machine/adbsys.h> /* required for adbvar.h */ | 78 | #include <machine/adbsys.h> /* required for adbvar.h */ | |
79 | #include <machine/iopreg.h> /* required for IOP support */ | 79 | #include <machine/iopreg.h> /* required for IOP support */ | |
80 | 80 | |||
81 | #include <m68k/vectors.h> | 81 | #include <m68k/vectors.h> | |
82 | 82 | |||
83 | #include <mac68k/mac68k/macrom.h> | 83 | #include <mac68k/mac68k/macrom.h> | |
84 | #include <mac68k/dev/adbvar.h> | 84 | #include <mac68k/dev/adbvar.h> | |
85 | #define printf_intr printf | 85 | #define printf_intr printf | |
86 | #else /* !__NetBSD__, i.e. Mac OS */ | 86 | #else /* !__NetBSD__, i.e. Mac OS */ | |
87 | #include "via.h" /* for macos based testing */ | 87 | #include "via.h" /* for macos based testing */ | |
88 | /* #define ADB_DEBUG */ /* more verbose for testing */ | 88 | /* #define ADB_DEBUG */ /* more verbose for testing */ | |
89 | 89 | |||
90 | /* Types of ADB hardware that we support */ | 90 | /* Types of ADB hardware that we support */ | |
91 | #define ADB_HW_UNKNOWN 0x0 /* don't know */ | 91 | #define ADB_HW_UNKNOWN 0x0 /* don't know */ | |
92 | #define ADB_HW_II 0x1 /* Mac II series */ | 92 | #define ADB_HW_II 0x1 /* Mac II series */ | |
93 | #define ADB_HW_IISI 0x2 /* Mac IIsi series */ | 93 | #define ADB_HW_IISI 0x2 /* Mac IIsi series */ | |
94 | #define ADB_HW_PB 0x3 /* PowerBook series */ | 94 | #define ADB_HW_PB 0x3 /* PowerBook series */ | |
95 | #define ADB_HW_CUDA 0x4 /* Machines with a Cuda chip */ | 95 | #define ADB_HW_CUDA 0x4 /* Machines with a Cuda chip */ | |
96 | #endif /* __NetBSD__ */ | 96 | #endif /* __NetBSD__ */ | |
97 | 97 | |||
98 | /* some misc. leftovers */ | 98 | /* some misc. leftovers */ | |
99 | #define vPB 0x0000 | 99 | #define vPB 0x0000 | |
100 | #define vPB3 0x08 | 100 | #define vPB3 0x08 | |
101 | #define vPB4 0x10 | 101 | #define vPB4 0x10 | |
102 | #define vPB5 0x20 | 102 | #define vPB5 0x20 | |
103 | #define vSR_INT 0x04 | 103 | #define vSR_INT 0x04 | |
104 | #define vSR_OUT 0x10 | 104 | #define vSR_OUT 0x10 | |
105 | 105 | |||
106 | /* the type of ADB action that we are currently performing */ | 106 | /* the type of ADB action that we are currently performing */ | |
107 | #define ADB_ACTION_NOTREADY 0x1 /* has not been initialized yet */ | 107 | #define ADB_ACTION_NOTREADY 0x1 /* has not been initialized yet */ | |
108 | #define ADB_ACTION_IDLE 0x2 /* the bus is currently idle */ | 108 | #define ADB_ACTION_IDLE 0x2 /* the bus is currently idle */ | |
109 | #define ADB_ACTION_OUT 0x3 /* sending out a command */ | 109 | #define ADB_ACTION_OUT 0x3 /* sending out a command */ | |
110 | #define ADB_ACTION_IN 0x4 /* receiving data */ | 110 | #define ADB_ACTION_IN 0x4 /* receiving data */ | |
111 | #define ADB_ACTION_POLLING 0x5 /* polling - II only */ | 111 | #define ADB_ACTION_POLLING 0x5 /* polling - II only */ | |
112 | #define ADB_ACTION_RUNNING 0x6 /* running - IOP only */ | 112 | #define ADB_ACTION_RUNNING 0x6 /* running - IOP only */ | |
113 | 113 | |||
114 | /* | 114 | /* | |
115 | * These describe the state of the ADB bus itself, although they | 115 | * These describe the state of the ADB bus itself, although they | |
116 | * don't necessarily correspond directly to ADB states. | 116 | * don't necessarily correspond directly to ADB states. | |
117 | * Note: these are not really used in the IIsi code. | 117 | * Note: these are not really used in the IIsi code. | |
118 | */ | 118 | */ | |
119 | #define ADB_BUS_UNKNOWN 0x1 /* we don't know yet - all models */ | 119 | #define ADB_BUS_UNKNOWN 0x1 /* we don't know yet - all models */ | |
120 | #define ADB_BUS_IDLE 0x2 /* bus is idle - all models */ | 120 | #define ADB_BUS_IDLE 0x2 /* bus is idle - all models */ | |
121 | #define ADB_BUS_CMD 0x3 /* starting a command - II models */ | 121 | #define ADB_BUS_CMD 0x3 /* starting a command - II models */ | |
122 | #define ADB_BUS_ODD 0x4 /* the "odd" state - II models */ | 122 | #define ADB_BUS_ODD 0x4 /* the "odd" state - II models */ | |
123 | #define ADB_BUS_EVEN 0x5 /* the "even" state - II models */ | 123 | #define ADB_BUS_EVEN 0x5 /* the "even" state - II models */ | |
124 | #define ADB_BUS_ACTIVE 0x6 /* active state - IIsi models */ | 124 | #define ADB_BUS_ACTIVE 0x6 /* active state - IIsi models */ | |
125 | #define ADB_BUS_ACK 0x7 /* currently ACKing - IIsi models */ | 125 | #define ADB_BUS_ACK 0x7 /* currently ACKing - IIsi models */ | |
126 | 126 | |||
127 | /* | 127 | /* | |
128 | * Shortcuts for setting or testing the VIA bit states. | 128 | * Shortcuts for setting or testing the VIA bit states. | |
129 | * Not all shortcuts are used for every type of ADB hardware. | 129 | * Not all shortcuts are used for every type of ADB hardware. | |
130 | */ | 130 | */ | |
131 | #define ADB_SET_STATE_IDLE_II() via_reg(VIA1, vBufB) |= (vPB4 | vPB5) | 131 | #define ADB_SET_STATE_IDLE_II() via_reg(VIA1, vBufB) |= (vPB4 | vPB5) | |
132 | #define ADB_SET_STATE_IDLE_IISI() via_reg(VIA1, vBufB) &= ~(vPB4 | vPB5) | 132 | #define ADB_SET_STATE_IDLE_IISI() via_reg(VIA1, vBufB) &= ~(vPB4 | vPB5) | |
133 | #define ADB_SET_STATE_IDLE_CUDA() via_reg(VIA1, vBufB) |= (vPB4 | vPB5) | 133 | #define ADB_SET_STATE_IDLE_CUDA() via_reg(VIA1, vBufB) |= (vPB4 | vPB5) | |
134 | #define ADB_SET_STATE_CMD() via_reg(VIA1, vBufB) &= ~(vPB4 | vPB5) | 134 | #define ADB_SET_STATE_CMD() via_reg(VIA1, vBufB) &= ~(vPB4 | vPB5) | |
135 | #define ADB_SET_STATE_EVEN() via_reg(VIA1, vBufB) = ((via_reg(VIA1, \ | 135 | #define ADB_SET_STATE_EVEN() via_reg(VIA1, vBufB) = ((via_reg(VIA1, \ | |
136 | vBufB) | vPB4) & ~vPB5) | 136 | vBufB) | vPB4) & ~vPB5) | |
137 | #define ADB_SET_STATE_ODD() via_reg(VIA1, vBufB) = ((via_reg(VIA1, \ | 137 | #define ADB_SET_STATE_ODD() via_reg(VIA1, vBufB) = ((via_reg(VIA1, \ | |
138 | vBufB) | vPB5) & ~vPB4) | 138 | vBufB) | vPB5) & ~vPB4) | |
139 | #define ADB_SET_STATE_ACTIVE() via_reg(VIA1, vBufB) |= vPB5 | 139 | #define ADB_SET_STATE_ACTIVE() via_reg(VIA1, vBufB) |= vPB5 | |
140 | #define ADB_SET_STATE_INACTIVE() via_reg(VIA1, vBufB) &= ~vPB5 | 140 | #define ADB_SET_STATE_INACTIVE() via_reg(VIA1, vBufB) &= ~vPB5 | |
141 | #define ADB_SET_STATE_TIP() via_reg(VIA1, vBufB) &= ~vPB5 | 141 | #define ADB_SET_STATE_TIP() via_reg(VIA1, vBufB) &= ~vPB5 | |
142 | #define ADB_CLR_STATE_TIP() via_reg(VIA1, vBufB) |= vPB5 | 142 | #define ADB_CLR_STATE_TIP() via_reg(VIA1, vBufB) |= vPB5 | |
143 | #define ADB_SET_STATE_ACKON() via_reg(VIA1, vBufB) |= vPB4 | 143 | #define ADB_SET_STATE_ACKON() via_reg(VIA1, vBufB) |= vPB4 | |
144 | #define ADB_SET_STATE_ACKOFF() via_reg(VIA1, vBufB) &= ~vPB4 | 144 | #define ADB_SET_STATE_ACKOFF() via_reg(VIA1, vBufB) &= ~vPB4 | |
145 | #define ADB_TOGGLE_STATE_ACK_CUDA() via_reg(VIA1, vBufB) ^= vPB4 | 145 | #define ADB_TOGGLE_STATE_ACK_CUDA() via_reg(VIA1, vBufB) ^= vPB4 | |
146 | #define ADB_SET_STATE_ACKON_CUDA() via_reg(VIA1, vBufB) &= ~vPB4 | 146 | #define ADB_SET_STATE_ACKON_CUDA() via_reg(VIA1, vBufB) &= ~vPB4 | |
147 | #define ADB_SET_STATE_ACKOFF_CUDA() via_reg(VIA1, vBufB) |= vPB4 | 147 | #define ADB_SET_STATE_ACKOFF_CUDA() via_reg(VIA1, vBufB) |= vPB4 | |
148 | #define ADB_SET_SR_INPUT() via_reg(VIA1, vACR) &= ~vSR_OUT | 148 | #define ADB_SET_SR_INPUT() via_reg(VIA1, vACR) &= ~vSR_OUT | |
149 | #define ADB_SET_SR_OUTPUT() via_reg(VIA1, vACR) |= vSR_OUT | 149 | #define ADB_SET_SR_OUTPUT() via_reg(VIA1, vACR) |= vSR_OUT | |
150 | #define ADB_SR() via_reg(VIA1, vSR) | 150 | #define ADB_SR() via_reg(VIA1, vSR) | |
151 | #define ADB_VIA_INTR_ENABLE() via_reg(VIA1, vIER) = 0x84 | 151 | #define ADB_VIA_INTR_ENABLE() via_reg(VIA1, vIER) = 0x84 | |
152 | #define ADB_VIA_INTR_DISABLE() via_reg(VIA1, vIER) = 0x04 | 152 | #define ADB_VIA_INTR_DISABLE() via_reg(VIA1, vIER) = 0x04 | |
153 | #define ADB_VIA_CLR_INTR() via_reg(VIA1, vIFR) = 0x04 | 153 | #define ADB_VIA_CLR_INTR() via_reg(VIA1, vIFR) = 0x04 | |
154 | #define ADB_INTR_IS_OFF (vPB3 == (via_reg(VIA1, vBufB) & vPB3)) | 154 | #define ADB_INTR_IS_OFF (vPB3 == (via_reg(VIA1, vBufB) & vPB3)) | |
155 | #define ADB_INTR_IS_ON (0 == (via_reg(VIA1, vBufB) & vPB3)) | 155 | #define ADB_INTR_IS_ON (0 == (via_reg(VIA1, vBufB) & vPB3)) | |
156 | #define ADB_SR_INTR_IS_OFF (0 == (via_reg(VIA1, vIFR) & vSR_INT)) | 156 | #define ADB_SR_INTR_IS_OFF (0 == (via_reg(VIA1, vIFR) & vSR_INT)) | |
157 | #define ADB_SR_INTR_IS_ON (vSR_INT == (via_reg(VIA1, \ | 157 | #define ADB_SR_INTR_IS_ON (vSR_INT == (via_reg(VIA1, \ | |
158 | vIFR) & vSR_INT)) | 158 | vIFR) & vSR_INT)) | |
159 | 159 | |||
160 | /* | 160 | /* | |
161 | * This is the delay that is required (in uS) between certain | 161 | * This is the delay that is required (in uS) between certain | |
162 | * ADB transactions. The actual timing delay for each uS is | 162 | * ADB transactions. The actual timing delay for each uS is | |
163 | * calculated at boot time to account for differences in machine speed. | 163 | * calculated at boot time to account for differences in machine speed. | |
164 | */ | 164 | */ | |
165 | #define ADB_DELAY 150 | 165 | #define ADB_DELAY 150 | |
166 | 166 | |||
167 | /* | 167 | /* | |
168 | * Maximum ADB message length; includes space for data, result, and | 168 | * Maximum ADB message length; includes space for data, result, and | |
169 | * device code - plus a little for safety. | 169 | * device code - plus a little for safety. | |
170 | */ | 170 | */ | |
171 | #define ADB_MAX_MSG_LENGTH 16 | 171 | #define ADB_MAX_MSG_LENGTH 16 | |
172 | #define ADB_MAX_HDR_LENGTH 8 | 172 | #define ADB_MAX_HDR_LENGTH 8 | |
173 | 173 | |||
174 | #define ADB_QUEUE 32 | 174 | #define ADB_QUEUE 32 | |
175 | #define ADB_TICKLE_TICKS 4 | 175 | #define ADB_TICKLE_TICKS 4 | |
176 | 176 | |||
177 | /* | 177 | /* | |
178 | * A structure for storing information about each ADB device. | 178 | * A structure for storing information about each ADB device. | |
179 | */ | 179 | */ | |
180 | struct ADBDevEntry { | 180 | struct ADBDevEntry { | |
181 | void (*ServiceRtPtr)(void); | 181 | void (*ServiceRtPtr)(void); | |
182 | void *DataAreaAddr; | 182 | void *DataAreaAddr; | |
183 | int devType; | 183 | int devType; | |
184 | int origAddr; | 184 | int origAddr; | |
185 | int currentAddr; | 185 | int currentAddr; | |
186 | }; | 186 | }; | |
187 | 187 | |||
188 | /* | 188 | /* | |
189 | * Used to hold ADB commands that are waiting to be sent out. | 189 | * Used to hold ADB commands that are waiting to be sent out. | |
190 | */ | 190 | */ | |
191 | struct adbCmdHoldEntry { | 191 | struct adbCmdHoldEntry { | |
192 | u_char outBuf[ADB_MAX_MSG_LENGTH]; /* our message */ | 192 | u_char outBuf[ADB_MAX_MSG_LENGTH]; /* our message */ | |
193 | u_char *saveBuf; /* buffer to know where to save result */ | 193 | u_char *saveBuf; /* buffer to know where to save result */ | |
194 | u_char *compRout; /* completion routine pointer */ | 194 | u_char *compRout; /* completion routine pointer */ | |
195 | u_char *data; /* completion routine data pointer */ | 195 | u_char *data; /* completion routine data pointer */ | |
196 | }; | 196 | }; | |
197 | 197 | |||
198 | /* | 198 | /* | |
199 | * Eventually used for two separate queues, the queue between | 199 | * Eventually used for two separate queues, the queue between | |
200 | * the upper and lower halves, and the outgoing packet queue. | 200 | * the upper and lower halves, and the outgoing packet queue. | |
201 | * TO DO: adbCommand can replace all of adbCmdHoldEntry eventually | 201 | * TO DO: adbCommand can replace all of adbCmdHoldEntry eventually | |
202 | */ | 202 | */ | |
203 | struct adbCommand { | 203 | struct adbCommand { | |
204 | u_char header[ADB_MAX_HDR_LENGTH]; /* not used yet */ | 204 | u_char header[ADB_MAX_HDR_LENGTH]; /* not used yet */ | |
205 | u_char data[ADB_MAX_MSG_LENGTH]; /* packet data only */ | 205 | u_char data[ADB_MAX_MSG_LENGTH]; /* packet data only */ | |
206 | u_char *saveBuf; /* where to save result */ | 206 | u_char *saveBuf; /* where to save result */ | |
207 | u_char *compRout; /* completion routine pointer */ | 207 | u_char *compRout; /* completion routine pointer */ | |
208 | u_char *compData; /* completion routine data pointer */ | 208 | u_char *compData; /* completion routine data pointer */ | |
209 | u_int cmd; /* the original command for this data */ | 209 | u_int cmd; /* the original command for this data */ | |
210 | u_int unsol; /* 1 if packet was unsolicited */ | 210 | u_int unsol; /* 1 if packet was unsolicited */ | |
211 | u_int ack_only; /* 1 for no special processing */ | 211 | u_int ack_only; /* 1 for no special processing */ | |
212 | }; | 212 | }; | |
213 | 213 | |||
214 | /* | 214 | /* | |
215 | * Text representations of each hardware class | 215 | * Text representations of each hardware class | |
216 | */ | 216 | */ | |
217 | const char *adbHardwareDescr[MAX_ADB_HW + 1] = { | 217 | const char *adbHardwareDescr[MAX_ADB_HW + 1] = { | |
218 | "unknown", | 218 | "unknown", | |
219 | "II series", | 219 | "II series", | |
220 | "IIsi series", | 220 | "IIsi series", | |
221 | "PowerBook", | 221 | "PowerBook", | |
222 | "Cuda", | 222 | "Cuda", | |
223 | "IOP", | 223 | "IOP", | |
224 | }; | 224 | }; | |
225 | 225 | |||
226 | /* | 226 | /* | |
227 | * A few variables that we need and their initial values. | 227 | * A few variables that we need and their initial values. | |
228 | */ | 228 | */ | |
229 | int adbHardware = ADB_HW_UNKNOWN; | 229 | int adbHardware = ADB_HW_UNKNOWN; | |
230 | int adbActionState = ADB_ACTION_NOTREADY; | 230 | int adbActionState = ADB_ACTION_NOTREADY; | |
231 | int adbBusState = ADB_BUS_UNKNOWN; | 231 | int adbBusState = ADB_BUS_UNKNOWN; | |
232 | int adbWaiting = 0; /* waiting for return data from the device */ | 232 | int adbWaiting = 0; /* waiting for return data from the device */ | |
233 | int adbWriteDelay = 0; /* working on (or waiting to do) a write */ | 233 | int adbWriteDelay = 0; /* working on (or waiting to do) a write */ | |
234 | int adbOutQueueHasData = 0; /* something in the queue waiting to go out */ | 234 | int adbOutQueueHasData = 0; /* something in the queue waiting to go out */ | |
235 | int adbNextEnd = 0; /* the next incoming byte is the last (II) */ | 235 | int adbNextEnd = 0; /* the next incoming byte is the last (II) */ | |
236 | int adbSoftPower = 0; /* machine supports soft power */ | 236 | int adbSoftPower = 0; /* machine supports soft power */ | |
237 | 237 | |||
238 | int adbWaitingCmd = 0; /* ADB command we are waiting for */ | 238 | int adbWaitingCmd = 0; /* ADB command we are waiting for */ | |
239 | u_char *adbBuffer = (long)0; /* pointer to user data area */ | 239 | u_char *adbBuffer = (long)0; /* pointer to user data area */ | |
240 | void *adbCompRout = (long)0; /* pointer to the completion routine */ | 240 | void *adbCompRout = (long)0; /* pointer to the completion routine */ | |
241 | void *adbCompData = (long)0; /* pointer to the completion routine data */ | 241 | void *adbCompData = (long)0; /* pointer to the completion routine data */ | |
242 | long adbFakeInts = 0; /* keeps track of fake ADB interrupts for | 242 | long adbFakeInts = 0; /* keeps track of fake ADB interrupts for | |
243 | * timeouts (II) */ | 243 | * timeouts (II) */ | |
244 | int adbStarting = 1; /* doing ADBReInit so do polling differently */ | 244 | int adbStarting = 1; /* doing ADBReInit so do polling differently */ | |
245 | int adbSendTalk = 0; /* the intr routine is sending the talk, not | 245 | int adbSendTalk = 0; /* the intr routine is sending the talk, not | |
246 | * the user (II) */ | 246 | * the user (II) */ | |
247 | int adbPolling = 0; /* we are polling for service request */ | 247 | int adbPolling = 0; /* we are polling for service request */ | |
248 | int adbPollCmd = 0; /* the last poll command we sent */ | 248 | int adbPollCmd = 0; /* the last poll command we sent */ | |
249 | 249 | |||
250 | u_char adbInputBuffer[ADB_MAX_MSG_LENGTH]; /* data input buffer */ | 250 | u_char adbInputBuffer[ADB_MAX_MSG_LENGTH]; /* data input buffer */ | |
251 | u_char adbOutputBuffer[ADB_MAX_MSG_LENGTH]; /* data output buffer */ | 251 | u_char adbOutputBuffer[ADB_MAX_MSG_LENGTH]; /* data output buffer */ | |
252 | struct adbCmdHoldEntry adbOutQueue; /* our 1 entry output queue */ | 252 | struct adbCmdHoldEntry adbOutQueue; /* our 1 entry output queue */ | |
253 | 253 | |||
254 | int adbSentChars = 0; /* how many characters we have sent */ | 254 | int adbSentChars = 0; /* how many characters we have sent */ | |
255 | int adbLastDevice = 0; /* last ADB dev we heard from (II ONLY) */ | 255 | int adbLastDevice = 0; /* last ADB dev we heard from (II ONLY) */ | |
256 | int adbLastDevIndex = 0; /* last ADB dev loc in dev table (II ONLY) */ | 256 | int adbLastDevIndex = 0; /* last ADB dev loc in dev table (II ONLY) */ | |
257 | int adbLastCommand = 0; /* the last ADB command we sent (II) */ | 257 | int adbLastCommand = 0; /* the last ADB command we sent (II) */ | |
258 | 258 | |||
259 | struct ADBDevEntry ADBDevTable[16]; /* our ADB device table */ | 259 | struct ADBDevEntry ADBDevTable[16]; /* our ADB device table */ | |
260 | int ADBNumDevices; /* num. of ADB devices found with ADBReInit */ | 260 | int ADBNumDevices; /* num. of ADB devices found with ADBReInit */ | |
261 | 261 | |||
262 | struct adbCommand adbInbound[ADB_QUEUE]; /* incoming queue */ | 262 | struct adbCommand adbInbound[ADB_QUEUE]; /* incoming queue */ | |
263 | volatile int adbInCount = 0; /* how many packets in in queue */ | 263 | volatile int adbInCount = 0; /* how many packets in in queue */ | |
264 | int adbInHead = 0; /* head of in queue */ | 264 | int adbInHead = 0; /* head of in queue */ | |
265 | int adbInTail = 0; /* tail of in queue */ | 265 | int adbInTail = 0; /* tail of in queue */ | |
266 | struct adbCommand adbOutbound[ADB_QUEUE]; /* outgoing queue - not used yet */ | 266 | struct adbCommand adbOutbound[ADB_QUEUE]; /* outgoing queue - not used yet */ | |
267 | int adbOutCount = 0; /* how many packets in out queue */ | 267 | int adbOutCount = 0; /* how many packets in out queue */ | |
268 | int adbOutHead = 0; /* head of out queue */ | 268 | int adbOutHead = 0; /* head of out queue */ | |
269 | int adbOutTail = 0; /* tail of out queue */ | 269 | int adbOutTail = 0; /* tail of out queue */ | |
270 | 270 | |||
271 | int tickle_count = 0; /* how many tickles seen for this packet? */ | 271 | int tickle_count = 0; /* how many tickles seen for this packet? */ | |
272 | int tickle_serial = 0; /* the last packet tickled */ | 272 | int tickle_serial = 0; /* the last packet tickled */ | |
273 | int adb_cuda_serial = 0; /* the current packet */ | 273 | int adb_cuda_serial = 0; /* the current packet */ | |
274 | 274 | |||
275 | callout_t adb_cuda_tickle_ch; | 275 | callout_t adb_cuda_tickle_ch; | |
276 | 276 | |||
277 | void *adb_softintr_cookie; | 277 | void *adb_softintr_cookie; | |
278 | 278 | |||
279 | extern struct mac68k_machine_S mac68k_machine; | 279 | extern struct mac68k_machine_S mac68k_machine; | |
280 | 280 | |||
281 | void pm_setup_adb(void); | 281 | void pm_setup_adb(void); | |
282 | void pm_hw_setup(void); | 282 | void pm_hw_setup(void); | |
283 | void pm_check_adb_devices(int); | 283 | void pm_check_adb_devices(int); | |
284 | void pm_intr(void *); | 284 | void pm_intr(void *); | |
285 | int pm_adb_op(u_char *, void *, void *, int); | 285 | int pm_adb_op(u_char *, void *, void *, int); | |
286 | void pm_init_adb_device(void); | 286 | void pm_init_adb_device(void); | |
287 | 287 | |||
288 | /* | 288 | /* | |
289 | * The following are private routines. | 289 | * The following are private routines. | |
290 | */ | 290 | */ | |
291 | #ifdef ADB_DEBUG | 291 | #ifdef ADB_DEBUG | |
292 | void print_single(u_char *); | 292 | void print_single(u_char *); | |
293 | #endif | 293 | #endif | |
294 | void adb_intr(void *); | 294 | void adb_intr(void *); | |
295 | void adb_intr_II(void *); | 295 | void adb_intr_II(void *); | |
296 | void adb_intr_IIsi(void *); | 296 | void adb_intr_IIsi(void *); | |
297 | void adb_intr_cuda(void *); | 297 | void adb_intr_cuda(void *); | |
298 | void adb_soft_intr(void); | 298 | void adb_soft_intr(void); | |
299 | int send_adb_II(u_char *, u_char *, void *, void *, int); | 299 | int send_adb_II(u_char *, u_char *, void *, void *, int); | |
300 | int send_adb_IIsi(u_char *, u_char *, void *, void *, int); | 300 | int send_adb_IIsi(u_char *, u_char *, void *, void *, int); | |
301 | int send_adb_cuda(u_char *, u_char *, void *, void *, int); | 301 | int send_adb_cuda(u_char *, u_char *, void *, void *, int); | |
302 | void adb_intr_cuda_test(void); | 302 | void adb_intr_cuda_test(void); | |
303 | void adb_cuda_tickle(void); | 303 | void adb_cuda_tickle(void); | |
304 | void adb_pass_up(struct adbCommand *); | 304 | void adb_pass_up(struct adbCommand *); | |
305 | void adb_op_comprout(void); | 305 | void adb_op_comprout(void); | |
306 | void adb_reinit(void); | 306 | void adb_reinit(void); | |
307 | int count_adbs(void); | 307 | int count_adbs(void); | |
308 | int get_ind_adb_info(ADBDataBlock *, int); | 308 | int get_ind_adb_info(ADBDataBlock *, int); | |
309 | int get_adb_info(ADBDataBlock *, int); | 309 | int get_adb_info(ADBDataBlock *, int); | |
310 | int set_adb_info(ADBSetInfoBlock *, int); | 310 | int set_adb_info(ADBSetInfoBlock *, int); | |
311 | void adb_setup_hw_type(void); | 311 | void adb_setup_hw_type(void); | |
312 | int adb_op(Ptr, Ptr, Ptr, short); | 312 | int adb_op(Ptr, Ptr, Ptr, short); | |
313 | void adb_read_II(u_char *); | 313 | void adb_read_II(u_char *); | |
314 | void adb_hw_setup(void); | 314 | void adb_hw_setup(void); | |
315 | void adb_hw_setup_IIsi(u_char *); | 315 | void adb_hw_setup_IIsi(u_char *); | |
316 | void adb_comp_exec(void); | 316 | void adb_comp_exec(void); | |
317 | int adb_cmd_result(u_char *); | 317 | int adb_cmd_result(u_char *); | |
318 | int adb_cmd_extra(u_char *); | 318 | int adb_cmd_extra(u_char *); | |
319 | int adb_guess_next_device(void); | 319 | int adb_guess_next_device(void); | |
320 | int adb_prog_switch_enable(void); | 320 | int adb_prog_switch_enable(void); | |
321 | int adb_prog_switch_disable(void); | 321 | int adb_prog_switch_disable(void); | |
322 | /* we should create this and it will be the public version */ | 322 | /* we should create this and it will be the public version */ | |
323 | int send_adb(u_char *, void *, void *); | 323 | int send_adb(u_char *, void *, void *); | |
324 | void adb_iop_recv(IOP *, struct iop_msg *); | 324 | void adb_iop_recv(IOP *, struct iop_msg *); | |
325 | int send_adb_iop(int, u_char *, void *, void *); | 325 | int send_adb_iop(int, u_char *, void *, void *); | |
326 | 326 | |||
#ifdef ADB_DEBUG
/*
 * print_single
 * Diagnostic display routine.  Dumps, in hex, the bytes of a
 * length-prefixed buffer: byte [0] holds the count, bytes
 * [1]..[count] are the payload.
 */
void
print_single(u_char *str)
{
	int i;

	if (str == 0) {
		printf_intr("no data - null pointer\n");
		return;
	}
	if (*str == 0) {
		printf_intr("nothing returned\n");
		return;
	}
	if (*str > 20) {
		/* clamp an implausible length before walking the buffer */
		printf_intr("ADB: ACK > 20 no way!\n");
		*str = (u_char)20;
	}
	printf_intr("(length=0x%x):", (u_int)*str);
	for (i = 1; i <= *str; i++)
		printf_intr(" 0x%02x", (u_int)*(str + i));
	printf_intr("\n");
}
#endif
357 | 357 | |||
358 | static inline void | 358 | static inline void | |
359 | adb_process_serial_intrs(void) | 359 | adb_process_serial_intrs(void) | |
360 | { | 360 | { | |
361 | /* grab any serial interrupts (autovector IPL 4) */ | 361 | /* grab any serial interrupts (autovector IPL 4) */ | |
362 | struct clockframe dummy_frame = { | 362 | struct clockframe dummy_frame = { | |
363 | .cf_sr = PSL_S, | 363 | .cf_sr = PSL_S, | |
364 | .cf_vo = VECI_TO_VECO(VECI_INTRAV4), | 364 | .cf_vo = VECI_TO_VECO(VECI_INTRAV4), | |
365 | }; | 365 | }; | |
366 | (void)intr_dispatch(dummy_frame); | 366 | (void)intr_dispatch(dummy_frame); | |
367 | } | 367 | } | |
368 | 368 | |||
369 | void | 369 | void | |
370 | adb_cuda_tickle(void) | 370 | adb_cuda_tickle(void) | |
371 | { | 371 | { | |
372 | volatile int s; | 372 | volatile int s; | |
373 | 373 | |||
374 | if (adbActionState == ADB_ACTION_IN) { | 374 | if (adbActionState == ADB_ACTION_IN) { | |
375 | if (tickle_serial == adb_cuda_serial) { | 375 | if (tickle_serial == adb_cuda_serial) { | |
376 | if (++tickle_count > 0) { | 376 | if (++tickle_count > 0) { | |
377 | s = splhigh(); | 377 | s = splhigh(); | |
378 | adbActionState = ADB_ACTION_IDLE; | 378 | adbActionState = ADB_ACTION_IDLE; | |
379 | adbInputBuffer[0] = 0; | 379 | adbInputBuffer[0] = 0; | |
380 | ADB_SET_STATE_IDLE_CUDA(); | 380 | ADB_SET_STATE_IDLE_CUDA(); | |
381 | splx(s); | 381 | splx(s); | |
382 | } | 382 | } | |
383 | } else { | 383 | } else { | |
384 | tickle_serial = adb_cuda_serial; | 384 | tickle_serial = adb_cuda_serial; | |
385 | tickle_count = 0; | 385 | tickle_count = 0; | |
386 | } | 386 | } | |
387 | } else { | 387 | } else { | |
388 | tickle_serial = adb_cuda_serial; | 388 | tickle_serial = adb_cuda_serial; | |
389 | tickle_count = 0; | 389 | tickle_count = 0; | |
390 | } | 390 | } | |
391 | 391 | |||
392 | callout_reset(&adb_cuda_tickle_ch, ADB_TICKLE_TICKS, | 392 | callout_reset(&adb_cuda_tickle_ch, ADB_TICKLE_TICKS, | |
393 | (void *)adb_cuda_tickle, NULL); | 393 | (void *)adb_cuda_tickle, NULL); | |
394 | } | 394 | } | |
395 | 395 | |||
396 | /* | 396 | /* | |
397 | * called when when an adb interrupt happens | 397 | * called when an adb interrupt happens | |
398 | * | 398 | * | |
399 | * Cuda version of adb_intr | 399 | * Cuda version of adb_intr | |
400 | * TO DO: do we want to add some calls to intr_dispatch() here to | 400 | * TO DO: do we want to add some calls to intr_dispatch() here to | |
401 | * grab serial interrupts? | 401 | * grab serial interrupts? | |
402 | */ | 402 | */ | |
403 | void | 403 | void | |
404 | adb_intr_cuda(void *arg) | 404 | adb_intr_cuda(void *arg) | |
405 | { | 405 | { | |
406 | volatile int i __unused, ending; | 406 | volatile int i __unused, ending; | |
407 | volatile unsigned int s; | 407 | volatile unsigned int s; | |
408 | struct adbCommand packet; | 408 | struct adbCommand packet; | |
409 | 409 | |||
410 | s = splhigh(); /* can't be too careful - might be called */ | 410 | s = splhigh(); /* can't be too careful - might be called */ | |
411 | /* from a routine, NOT an interrupt */ | 411 | /* from a routine, NOT an interrupt */ | |
412 | 412 | |||
413 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | 413 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | |
414 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | 414 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | |
415 | 415 | |||
416 | switch_start: | 416 | switch_start: | |
417 | switch (adbActionState) { | 417 | switch (adbActionState) { | |
418 | case ADB_ACTION_IDLE: | 418 | case ADB_ACTION_IDLE: | |
419 | /* | 419 | /* | |
420 | * This is an unexpected packet, so grab the first (dummy) | 420 | * This is an unexpected packet, so grab the first (dummy) | |
421 | * byte, set up the proper vars, and tell the chip we are | 421 | * byte, set up the proper vars, and tell the chip we are | |
422 | * starting to receive the packet by setting the TIP bit. | 422 | * starting to receive the packet by setting the TIP bit. | |
423 | */ | 423 | */ | |
424 | adbInputBuffer[1] = ADB_SR(); | 424 | adbInputBuffer[1] = ADB_SR(); | |
425 | adb_cuda_serial++; | 425 | adb_cuda_serial++; | |
426 | if (ADB_INTR_IS_OFF) /* must have been a fake start */ | 426 | if (ADB_INTR_IS_OFF) /* must have been a fake start */ | |
427 | break; | 427 | break; | |
428 | 428 | |||
429 | ADB_SET_SR_INPUT(); | 429 | ADB_SET_SR_INPUT(); | |
430 | ADB_SET_STATE_TIP(); | 430 | ADB_SET_STATE_TIP(); | |
431 | 431 | |||
432 | adbInputBuffer[0] = 1; | 432 | adbInputBuffer[0] = 1; | |
433 | adbActionState = ADB_ACTION_IN; | 433 | adbActionState = ADB_ACTION_IN; | |
434 | #ifdef ADB_DEBUG | 434 | #ifdef ADB_DEBUG | |
435 | if (adb_debug) | 435 | if (adb_debug) | |
436 | printf_intr("idle 0x%02x ", adbInputBuffer[1]); | 436 | printf_intr("idle 0x%02x ", adbInputBuffer[1]); | |
437 | #endif | 437 | #endif | |
438 | break; | 438 | break; | |
439 | 439 | |||
440 | case ADB_ACTION_IN: | 440 | case ADB_ACTION_IN: | |
441 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); | 441 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); | |
442 | /* intr off means this is the last byte (end of frame) */ | 442 | /* intr off means this is the last byte (end of frame) */ | |
443 | if (ADB_INTR_IS_OFF) | 443 | if (ADB_INTR_IS_OFF) | |
444 | ending = 1; | 444 | ending = 1; | |
445 | else | 445 | else | |
446 | ending = 0; | 446 | ending = 0; | |
447 | 447 | |||
448 | if (1 == ending) { /* end of message? */ | 448 | if (1 == ending) { /* end of message? */ | |
449 | #ifdef ADB_DEBUG | 449 | #ifdef ADB_DEBUG | |
450 | if (adb_debug) { | 450 | if (adb_debug) { | |
451 | printf_intr("in end 0x%02x ", | 451 | printf_intr("in end 0x%02x ", | |
452 | adbInputBuffer[adbInputBuffer[0]]); | 452 | adbInputBuffer[adbInputBuffer[0]]); | |
453 | print_single(adbInputBuffer); | 453 | print_single(adbInputBuffer); | |
454 | } | 454 | } | |
455 | #endif | 455 | #endif | |
456 | 456 | |||
457 | /* | 457 | /* | |
458 | * Are we waiting AND does this packet match what we | 458 | * Are we waiting AND does this packet match what we | |
459 | * are waiting for AND is it coming from either the | 459 | * are waiting for AND is it coming from either the | |
460 | * ADB or RTC/PRAM sub-device? This section _should_ | 460 | * ADB or RTC/PRAM sub-device? This section _should_ | |
461 | * recognize all ADB and RTC/PRAM type commands, but | 461 | * recognize all ADB and RTC/PRAM type commands, but | |
462 | * there may be more... NOTE: commands are always at | 462 | * there may be more... NOTE: commands are always at | |
463 | * [4], even for RTC/PRAM commands. | 463 | * [4], even for RTC/PRAM commands. | |
464 | */ | 464 | */ | |
465 | /* set up data for adb_pass_up */ | 465 | /* set up data for adb_pass_up */ | |
466 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | 466 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | |
467 | 467 | |||
468 | if ((adbWaiting == 1) && | 468 | if ((adbWaiting == 1) && | |
469 | (adbInputBuffer[4] == adbWaitingCmd) && | 469 | (adbInputBuffer[4] == adbWaitingCmd) && | |
470 | ((adbInputBuffer[2] == 0x00) || | 470 | ((adbInputBuffer[2] == 0x00) || | |
471 | (adbInputBuffer[2] == 0x01))) { | 471 | (adbInputBuffer[2] == 0x01))) { | |
472 | packet.saveBuf = adbBuffer; | 472 | packet.saveBuf = adbBuffer; | |
473 | packet.compRout = adbCompRout; | 473 | packet.compRout = adbCompRout; | |
474 | packet.compData = adbCompData; | 474 | packet.compData = adbCompData; | |
475 | packet.unsol = 0; | 475 | packet.unsol = 0; | |
476 | packet.ack_only = 0; | 476 | packet.ack_only = 0; | |
477 | adb_pass_up(&packet); | 477 | adb_pass_up(&packet); | |
478 | 478 | |||
479 | adbWaitingCmd = 0; /* reset "waiting" vars */ | 479 | adbWaitingCmd = 0; /* reset "waiting" vars */ | |
480 | adbWaiting = 0; | 480 | adbWaiting = 0; | |
481 | adbBuffer = (long)0; | 481 | adbBuffer = (long)0; | |
482 | adbCompRout = (long)0; | 482 | adbCompRout = (long)0; | |
483 | adbCompData = (long)0; | 483 | adbCompData = (long)0; | |
484 | } else { | 484 | } else { | |
485 | packet.unsol = 1; | 485 | packet.unsol = 1; | |
486 | packet.ack_only = 0; | 486 | packet.ack_only = 0; | |
487 | adb_pass_up(&packet); | 487 | adb_pass_up(&packet); | |
488 | } | 488 | } | |
489 | 489 | |||
490 | 490 | |||
491 | /* reset vars and signal the end of this frame */ | 491 | /* reset vars and signal the end of this frame */ | |
492 | adbActionState = ADB_ACTION_IDLE; | 492 | adbActionState = ADB_ACTION_IDLE; | |
493 | adbInputBuffer[0] = 0; | 493 | adbInputBuffer[0] = 0; | |
494 | ADB_SET_STATE_IDLE_CUDA(); | 494 | ADB_SET_STATE_IDLE_CUDA(); | |
495 | /*ADB_SET_SR_INPUT();*/ | 495 | /*ADB_SET_SR_INPUT();*/ | |
496 | 496 | |||
497 | /* | 497 | /* | |
498 | * If there is something waiting to be sent out, | 498 | * If there is something waiting to be sent out, | |
499 | * the set everything up and send the first byte. | 499 | * the set everything up and send the first byte. | |
500 | */ | 500 | */ | |
501 | if (adbWriteDelay == 1) { | 501 | if (adbWriteDelay == 1) { | |
502 | delay(ADB_DELAY); /* required */ | 502 | delay(ADB_DELAY); /* required */ | |
503 | adbSentChars = 0; | 503 | adbSentChars = 0; | |
504 | adbActionState = ADB_ACTION_OUT; | 504 | adbActionState = ADB_ACTION_OUT; | |
505 | /* | 505 | /* | |
506 | * If the interrupt is on, we were too slow | 506 | * If the interrupt is on, we were too slow | |
507 | * and the chip has already started to send | 507 | * and the chip has already started to send | |
508 | * something to us, so back out of the write | 508 | * something to us, so back out of the write | |
509 | * and start a read cycle. | 509 | * and start a read cycle. | |
510 | */ | 510 | */ | |
511 | if (ADB_INTR_IS_ON) { | 511 | if (ADB_INTR_IS_ON) { | |
512 | ADB_SET_SR_INPUT(); | 512 | ADB_SET_SR_INPUT(); | |
513 | ADB_SET_STATE_IDLE_CUDA(); | 513 | ADB_SET_STATE_IDLE_CUDA(); | |
514 | adbSentChars = 0; | 514 | adbSentChars = 0; | |
515 | adbActionState = ADB_ACTION_IDLE; | 515 | adbActionState = ADB_ACTION_IDLE; | |
516 | adbInputBuffer[0] = 0; | 516 | adbInputBuffer[0] = 0; | |
517 | break; | 517 | break; | |
518 | } | 518 | } | |
519 | /* | 519 | /* | |
520 | * If we got here, it's ok to start sending | 520 | * If we got here, it's ok to start sending | |
521 | * so load the first byte and tell the chip | 521 | * so load the first byte and tell the chip | |
522 | * we want to send. | 522 | * we want to send. | |
523 | */ | 523 | */ | |
524 | ADB_SET_STATE_TIP(); | 524 | ADB_SET_STATE_TIP(); | |
525 | ADB_SET_SR_OUTPUT(); | 525 | ADB_SET_SR_OUTPUT(); | |
526 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; | 526 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; | |
527 | } | 527 | } | |
528 | } else { | 528 | } else { | |
529 | ADB_TOGGLE_STATE_ACK_CUDA(); | 529 | ADB_TOGGLE_STATE_ACK_CUDA(); | |
530 | #ifdef ADB_DEBUG | 530 | #ifdef ADB_DEBUG | |
531 | if (adb_debug) | 531 | if (adb_debug) | |
532 | printf_intr("in 0x%02x ", | 532 | printf_intr("in 0x%02x ", | |
533 | adbInputBuffer[adbInputBuffer[0]]); | 533 | adbInputBuffer[adbInputBuffer[0]]); | |
534 | #endif | 534 | #endif | |
535 | } | 535 | } | |
536 | break; | 536 | break; | |
537 | 537 | |||
538 | case ADB_ACTION_OUT: | 538 | case ADB_ACTION_OUT: | |
539 | i = ADB_SR(); /* reset SR-intr in IFR */ | 539 | i = ADB_SR(); /* reset SR-intr in IFR */ | |
540 | #ifdef ADB_DEBUG | 540 | #ifdef ADB_DEBUG | |
541 | if (adb_debug) | 541 | if (adb_debug) | |
542 | printf_intr("intr out 0x%02x ", i); | 542 | printf_intr("intr out 0x%02x ", i); | |
543 | #endif | 543 | #endif | |
544 | 544 | |||
545 | adbSentChars++; | 545 | adbSentChars++; | |
546 | if (ADB_INTR_IS_ON) { /* ADB intr low during write */ | 546 | if (ADB_INTR_IS_ON) { /* ADB intr low during write */ | |
547 | #ifdef ADB_DEBUG | 547 | #ifdef ADB_DEBUG | |
548 | if (adb_debug) | 548 | if (adb_debug) | |
549 | printf_intr("intr was on "); | 549 | printf_intr("intr was on "); | |
550 | #endif | 550 | #endif | |
551 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 551 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
552 | ADB_SET_STATE_IDLE_CUDA(); | 552 | ADB_SET_STATE_IDLE_CUDA(); | |
553 | adbSentChars = 0; /* must start all over */ | 553 | adbSentChars = 0; /* must start all over */ | |
554 | adbActionState = ADB_ACTION_IDLE; /* new state */ | 554 | adbActionState = ADB_ACTION_IDLE; /* new state */ | |
555 | adbInputBuffer[0] = 0; | 555 | adbInputBuffer[0] = 0; | |
556 | adbWriteDelay = 1; /* must retry when done with | 556 | adbWriteDelay = 1; /* must retry when done with | |
557 | * read */ | 557 | * read */ | |
558 | delay(ADB_DELAY); | 558 | delay(ADB_DELAY); | |
559 | goto switch_start; /* process next state right | 559 | goto switch_start; /* process next state right | |
560 | * now */ | 560 | * now */ | |
561 | break; | 561 | break; | |
562 | } | 562 | } | |
563 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | 563 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | |
564 | if (0 == adb_cmd_result(adbOutputBuffer)) { /* do we expect data | 564 | if (0 == adb_cmd_result(adbOutputBuffer)) { /* do we expect data | |
565 | * back? */ | 565 | * back? */ | |
566 | adbWaiting = 1; /* signal waiting for return */ | 566 | adbWaiting = 1; /* signal waiting for return */ | |
567 | adbWaitingCmd = adbOutputBuffer[2]; /* save waiting command */ | 567 | adbWaitingCmd = adbOutputBuffer[2]; /* save waiting command */ | |
568 | } else { /* no talk, so done */ | 568 | } else { /* no talk, so done */ | |
569 | /* set up stuff for adb_pass_up */ | 569 | /* set up stuff for adb_pass_up */ | |
570 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | 570 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | |
571 | packet.saveBuf = adbBuffer; | 571 | packet.saveBuf = adbBuffer; | |
572 | packet.compRout = adbCompRout; | 572 | packet.compRout = adbCompRout; | |
573 | packet.compData = adbCompData; | 573 | packet.compData = adbCompData; | |
574 | packet.cmd = adbWaitingCmd; | 574 | packet.cmd = adbWaitingCmd; | |
575 | packet.unsol = 0; | 575 | packet.unsol = 0; | |
576 | packet.ack_only = 1; | 576 | packet.ack_only = 1; | |
577 | adb_pass_up(&packet); | 577 | adb_pass_up(&packet); | |
578 | 578 | |||
579 | /* reset "waiting" vars, just in case */ | 579 | /* reset "waiting" vars, just in case */ | |
580 | adbWaitingCmd = 0; | 580 | adbWaitingCmd = 0; | |
581 | adbBuffer = (long)0; | 581 | adbBuffer = (long)0; | |
582 | adbCompRout = (long)0; | 582 | adbCompRout = (long)0; | |
583 | adbCompData = (long)0; | 583 | adbCompData = (long)0; | |
584 | } | 584 | } | |
585 | 585 | |||
586 | adbWriteDelay = 0; /* done writing */ | 586 | adbWriteDelay = 0; /* done writing */ | |
587 | adbActionState = ADB_ACTION_IDLE; /* signal bus is idle */ | 587 | adbActionState = ADB_ACTION_IDLE; /* signal bus is idle */ | |
588 | ADB_SET_SR_INPUT(); | 588 | ADB_SET_SR_INPUT(); | |
589 | ADB_SET_STATE_IDLE_CUDA(); | 589 | ADB_SET_STATE_IDLE_CUDA(); | |
590 | #ifdef ADB_DEBUG | 590 | #ifdef ADB_DEBUG | |
591 | if (adb_debug) | 591 | if (adb_debug) | |
592 | printf_intr("write done "); | 592 | printf_intr("write done "); | |
593 | #endif | 593 | #endif | |
594 | } else { | 594 | } else { | |
595 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* send next byte */ | 595 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* send next byte */ | |
596 | ADB_TOGGLE_STATE_ACK_CUDA(); /* signal byte ready to | 596 | ADB_TOGGLE_STATE_ACK_CUDA(); /* signal byte ready to | |
597 | * shift */ | 597 | * shift */ | |
598 | #ifdef ADB_DEBUG | 598 | #ifdef ADB_DEBUG | |
599 | if (adb_debug) | 599 | if (adb_debug) | |
600 | printf_intr("toggle "); | 600 | printf_intr("toggle "); | |
601 | #endif | 601 | #endif | |
602 | } | 602 | } | |
603 | break; | 603 | break; | |
604 | 604 | |||
605 | case ADB_ACTION_NOTREADY: | 605 | case ADB_ACTION_NOTREADY: | |
606 | #ifdef ADB_DEBUG | 606 | #ifdef ADB_DEBUG | |
607 | if (adb_debug) | 607 | if (adb_debug) | |
608 | printf_intr("adb: not yet initialized\n"); | 608 | printf_intr("adb: not yet initialized\n"); | |
609 | #endif | 609 | #endif | |
610 | break; | 610 | break; | |
611 | 611 | |||
612 | default: | 612 | default: | |
613 | #ifdef ADB_DEBUG | 613 | #ifdef ADB_DEBUG | |
614 | if (adb_debug) | 614 | if (adb_debug) | |
615 | printf_intr("intr: unknown ADB state\n"); | 615 | printf_intr("intr: unknown ADB state\n"); | |
616 | #endif | 616 | #endif | |
617 | break; | 617 | break; | |
618 | } | 618 | } | |
619 | 619 | |||
620 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | 620 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | |
621 | 621 | |||
622 | splx(s); /* restore */ | 622 | splx(s); /* restore */ | |
623 | 623 | |||
624 | return; | 624 | return; | |
625 | } /* end adb_intr_cuda */ | 625 | } /* end adb_intr_cuda */ | |
626 | 626 | |||
627 | 627 | |||
628 | int | 628 | int | |
629 | send_adb_cuda(u_char *in, u_char *buffer, void *compRout, void *data, int | 629 | send_adb_cuda(u_char *in, u_char *buffer, void *compRout, void *data, int | |
630 | command) | 630 | command) | |
631 | { | 631 | { | |
632 | int s, len; | 632 | int s, len; | |
633 | 633 | |||
634 | #ifdef ADB_DEBUG | 634 | #ifdef ADB_DEBUG | |
635 | if (adb_debug) | 635 | if (adb_debug) | |
636 | printf_intr("SEND\n"); | 636 | printf_intr("SEND\n"); | |
637 | #endif | 637 | #endif | |
638 | 638 | |||
639 | if (adbActionState == ADB_ACTION_NOTREADY) | 639 | if (adbActionState == ADB_ACTION_NOTREADY) | |
640 | return 1; | 640 | return 1; | |
641 | 641 | |||
642 | /* Don't interrupt while we are messing with the ADB */ | 642 | /* Don't interrupt while we are messing with the ADB */ | |
643 | s = splhigh(); | 643 | s = splhigh(); | |
644 | 644 | |||
645 | if ((adbActionState == ADB_ACTION_IDLE) && /* ADB available? */ | 645 | if ((adbActionState == ADB_ACTION_IDLE) && /* ADB available? */ | |
646 | (ADB_INTR_IS_OFF)) { /* and no incoming interrupt? */ | 646 | (ADB_INTR_IS_OFF)) { /* and no incoming interrupt? */ | |
647 | } else | 647 | } else | |
648 | if (adbWriteDelay == 0) /* it's busy, but is anything waiting? */ | 648 | if (adbWriteDelay == 0) /* it's busy, but is anything waiting? */ | |
649 | adbWriteDelay = 1; /* if no, then we'll "queue" | 649 | adbWriteDelay = 1; /* if no, then we'll "queue" | |
650 | * it up */ | 650 | * it up */ | |
651 | else { | 651 | else { | |
652 | splx(s); | 652 | splx(s); | |
653 | return 1; /* really busy! */ | 653 | return 1; /* really busy! */ | |
654 | } | 654 | } | |
655 | 655 | |||
656 | #ifdef ADB_DEBUG | 656 | #ifdef ADB_DEBUG | |
657 | if (adb_debug) | 657 | if (adb_debug) | |
658 | printf_intr("QUEUE\n"); | 658 | printf_intr("QUEUE\n"); | |
659 | #endif | 659 | #endif | |
660 | if ((long)in == (long)0) { /* need to convert? */ | 660 | if ((long)in == (long)0) { /* need to convert? */ | |
661 | /* | 661 | /* | |
662 | * Don't need to use adb_cmd_extra here because this section | 662 | * Don't need to use adb_cmd_extra here because this section | |
663 | * will be called ONLY when it is an ADB command (no RTC or | 663 | * will be called ONLY when it is an ADB command (no RTC or | |
664 | * PRAM) | 664 | * PRAM) | |
665 | */ | 665 | */ | |
666 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | 666 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | |
667 | * doing a listen! */ | 667 | * doing a listen! */ | |
668 | len = buffer[0]; /* length of additional data */ | 668 | len = buffer[0]; /* length of additional data */ | |
669 | else | 669 | else | |
670 | len = 0;/* no additional data */ | 670 | len = 0;/* no additional data */ | |
671 | 671 | |||
672 | adbOutputBuffer[0] = 2 + len; /* dev. type + command + addl. | 672 | adbOutputBuffer[0] = 2 + len; /* dev. type + command + addl. | |
673 | * data */ | 673 | * data */ | |
674 | adbOutputBuffer[1] = 0x00; /* mark as an ADB command */ | 674 | adbOutputBuffer[1] = 0x00; /* mark as an ADB command */ | |
675 | adbOutputBuffer[2] = (u_char)command; /* load command */ | 675 | adbOutputBuffer[2] = (u_char)command; /* load command */ | |
676 | 676 | |||
677 | /* copy additional output data, if any */ | 677 | /* copy additional output data, if any */ | |
678 | memcpy(adbOutputBuffer + 3, buffer + 1, len); | 678 | memcpy(adbOutputBuffer + 3, buffer + 1, len); | |
679 | } else | 679 | } else | |
680 | /* if data ready, just copy over */ | 680 | /* if data ready, just copy over */ | |
681 | memcpy(adbOutputBuffer, in, in[0] + 2); | 681 | memcpy(adbOutputBuffer, in, in[0] + 2); | |
682 | 682 | |||
683 | adbSentChars = 0; /* nothing sent yet */ | 683 | adbSentChars = 0; /* nothing sent yet */ | |
684 | adbBuffer = buffer; /* save buffer to know where to save result */ | 684 | adbBuffer = buffer; /* save buffer to know where to save result */ | |
685 | adbCompRout = compRout; /* save completion routine pointer */ | 685 | adbCompRout = compRout; /* save completion routine pointer */ | |
686 | adbCompData = data; /* save completion routine data pointer */ | 686 | adbCompData = data; /* save completion routine data pointer */ | |
687 | adbWaitingCmd = adbOutputBuffer[2]; /* save wait command */ | 687 | adbWaitingCmd = adbOutputBuffer[2]; /* save wait command */ | |
688 | 688 | |||
689 | if (adbWriteDelay != 1) { /* start command now? */ | 689 | if (adbWriteDelay != 1) { /* start command now? */ | |
690 | #ifdef ADB_DEBUG | 690 | #ifdef ADB_DEBUG | |
691 | if (adb_debug) | 691 | if (adb_debug) | |
692 | printf_intr("out start NOW"); | 692 | printf_intr("out start NOW"); | |
693 | #endif | 693 | #endif | |
694 | delay(ADB_DELAY); | 694 | delay(ADB_DELAY); | |
695 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 695 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
696 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 696 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
697 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* load byte for output */ | 697 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* load byte for output */ | |
698 | ADB_SET_STATE_ACKOFF_CUDA(); | 698 | ADB_SET_STATE_ACKOFF_CUDA(); | |
699 | ADB_SET_STATE_TIP(); /* tell ADB that we want to send */ | 699 | ADB_SET_STATE_TIP(); /* tell ADB that we want to send */ | |
700 | } | 700 | } | |
701 | adbWriteDelay = 1; /* something in the write "queue" */ | 701 | adbWriteDelay = 1; /* something in the write "queue" */ | |
702 | 702 | |||
703 | splx(s); | 703 | splx(s); | |
704 | 704 | |||
705 | if (0x0100 <= (s & 0x0700)) /* were VIA1 interrupts blocked? */ | 705 | if (0x0100 <= (s & 0x0700)) /* were VIA1 interrupts blocked? */ | |
706 | /* poll until byte done */ | 706 | /* poll until byte done */ | |
707 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | 707 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | |
708 | || (adbWaiting == 1)) | 708 | || (adbWaiting == 1)) | |
709 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | 709 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | |
710 | adb_intr_cuda(NULL); /* go process it */ | 710 | adb_intr_cuda(NULL); /* go process it */ | |
711 | if (adb_polling) | 711 | if (adb_polling) | |
712 | adb_soft_intr(); | 712 | adb_soft_intr(); | |
713 | } | 713 | } | |
714 | 714 | |||
715 | return 0; | 715 | return 0; | |
716 | } /* send_adb_cuda */ | 716 | } /* send_adb_cuda */ | |
717 | 717 | |||
718 | 718 | |||
719 | void | 719 | void | |
720 | adb_intr_II(void *arg) | 720 | adb_intr_II(void *arg) | |
721 | { | 721 | { | |
722 | struct adbCommand packet; | 722 | struct adbCommand packet; | |
723 | int i, intr_on = 0; | 723 | int i, intr_on = 0; | |
724 | int send = 0; | 724 | int send = 0; | |
725 | unsigned int s; | 725 | unsigned int s; | |
726 | 726 | |||
727 | s = splhigh(); /* can't be too careful - might be called */ | 727 | s = splhigh(); /* can't be too careful - might be called */ | |
728 | /* from a routine, NOT an interrupt */ | 728 | /* from a routine, NOT an interrupt */ | |
729 | 729 | |||
730 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | 730 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | |
731 | 731 | |||
732 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | 732 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | |
733 | 733 | |||
734 | delay(ADB_DELAY); /* yuck (don't remove) */ | 734 | delay(ADB_DELAY); /* yuck (don't remove) */ | |
735 | 735 | |||
736 | adb_process_serial_intrs(); | 736 | adb_process_serial_intrs(); | |
737 | 737 | |||
738 | if (ADB_INTR_IS_ON) | 738 | if (ADB_INTR_IS_ON) | |
739 | intr_on = 1; /* save for later */ | 739 | intr_on = 1; /* save for later */ | |
740 | 740 | |||
741 | switch_start: | 741 | switch_start: | |
742 | switch (adbActionState) { | 742 | switch (adbActionState) { | |
743 | case ADB_ACTION_POLLING: | 743 | case ADB_ACTION_POLLING: | |
744 | if (!intr_on) { | 744 | if (!intr_on) { | |
745 | if (adbOutQueueHasData) { | 745 | if (adbOutQueueHasData) { | |
746 | #ifdef ADB_DEBUG | 746 | #ifdef ADB_DEBUG | |
747 | if (adb_debug & 0x80) | 747 | if (adb_debug & 0x80) | |
748 | printf_intr("POLL-doing-out-queue. "); | 748 | printf_intr("POLL-doing-out-queue. "); | |
749 | #endif | 749 | #endif | |
750 | ADB_SET_STATE_IDLE_II(); | 750 | ADB_SET_STATE_IDLE_II(); | |
751 | delay(ADB_DELAY); | 751 | delay(ADB_DELAY); | |
752 | 752 | |||
753 | /* copy over data */ | 753 | /* copy over data */ | |
754 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | 754 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | |
755 | adbOutQueue.outBuf[0] + 2); | 755 | adbOutQueue.outBuf[0] + 2); | |
756 | 756 | |||
757 | adbBuffer = adbOutQueue.saveBuf; /* user data area */ | 757 | adbBuffer = adbOutQueue.saveBuf; /* user data area */ | |
758 | adbCompRout = adbOutQueue.compRout; /* completion routine */ | 758 | adbCompRout = adbOutQueue.compRout; /* completion routine */ | |
759 | adbCompData = adbOutQueue.data; /* comp. rout. data */ | 759 | adbCompData = adbOutQueue.data; /* comp. rout. data */ | |
760 | adbOutQueueHasData = 0; /* currently processing | 760 | adbOutQueueHasData = 0; /* currently processing | |
761 | * "queue" entry */ | 761 | * "queue" entry */ | |
762 | adbSentChars = 0; /* nothing sent yet */ | 762 | adbSentChars = 0; /* nothing sent yet */ | |
763 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 763 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
764 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 764 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
765 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | 765 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | |
766 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | 766 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | |
767 | ADB_SET_STATE_CMD(); /* tell ADB that we want to send */ | 767 | ADB_SET_STATE_CMD(); /* tell ADB that we want to send */ | |
768 | break; | 768 | break; | |
769 | } else { | 769 | } else { | |
770 | #ifdef ADB_DEBUG | 770 | #ifdef ADB_DEBUG | |
771 | if (adb_debug) | 771 | if (adb_debug) | |
772 | printf_intr("pIDLE "); | 772 | printf_intr("pIDLE "); | |
773 | #endif | 773 | #endif | |
774 | adbActionState = ADB_ACTION_IDLE; | 774 | adbActionState = ADB_ACTION_IDLE; | |
775 | } | 775 | } | |
776 | } else { | 776 | } else { | |
777 | #ifdef ADB_DEBUG | 777 | #ifdef ADB_DEBUG | |
778 | if (adb_debug & 0x80) | 778 | if (adb_debug & 0x80) | |
779 | printf_intr("pIN "); | 779 | printf_intr("pIN "); | |
780 | #endif | 780 | #endif | |
781 | adbActionState = ADB_ACTION_IN; | 781 | adbActionState = ADB_ACTION_IN; | |
782 | } | 782 | } | |
783 | delay(ADB_DELAY); | 783 | delay(ADB_DELAY); | |
784 | adb_process_serial_intrs(); | 784 | adb_process_serial_intrs(); | |
785 | goto switch_start; | 785 | goto switch_start; | |
786 | break; | 786 | break; | |
787 | case ADB_ACTION_IDLE: | 787 | case ADB_ACTION_IDLE: | |
788 | if (!intr_on) { | 788 | if (!intr_on) { | |
789 | i = ADB_SR(); | 789 | i = ADB_SR(); | |
790 | adbBusState = ADB_BUS_IDLE; | 790 | adbBusState = ADB_BUS_IDLE; | |
791 | adbActionState = ADB_ACTION_IDLE; | 791 | adbActionState = ADB_ACTION_IDLE; | |
792 | ADB_SET_STATE_IDLE_II(); | 792 | ADB_SET_STATE_IDLE_II(); | |
793 | break; | 793 | break; | |
794 | } | 794 | } | |
795 | adbInputBuffer[0] = 1; | 795 | adbInputBuffer[0] = 1; | |
796 | adbInputBuffer[1] = ADB_SR(); /* get first byte */ | 796 | adbInputBuffer[1] = ADB_SR(); /* get first byte */ | |
797 | #ifdef ADB_DEBUG | 797 | #ifdef ADB_DEBUG | |
798 | if (adb_debug & 0x80) | 798 | if (adb_debug & 0x80) | |
799 | printf_intr("idle 0x%02x ", adbInputBuffer[1]); | 799 | printf_intr("idle 0x%02x ", adbInputBuffer[1]); | |
800 | #endif | 800 | #endif | |
801 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 801 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
802 | adbActionState = ADB_ACTION_IN; /* set next state */ | 802 | adbActionState = ADB_ACTION_IN; /* set next state */ | |
803 | ADB_SET_STATE_EVEN(); /* set bus state to even */ | 803 | ADB_SET_STATE_EVEN(); /* set bus state to even */ | |
804 | adbBusState = ADB_BUS_EVEN; | 804 | adbBusState = ADB_BUS_EVEN; | |
805 | break; | 805 | break; | |
806 | 806 | |||
807 | case ADB_ACTION_IN: | 807 | case ADB_ACTION_IN: | |
808 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); /* get byte */ | 808 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); /* get byte */ | |
809 | #ifdef ADB_DEBUG | 809 | #ifdef ADB_DEBUG | |
810 | if (adb_debug & 0x80) | 810 | if (adb_debug & 0x80) | |
811 | printf_intr("in 0x%02x ", | 811 | printf_intr("in 0x%02x ", | |
812 | adbInputBuffer[adbInputBuffer[0]]); | 812 | adbInputBuffer[adbInputBuffer[0]]); | |
813 | #endif | 813 | #endif | |
814 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 814 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
815 | 815 | |||
816 | if (intr_on) { /* process last byte of packet */ | 816 | if (intr_on) { /* process last byte of packet */ | |
817 | adbInputBuffer[0]--; /* minus one */ | 817 | adbInputBuffer[0]--; /* minus one */ | |
818 | /* | 818 | /* | |
819 | * If intr_on was true, and it's the second byte, then | 819 | * If intr_on was true, and it's the second byte, then | |
820 | * the byte we just discarded is really valid, so | 820 | * the byte we just discarded is really valid, so | |
821 | * adjust the count | 821 | * adjust the count | |
822 | */ | 822 | */ | |
823 | if (adbInputBuffer[0] == 2) { | 823 | if (adbInputBuffer[0] == 2) { | |
824 | adbInputBuffer[0]++; | 824 | adbInputBuffer[0]++; | |
825 | } | 825 | } | |
826 | 826 | |||
827 | #ifdef ADB_DEBUG | 827 | #ifdef ADB_DEBUG | |
828 | if (adb_debug & 0x80) { | 828 | if (adb_debug & 0x80) { | |
829 | printf_intr("done: "); | 829 | printf_intr("done: "); | |
830 | print_single(adbInputBuffer); | 830 | print_single(adbInputBuffer); | |
831 | } | 831 | } | |
832 | #endif | 832 | #endif | |
833 | 833 | |||
834 | adbLastDevice = ADB_CMDADDR(adbInputBuffer[1]); | 834 | adbLastDevice = ADB_CMDADDR(adbInputBuffer[1]); | |
835 | 835 | |||
836 | if (adbInputBuffer[0] == 1 && !adbWaiting) { /* SRQ!!!*/ | 836 | if (adbInputBuffer[0] == 1 && !adbWaiting) { /* SRQ!!!*/ | |
837 | #ifdef ADB_DEBUG | 837 | #ifdef ADB_DEBUG | |
838 | if (adb_debug & 0x80) | 838 | if (adb_debug & 0x80) | |
839 | printf_intr(" xSRQ! "); | 839 | printf_intr(" xSRQ! "); | |
840 | #endif | 840 | #endif | |
841 | adb_guess_next_device(); | 841 | adb_guess_next_device(); | |
842 | #ifdef ADB_DEBUG | 842 | #ifdef ADB_DEBUG | |
843 | if (adb_debug & 0x80) | 843 | if (adb_debug & 0x80) | |
844 | printf_intr("try 0x%0x ", | 844 | printf_intr("try 0x%0x ", | |
845 | adbLastDevice); | 845 | adbLastDevice); | |
846 | #endif | 846 | #endif | |
847 | adbOutputBuffer[0] = 1; | 847 | adbOutputBuffer[0] = 1; | |
848 | adbOutputBuffer[1] = ADBTALK(adbLastDevice, 0); | 848 | adbOutputBuffer[1] = ADBTALK(adbLastDevice, 0); | |
849 | 849 | |||
850 | adbSentChars = 0; /* nothing sent yet */ | 850 | adbSentChars = 0; /* nothing sent yet */ | |
851 | adbActionState = ADB_ACTION_POLLING; /* set next state */ | 851 | adbActionState = ADB_ACTION_POLLING; /* set next state */ | |
852 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 852 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
853 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | 853 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | |
854 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | 854 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | |
855 | ADB_SET_STATE_CMD(); /* tell ADB that we want to */ | 855 | ADB_SET_STATE_CMD(); /* tell ADB that we want to */ | |
856 | break; | 856 | break; | |
857 | } | 857 | } | |
858 | 858 | |||
859 | /* set up data for adb_pass_up */ | 859 | /* set up data for adb_pass_up */ | |
860 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | 860 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | |
861 | 861 | |||
862 | if (!adbWaiting && (adbInputBuffer[0] != 0)) { | 862 | if (!adbWaiting && (adbInputBuffer[0] != 0)) { | |
863 | packet.unsol = 1; | 863 | packet.unsol = 1; | |
864 | packet.ack_only = 0; | 864 | packet.ack_only = 0; | |
865 | adb_pass_up(&packet); | 865 | adb_pass_up(&packet); | |
866 | } else { | 866 | } else { | |
867 | packet.saveBuf = adbBuffer; | 867 | packet.saveBuf = adbBuffer; | |
868 | packet.compRout = adbCompRout; | 868 | packet.compRout = adbCompRout; | |
869 | packet.compData = adbCompData; | 869 | packet.compData = adbCompData; | |
870 | packet.unsol = 0; | 870 | packet.unsol = 0; | |
871 | packet.ack_only = 0; | 871 | packet.ack_only = 0; | |
872 | adb_pass_up(&packet); | 872 | adb_pass_up(&packet); | |
873 | } | 873 | } | |
874 | 874 | |||
875 | adbWaiting = 0; | 875 | adbWaiting = 0; | |
876 | adbInputBuffer[0] = 0; | 876 | adbInputBuffer[0] = 0; | |
877 | adbBuffer = (long)0; | 877 | adbBuffer = (long)0; | |
878 | adbCompRout = (long)0; | 878 | adbCompRout = (long)0; | |
879 | adbCompData = (long)0; | 879 | adbCompData = (long)0; | |
880 | /* | 880 | /* | |
881 | * Since we are done, check whether there is any data | 881 | * Since we are done, check whether there is any data | |
882 | * waiting to do out. If so, start the sending the data. | 882 | * waiting to do out. If so, start the sending the data. | |
883 | */ | 883 | */ | |
884 | if (adbOutQueueHasData == 1) { | 884 | if (adbOutQueueHasData == 1) { | |
885 | #ifdef ADB_DEBUG | 885 | #ifdef ADB_DEBUG | |
886 | if (adb_debug & 0x80) | 886 | if (adb_debug & 0x80) | |
887 | printf_intr("XXX: DOING OUT QUEUE\n"); | 887 | printf_intr("XXX: DOING OUT QUEUE\n"); | |
888 | #endif | 888 | #endif | |
889 | /* copy over data */ | 889 | /* copy over data */ | |
890 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | 890 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | |
891 | adbOutQueue.outBuf[0] + 2); | 891 | adbOutQueue.outBuf[0] + 2); | |
892 | adbBuffer = adbOutQueue.saveBuf; /* user data area */ | 892 | adbBuffer = adbOutQueue.saveBuf; /* user data area */ | |
893 | adbCompRout = adbOutQueue.compRout; /* completion routine */ | 893 | adbCompRout = adbOutQueue.compRout; /* completion routine */ | |
894 | adbCompData = adbOutQueue.data; /* comp. rout. data */ | 894 | adbCompData = adbOutQueue.data; /* comp. rout. data */ | |
895 | adbOutQueueHasData = 0; /* currently processing | 895 | adbOutQueueHasData = 0; /* currently processing | |
896 | * "queue" entry */ | 896 | * "queue" entry */ | |
897 | send = 1; | 897 | send = 1; | |
898 | } else { | 898 | } else { | |
899 | #ifdef ADB_DEBUG | 899 | #ifdef ADB_DEBUG | |
900 | if (adb_debug & 0x80) | 900 | if (adb_debug & 0x80) | |
901 | printf_intr("XXending "); | 901 | printf_intr("XXending "); | |
902 | #endif | 902 | #endif | |
903 | adb_guess_next_device(); | 903 | adb_guess_next_device(); | |
904 | adbOutputBuffer[0] = 1; | 904 | adbOutputBuffer[0] = 1; | |
905 | adbOutputBuffer[1] = ((adbLastDevice & 0x0f) << 4) | 0x0c; | 905 | adbOutputBuffer[1] = ((adbLastDevice & 0x0f) << 4) | 0x0c; | |
906 | adbSentChars = 0; /* nothing sent yet */ | 906 | adbSentChars = 0; /* nothing sent yet */ | |
907 | adbActionState = ADB_ACTION_POLLING; /* set next state */ | 907 | adbActionState = ADB_ACTION_POLLING; /* set next state */ | |
908 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 908 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
909 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | 909 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | |
910 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | 910 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | |
911 | ADB_SET_STATE_CMD(); /* tell ADB that we want to */ | 911 | ADB_SET_STATE_CMD(); /* tell ADB that we want to */ | |
912 | break; | 912 | break; | |
913 | } | 913 | } | |
914 | } | 914 | } | |
915 | 915 | |||
916 | /* | 916 | /* | |
917 | * If send is true then something above determined that | 917 | * If send is true then something above determined that | |
918 | * the message has ended and we need to start sending out | 918 | * the message has ended and we need to start sending out | |
919 | * a new message immediately. This could be because there | 919 | * a new message immediately. This could be because there | |
920 | * is data waiting to go out or because an SRQ was seen. | 920 | * is data waiting to go out or because an SRQ was seen. | |
921 | */ | 921 | */ | |
922 | if (send) { | 922 | if (send) { | |
923 | adbSentChars = 0; /* nothing sent yet */ | 923 | adbSentChars = 0; /* nothing sent yet */ | |
924 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 924 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
925 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 925 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
926 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | 926 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | |
927 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | 927 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | |
928 | ADB_SET_STATE_CMD(); /* tell ADB that we want to | 928 | ADB_SET_STATE_CMD(); /* tell ADB that we want to | |
929 | * send */ | 929 | * send */ | |
930 | break; | 930 | break; | |
931 | } | 931 | } | |
932 | /* We only get this far if the message hasn't ended yet. */ | 932 | /* We only get this far if the message hasn't ended yet. */ | |
933 | switch (adbBusState) { /* set to next state */ | 933 | switch (adbBusState) { /* set to next state */ | |
934 | case ADB_BUS_EVEN: | 934 | case ADB_BUS_EVEN: | |
935 | ADB_SET_STATE_ODD(); /* set state to odd */ | 935 | ADB_SET_STATE_ODD(); /* set state to odd */ | |
936 | adbBusState = ADB_BUS_ODD; | 936 | adbBusState = ADB_BUS_ODD; | |
937 | break; | 937 | break; | |
938 | 938 | |||
939 | case ADB_BUS_ODD: | 939 | case ADB_BUS_ODD: | |
940 | ADB_SET_STATE_EVEN(); /* set state to even */ | 940 | ADB_SET_STATE_EVEN(); /* set state to even */ | |
941 | adbBusState = ADB_BUS_EVEN; | 941 | adbBusState = ADB_BUS_EVEN; | |
942 | break; | 942 | break; | |
943 | default: | 943 | default: | |
944 | printf_intr("strange state!!!\n"); /* huh? */ | 944 | printf_intr("strange state!!!\n"); /* huh? */ | |
945 | break; | 945 | break; | |
946 | } | 946 | } | |
947 | break; | 947 | break; | |
948 | 948 | |||
949 | case ADB_ACTION_OUT: | 949 | case ADB_ACTION_OUT: | |
950 | i = ADB_SR(); /* clear interrupt */ | 950 | i = ADB_SR(); /* clear interrupt */ | |
951 | adbSentChars++; | 951 | adbSentChars++; | |
952 | /* | 952 | /* | |
953 | * If the outgoing data was a TALK, we must | 953 | * If the outgoing data was a TALK, we must | |
954 | * switch to input mode to get the result. | 954 | * switch to input mode to get the result. | |
955 | */ | 955 | */ | |
956 | if ((adbOutputBuffer[1] & 0x0c) == 0x0c) { | 956 | if ((adbOutputBuffer[1] & 0x0c) == 0x0c) { | |
957 | adbInputBuffer[0] = 1; | 957 | adbInputBuffer[0] = 1; | |
958 | adbInputBuffer[1] = i; | 958 | adbInputBuffer[1] = i; | |
959 | adbActionState = ADB_ACTION_IN; | 959 | adbActionState = ADB_ACTION_IN; | |
960 | ADB_SET_SR_INPUT(); | 960 | ADB_SET_SR_INPUT(); | |
961 | adbBusState = ADB_BUS_EVEN; | 961 | adbBusState = ADB_BUS_EVEN; | |
962 | ADB_SET_STATE_EVEN(); | 962 | ADB_SET_STATE_EVEN(); | |
963 | #ifdef ADB_DEBUG | 963 | #ifdef ADB_DEBUG | |
964 | if (adb_debug & 0x80) | 964 | if (adb_debug & 0x80) | |
965 | printf_intr("talk out 0x%02x ", i); | 965 | printf_intr("talk out 0x%02x ", i); | |
966 | #endif | 966 | #endif | |
967 | /* we want something back */ | 967 | /* we want something back */ | |
968 | adbWaiting = 1; | 968 | adbWaiting = 1; | |
969 | break; | 969 | break; | |
970 | } | 970 | } | |
971 | /* | 971 | /* | |
972 | * If it's not a TALK, check whether all data has been sent. | 972 | * If it's not a TALK, check whether all data has been sent. | |
973 | * If so, call the completion routine and clean up. If not, | 973 | * If so, call the completion routine and clean up. If not, | |
974 | * advance to the next state. | 974 | * advance to the next state. | |
975 | */ | 975 | */ | |
976 | #ifdef ADB_DEBUG | 976 | #ifdef ADB_DEBUG | |
977 | if (adb_debug & 0x80) | 977 | if (adb_debug & 0x80) | |
978 | printf_intr("non-talk out 0x%0x ", i); | 978 | printf_intr("non-talk out 0x%0x ", i); | |
979 | #endif | 979 | #endif | |
980 | ADB_SET_SR_OUTPUT(); | 980 | ADB_SET_SR_OUTPUT(); | |
981 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | 981 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | |
982 | #ifdef ADB_DEBUG | 982 | #ifdef ADB_DEBUG | |
983 | if (adb_debug & 0x80) | 983 | if (adb_debug & 0x80) | |
984 | printf_intr("done \n"); | 984 | printf_intr("done \n"); | |
985 | #endif | 985 | #endif | |
986 | /* set up stuff for adb_pass_up */ | 986 | /* set up stuff for adb_pass_up */ | |
987 | memcpy(packet.data, adbOutputBuffer, adbOutputBuffer[0] + 1); | 987 | memcpy(packet.data, adbOutputBuffer, adbOutputBuffer[0] + 1); | |
988 | packet.saveBuf = adbBuffer; | 988 | packet.saveBuf = adbBuffer; | |
989 | packet.compRout = adbCompRout; | 989 | packet.compRout = adbCompRout; | |
990 | packet.compData = adbCompData; | 990 | packet.compData = adbCompData; | |
991 | packet.cmd = adbWaitingCmd; | 991 | packet.cmd = adbWaitingCmd; | |
992 | packet.unsol = 0; | 992 | packet.unsol = 0; | |
993 | packet.ack_only = 1; | 993 | packet.ack_only = 1; | |
994 | adb_pass_up(&packet); | 994 | adb_pass_up(&packet); | |
995 | 995 | |||
996 | /* reset "waiting" vars, just in case */ | 996 | /* reset "waiting" vars, just in case */ | |
997 | adbBuffer = (long)0; | 997 | adbBuffer = (long)0; | |
998 | adbCompRout = (long)0; | 998 | adbCompRout = (long)0; | |
999 | adbCompData = (long)0; | 999 | adbCompData = (long)0; | |
1000 | if (adbOutQueueHasData == 1) { | 1000 | if (adbOutQueueHasData == 1) { | |
1001 | /* copy over data */ | 1001 | /* copy over data */ | |
1002 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | 1002 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | |
1003 | adbOutQueue.outBuf[0] + 2); | 1003 | adbOutQueue.outBuf[0] + 2); | |
1004 | adbBuffer = adbOutQueue.saveBuf; /* user data area */ | 1004 | adbBuffer = adbOutQueue.saveBuf; /* user data area */ | |
1005 | adbCompRout = adbOutQueue.compRout; /* completion routine */ | 1005 | adbCompRout = adbOutQueue.compRout; /* completion routine */ | |
1006 | adbCompData = adbOutQueue.data; /* comp. rout. data */ | 1006 | adbCompData = adbOutQueue.data; /* comp. rout. data */ | |
1007 | adbOutQueueHasData = 0; /* currently processing | 1007 | adbOutQueueHasData = 0; /* currently processing | |
1008 | * "queue" entry */ | 1008 | * "queue" entry */ | |
1009 | adbSentChars = 0; /* nothing sent yet */ | 1009 | adbSentChars = 0; /* nothing sent yet */ | |
1010 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 1010 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
1011 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 1011 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
1012 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | 1012 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | |
1013 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | 1013 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | |
1014 | ADB_SET_STATE_CMD(); /* tell ADB that we want to | 1014 | ADB_SET_STATE_CMD(); /* tell ADB that we want to | |
1015 | * send */ | 1015 | * send */ | |
1016 | break; | 1016 | break; | |
1017 | } else { | 1017 | } else { | |
1018 | /* send talk to last device instead */ | 1018 | /* send talk to last device instead */ | |
1019 | adbOutputBuffer[0] = 1; | 1019 | adbOutputBuffer[0] = 1; | |
1020 | adbOutputBuffer[1] = | 1020 | adbOutputBuffer[1] = | |
1021 | ADBTALK(ADB_CMDADDR(adbOutputBuffer[1]), 0); | 1021 | ADBTALK(ADB_CMDADDR(adbOutputBuffer[1]), 0); | |
1022 | 1022 | |||
1023 | adbSentChars = 0; /* nothing sent yet */ | 1023 | adbSentChars = 0; /* nothing sent yet */ | |
1024 | adbActionState = ADB_ACTION_IDLE; /* set next state */ | 1024 | adbActionState = ADB_ACTION_IDLE; /* set next state */ | |
1025 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 1025 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
1026 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | 1026 | ADB_SR() = adbOutputBuffer[1]; /* load byte for output */ | |
1027 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | 1027 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | |
1028 | ADB_SET_STATE_CMD(); /* tell ADB that we want to */ | 1028 | ADB_SET_STATE_CMD(); /* tell ADB that we want to */ | |
1029 | break; | 1029 | break; | |
1030 | } | 1030 | } | |
1031 | } | 1031 | } | |
1032 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; | 1032 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; | |
1033 | switch (adbBusState) { /* advance to next state */ | 1033 | switch (adbBusState) { /* advance to next state */ | |
1034 | case ADB_BUS_EVEN: | 1034 | case ADB_BUS_EVEN: | |
1035 | ADB_SET_STATE_ODD(); /* set state to odd */ | 1035 | ADB_SET_STATE_ODD(); /* set state to odd */ | |
1036 | adbBusState = ADB_BUS_ODD; | 1036 | adbBusState = ADB_BUS_ODD; | |
1037 | break; | 1037 | break; | |
1038 | 1038 | |||
1039 | case ADB_BUS_CMD: | 1039 | case ADB_BUS_CMD: | |
1040 | case ADB_BUS_ODD: | 1040 | case ADB_BUS_ODD: | |
1041 | ADB_SET_STATE_EVEN(); /* set state to even */ | 1041 | ADB_SET_STATE_EVEN(); /* set state to even */ | |
1042 | adbBusState = ADB_BUS_EVEN; | 1042 | adbBusState = ADB_BUS_EVEN; | |
1043 | break; | 1043 | break; | |
1044 | 1044 | |||
1045 | default: | 1045 | default: | |
1046 | #ifdef ADB_DEBUG | 1046 | #ifdef ADB_DEBUG | |
1047 | if (adb_debug) { | 1047 | if (adb_debug) { | |
1048 | printf_intr("strange state!!! (0x%x)\n", | 1048 | printf_intr("strange state!!! (0x%x)\n", | |
1049 | adbBusState); | 1049 | adbBusState); | |
1050 | } | 1050 | } | |
1051 | #endif | 1051 | #endif | |
1052 | break; | 1052 | break; | |
1053 | } | 1053 | } | |
1054 | break; | 1054 | break; | |
1055 | 1055 | |||
1056 | default: | 1056 | default: | |
1057 | #ifdef ADB_DEBUG | 1057 | #ifdef ADB_DEBUG | |
1058 | if (adb_debug) | 1058 | if (adb_debug) | |
1059 | printf_intr("adb: unknown ADB state (during intr)\n"); | 1059 | printf_intr("adb: unknown ADB state (during intr)\n"); | |
1060 | #endif | 1060 | #endif | |
1061 | break; | 1061 | break; | |
1062 | } | 1062 | } | |
1063 | 1063 | |||
1064 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | 1064 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | |
1065 | 1065 | |||
1066 | splx(s); /* restore */ | 1066 | splx(s); /* restore */ | |
1067 | 1067 | |||
1068 | return; | 1068 | return; | |
1069 | 1069 | |||
1070 | } | 1070 | } | |
1071 | 1071 | |||
1072 | 1072 | |||
1073 | /* | 1073 | /* | |
1074 | * send_adb version for II series machines | 1074 | * send_adb version for II series machines | |
1075 | */ | 1075 | */ | |
1076 | int | 1076 | int | |
1077 | send_adb_II(u_char *in, u_char *buffer, void *compRout, void *data, int command) | 1077 | send_adb_II(u_char *in, u_char *buffer, void *compRout, void *data, int command) | |
1078 | { | 1078 | { | |
1079 | int s, len; | 1079 | int s, len; | |
1080 | 1080 | |||
1081 | if (adbActionState == ADB_ACTION_NOTREADY) /* return if ADB not | 1081 | if (adbActionState == ADB_ACTION_NOTREADY) /* return if ADB not | |
1082 | * available */ | 1082 | * available */ | |
1083 | return 1; | 1083 | return 1; | |
1084 | 1084 | |||
1085 | /* Don't interrupt while we are messing with the ADB */ | 1085 | /* Don't interrupt while we are messing with the ADB */ | |
1086 | s = splhigh(); | 1086 | s = splhigh(); | |
1087 | 1087 | |||
1088 | if (0 != adbOutQueueHasData) { /* right now, "has data" means "full" */ | 1088 | if (0 != adbOutQueueHasData) { /* right now, "has data" means "full" */ | |
1089 | splx(s); /* sorry, try again later */ | 1089 | splx(s); /* sorry, try again later */ | |
1090 | return 1; | 1090 | return 1; | |
1091 | } | 1091 | } | |
1092 | if ((long)in == (long)0) { /* need to convert? */ | 1092 | if ((long)in == (long)0) { /* need to convert? */ | |
1093 | /* | 1093 | /* | |
1094 | * Don't need to use adb_cmd_extra here because this section | 1094 | * Don't need to use adb_cmd_extra here because this section | |
1095 | * will be called ONLY when it is an ADB command (no RTC or | 1095 | * will be called ONLY when it is an ADB command (no RTC or | |
1096 | * PRAM), especially on II series! | 1096 | * PRAM), especially on II series! | |
1097 | */ | 1097 | */ | |
1098 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | 1098 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | |
1099 | * doing a listen! */ | 1099 | * doing a listen! */ | |
1100 | len = buffer[0]; /* length of additional data */ | 1100 | len = buffer[0]; /* length of additional data */ | |
1101 | else | 1101 | else | |
1102 | len = 0;/* no additional data */ | 1102 | len = 0;/* no additional data */ | |
1103 | 1103 | |||
1104 | adbOutQueue.outBuf[0] = 1 + len; /* command + addl. data */ | 1104 | adbOutQueue.outBuf[0] = 1 + len; /* command + addl. data */ | |
1105 | adbOutQueue.outBuf[1] = (u_char)command; /* load command */ | 1105 | adbOutQueue.outBuf[1] = (u_char)command; /* load command */ | |
1106 | 1106 | |||
1107 | /* copy additional output data, if any */ | 1107 | /* copy additional output data, if any */ | |
1108 | memcpy(adbOutQueue.outBuf + 2, buffer + 1, len); | 1108 | memcpy(adbOutQueue.outBuf + 2, buffer + 1, len); | |
1109 | } else | 1109 | } else | |
1110 | /* if data ready, just copy over */ | 1110 | /* if data ready, just copy over */ | |
1111 | memcpy(adbOutQueue.outBuf, in, in[0] + 2); | 1111 | memcpy(adbOutQueue.outBuf, in, in[0] + 2); | |
1112 | 1112 | |||
1113 | adbOutQueue.saveBuf = buffer; /* save buffer to know where to save | 1113 | adbOutQueue.saveBuf = buffer; /* save buffer to know where to save | |
1114 | * result */ | 1114 | * result */ | |
1115 | adbOutQueue.compRout = compRout; /* save completion routine | 1115 | adbOutQueue.compRout = compRout; /* save completion routine | |
1116 | * pointer */ | 1116 | * pointer */ | |
1117 | adbOutQueue.data = data;/* save completion routine data pointer */ | 1117 | adbOutQueue.data = data;/* save completion routine data pointer */ | |
1118 | 1118 | |||
1119 | if ((adbActionState == ADB_ACTION_IDLE) && /* is ADB available? */ | 1119 | if ((adbActionState == ADB_ACTION_IDLE) && /* is ADB available? */ | |
1120 | (ADB_INTR_IS_OFF)) { /* and no incoming interrupts? */ | 1120 | (ADB_INTR_IS_OFF)) { /* and no incoming interrupts? */ | |
1121 | /* then start command now */ | 1121 | /* then start command now */ | |
1122 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | 1122 | memcpy(adbOutputBuffer, adbOutQueue.outBuf, | |
1123 | adbOutQueue.outBuf[0] + 2); /* copy over data */ | 1123 | adbOutQueue.outBuf[0] + 2); /* copy over data */ | |
1124 | 1124 | |||
1125 | adbBuffer = adbOutQueue.saveBuf; /* pointer to user data | 1125 | adbBuffer = adbOutQueue.saveBuf; /* pointer to user data | |
1126 | * area */ | 1126 | * area */ | |
1127 | adbCompRout = adbOutQueue.compRout; /* pointer to the | 1127 | adbCompRout = adbOutQueue.compRout; /* pointer to the | |
1128 | * completion routine */ | 1128 | * completion routine */ | |
1129 | adbCompData = adbOutQueue.data; /* pointer to the completion | 1129 | adbCompData = adbOutQueue.data; /* pointer to the completion | |
1130 | * routine data */ | 1130 | * routine data */ | |
1131 | 1131 | |||
1132 | adbSentChars = 0; /* nothing sent yet */ | 1132 | adbSentChars = 0; /* nothing sent yet */ | |
1133 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 1133 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
1134 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | 1134 | adbBusState = ADB_BUS_CMD; /* set bus to cmd state */ | |
1135 | 1135 | |||
1136 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 1136 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
1137 | 1137 | |||
1138 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* load byte for output */ | 1138 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* load byte for output */ | |
1139 | ADB_SET_STATE_CMD(); /* tell ADB that we want to send */ | 1139 | ADB_SET_STATE_CMD(); /* tell ADB that we want to send */ | |
1140 | adbOutQueueHasData = 0; /* currently processing "queue" entry */ | 1140 | adbOutQueueHasData = 0; /* currently processing "queue" entry */ | |
1141 | } else | 1141 | } else | |
1142 | adbOutQueueHasData = 1; /* something in the write "queue" */ | 1142 | adbOutQueueHasData = 1; /* something in the write "queue" */ | |
1143 | 1143 | |||
1144 | splx(s); | 1144 | splx(s); | |
1145 | 1145 | |||
1146 | if (0x0100 <= (s & 0x0700)) /* were VIA1 interrupts blocked? */ | 1146 | if (0x0100 <= (s & 0x0700)) /* were VIA1 interrupts blocked? */ | |
1147 | /* poll until message done */ | 1147 | /* poll until message done */ | |
1148 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | 1148 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | |
1149 | || (adbWaiting == 1)) | 1149 | || (adbWaiting == 1)) | |
1150 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | 1150 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | |
1151 | adb_intr_II(NULL); /* go process it */ | 1151 | adb_intr_II(NULL); /* go process it */ | |
1152 | if (adb_polling) | 1152 | if (adb_polling) | |
1153 | adb_soft_intr(); | 1153 | adb_soft_intr(); | |
1154 | } | 1154 | } | |
1155 | 1155 | |||
1156 | return 0; | 1156 | return 0; | |
1157 | } | 1157 | } | |
1158 | 1158 | |||
1159 | 1159 | |||
1160 | /* | 1160 | /* | |
1161 | * This routine is called from the II series interrupt routine | 1161 | * This routine is called from the II series interrupt routine | |
1162 | * to determine what the "next" device is that should be polled. | 1162 | * to determine what the "next" device is that should be polled. | |
1163 | */ | 1163 | */ | |
1164 | int | 1164 | int | |
1165 | adb_guess_next_device(void) | 1165 | adb_guess_next_device(void) | |
1166 | { | 1166 | { | |
1167 | int last, i, dummy; | 1167 | int last, i, dummy; | |
1168 | 1168 | |||
1169 | if (adbStarting) { | 1169 | if (adbStarting) { | |
1170 | /* | 1170 | /* | |
1171 | * Start polling EVERY device, since we can't be sure there is | 1171 | * Start polling EVERY device, since we can't be sure there is | |
1172 | * anything in the device table yet | 1172 | * anything in the device table yet | |
1173 | */ | 1173 | */ | |
1174 | if (adbLastDevice < 1 || adbLastDevice > 15) | 1174 | if (adbLastDevice < 1 || adbLastDevice > 15) | |
1175 | adbLastDevice = 1; | 1175 | adbLastDevice = 1; | |
1176 | if (++adbLastDevice > 15) /* point to next one */ | 1176 | if (++adbLastDevice > 15) /* point to next one */ | |
1177 | adbLastDevice = 1; | 1177 | adbLastDevice = 1; | |
1178 | } else { | 1178 | } else { | |
1179 | /* find the next device using the device table */ | 1179 | /* find the next device using the device table */ | |
1180 | if (adbLastDevice < 1 || adbLastDevice > 15) /* let's be parinoid */ | 1180 | if (adbLastDevice < 1 || adbLastDevice > 15) /* let's be parinoid */ | |
1181 | adbLastDevice = 2; | 1181 | adbLastDevice = 2; | |
1182 | last = 1; /* default index location */ | 1182 | last = 1; /* default index location */ | |
1183 | 1183 | |||
1184 | for (i = 1; i < 16; i++) /* find index entry */ | 1184 | for (i = 1; i < 16; i++) /* find index entry */ | |
1185 | if (ADBDevTable[i].currentAddr == adbLastDevice) { /* look for device */ | 1185 | if (ADBDevTable[i].currentAddr == adbLastDevice) { /* look for device */ | |
1186 | last = i; /* found it */ | 1186 | last = i; /* found it */ | |
1187 | break; | 1187 | break; | |
1188 | } | 1188 | } | |
1189 | dummy = last; /* index to start at */ | 1189 | dummy = last; /* index to start at */ | |
1190 | for (;;) { /* find next device in index */ | 1190 | for (;;) { /* find next device in index */ | |
1191 | if (++dummy > 15) /* wrap around if needed */ | 1191 | if (++dummy > 15) /* wrap around if needed */ | |
1192 | dummy = 1; | 1192 | dummy = 1; | |
1193 | if (dummy == last) { /* didn't find any other | 1193 | if (dummy == last) { /* didn't find any other | |
1194 | * device! This can happen if | 1194 | * device! This can happen if | |
1195 | * there are no devices on the | 1195 | * there are no devices on the | |
1196 | * bus */ | 1196 | * bus */ | |
1197 | dummy = 1; | 1197 | dummy = 1; | |
1198 | break; | 1198 | break; | |
1199 | } | 1199 | } | |
1200 | /* found the next device */ | 1200 | /* found the next device */ | |
1201 | if (ADBDevTable[dummy].devType != 0) | 1201 | if (ADBDevTable[dummy].devType != 0) | |
1202 | break; | 1202 | break; | |
1203 | } | 1203 | } | |
1204 | adbLastDevice = ADBDevTable[dummy].currentAddr; | 1204 | adbLastDevice = ADBDevTable[dummy].currentAddr; | |
1205 | } | 1205 | } | |
1206 | return adbLastDevice; | 1206 | return adbLastDevice; | |
1207 | } | 1207 | } | |
1208 | 1208 | |||
1209 | 1209 | |||
1210 | /* | 1210 | /* | |
1211 | * Called when when an adb interrupt happens. | 1211 | * Called when an adb interrupt happens. | |
1212 | * This routine simply transfers control over to the appropriate | 1212 | * This routine simply transfers control over to the appropriate | |
1213 | * code for the machine we are running on. | 1213 | * code for the machine we are running on. | |
1214 | */ | 1214 | */ | |
1215 | void | 1215 | void | |
1216 | adb_intr(void *arg) | 1216 | adb_intr(void *arg) | |
1217 | { | 1217 | { | |
1218 | switch (adbHardware) { | 1218 | switch (adbHardware) { | |
1219 | case ADB_HW_II: | 1219 | case ADB_HW_II: | |
1220 | adb_intr_II(arg); | 1220 | adb_intr_II(arg); | |
1221 | break; | 1221 | break; | |
1222 | 1222 | |||
1223 | case ADB_HW_IISI: | 1223 | case ADB_HW_IISI: | |
1224 | adb_intr_IIsi(arg); | 1224 | adb_intr_IIsi(arg); | |
1225 | break; | 1225 | break; | |
1226 | 1226 | |||
1227 | case ADB_HW_PB: /* Should not come through here. */ | 1227 | case ADB_HW_PB: /* Should not come through here. */ | |
1228 | break; | 1228 | break; | |
1229 | 1229 | |||
1230 | case ADB_HW_CUDA: | 1230 | case ADB_HW_CUDA: | |
1231 | adb_intr_cuda(arg); | 1231 | adb_intr_cuda(arg); | |
1232 | break; | 1232 | break; | |
1233 | 1233 | |||
1234 | case ADB_HW_IOP: /* Should not come through here. */ | 1234 | case ADB_HW_IOP: /* Should not come through here. */ | |
1235 | break; | 1235 | break; | |
1236 | 1236 | |||
1237 | case ADB_HW_UNKNOWN: | 1237 | case ADB_HW_UNKNOWN: | |
1238 | break; | 1238 | break; | |
1239 | } | 1239 | } | |
1240 | } | 1240 | } | |
1241 | 1241 | |||
1242 | 1242 | |||
1243 | /* | 1243 | /* | |
1244 | * called when when an adb interrupt happens | 1244 | * called when an adb interrupt happens | |
1245 | * | 1245 | * | |
1246 | * IIsi version of adb_intr | 1246 | * IIsi version of adb_intr | |
1247 | * | 1247 | * | |
1248 | */ | 1248 | */ | |
1249 | void | 1249 | void | |
1250 | adb_intr_IIsi(void *arg) | 1250 | adb_intr_IIsi(void *arg) | |
1251 | { | 1251 | { | |
1252 | struct adbCommand packet; | 1252 | struct adbCommand packet; | |
1253 | int ending; | 1253 | int ending; | |
1254 | unsigned int s; | 1254 | unsigned int s; | |
1255 | 1255 | |||
1256 | s = splhigh(); /* can't be too careful - might be called */ | 1256 | s = splhigh(); /* can't be too careful - might be called */ | |
1257 | /* from a routine, NOT an interrupt */ | 1257 | /* from a routine, NOT an interrupt */ | |
1258 | 1258 | |||
1259 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | 1259 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | |
1260 | 1260 | |||
1261 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | 1261 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | |
1262 | 1262 | |||
1263 | switch_start: | 1263 | switch_start: | |
1264 | switch (adbActionState) { | 1264 | switch (adbActionState) { | |
1265 | case ADB_ACTION_IDLE: | 1265 | case ADB_ACTION_IDLE: | |
1266 | delay(ADB_DELAY); /* short delay is required before the | 1266 | delay(ADB_DELAY); /* short delay is required before the | |
1267 | * first byte */ | 1267 | * first byte */ | |
1268 | 1268 | |||
1269 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 1269 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
1270 | ADB_SET_STATE_ACTIVE(); /* signal start of data frame */ | 1270 | ADB_SET_STATE_ACTIVE(); /* signal start of data frame */ | |
1271 | adbInputBuffer[1] = ADB_SR(); /* get byte */ | 1271 | adbInputBuffer[1] = ADB_SR(); /* get byte */ | |
1272 | adbInputBuffer[0] = 1; | 1272 | adbInputBuffer[0] = 1; | |
1273 | adbActionState = ADB_ACTION_IN; /* set next state */ | 1273 | adbActionState = ADB_ACTION_IN; /* set next state */ | |
1274 | 1274 | |||
1275 | ADB_SET_STATE_ACKON(); /* start ACK to ADB chip */ | 1275 | ADB_SET_STATE_ACKON(); /* start ACK to ADB chip */ | |
1276 | delay(ADB_DELAY); /* delay */ | 1276 | delay(ADB_DELAY); /* delay */ | |
1277 | ADB_SET_STATE_ACKOFF(); /* end ACK to ADB chip */ | 1277 | ADB_SET_STATE_ACKOFF(); /* end ACK to ADB chip */ | |
1278 | adb_process_serial_intrs(); | 1278 | adb_process_serial_intrs(); | |
1279 | break; | 1279 | break; | |
1280 | 1280 | |||
1281 | case ADB_ACTION_IN: | 1281 | case ADB_ACTION_IN: | |
1282 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 1282 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
1283 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); /* get byte */ | 1283 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); /* get byte */ | |
1284 | if (ADB_INTR_IS_OFF) /* check for end of frame */ | 1284 | if (ADB_INTR_IS_OFF) /* check for end of frame */ | |
1285 | ending = 1; | 1285 | ending = 1; | |
1286 | else | 1286 | else | |
1287 | ending = 0; | 1287 | ending = 0; | |
1288 | 1288 | |||
1289 | ADB_SET_STATE_ACKON(); /* start ACK to ADB chip */ | 1289 | ADB_SET_STATE_ACKON(); /* start ACK to ADB chip */ | |
1290 | delay(ADB_DELAY); /* delay */ | 1290 | delay(ADB_DELAY); /* delay */ | |
1291 | ADB_SET_STATE_ACKOFF(); /* end ACK to ADB chip */ | 1291 | ADB_SET_STATE_ACKOFF(); /* end ACK to ADB chip */ | |
1292 | adb_process_serial_intrs(); | 1292 | adb_process_serial_intrs(); | |
1293 | 1293 | |||
1294 | if (1 == ending) { /* end of message? */ | 1294 | if (1 == ending) { /* end of message? */ | |
1295 | ADB_SET_STATE_INACTIVE(); /* signal end of frame */ | 1295 | ADB_SET_STATE_INACTIVE(); /* signal end of frame */ | |
1296 | /* | 1296 | /* | |
1297 | * This section _should_ handle all ADB and RTC/PRAM | 1297 | * This section _should_ handle all ADB and RTC/PRAM | |
1298 | * type commands, but there may be more... Note: | 1298 | * type commands, but there may be more... Note: | |
1299 | * commands are always at [4], even for rtc/pram | 1299 | * commands are always at [4], even for rtc/pram | |
1300 | * commands | 1300 | * commands | |
1301 | */ | 1301 | */ | |
1302 | /* set up data for adb_pass_up */ | 1302 | /* set up data for adb_pass_up */ | |
1303 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | 1303 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | |
1304 | 1304 | |||
1305 | if ((adbWaiting == 1) && /* are we waiting AND */ | 1305 | if ((adbWaiting == 1) && /* are we waiting AND */ | |
1306 | (adbInputBuffer[4] == adbWaitingCmd) && /* the cmd we sent AND */ | 1306 | (adbInputBuffer[4] == adbWaitingCmd) && /* the cmd we sent AND */ | |
1307 | ((adbInputBuffer[2] == 0x00) || /* it's from the ADB | 1307 | ((adbInputBuffer[2] == 0x00) || /* it's from the ADB | |
1308 | * device OR */ | 1308 | * device OR */ | |
1309 | (adbInputBuffer[2] == 0x01))) { /* it's from the | 1309 | (adbInputBuffer[2] == 0x01))) { /* it's from the | |
1310 | * PRAM/RTC device */ | 1310 | * PRAM/RTC device */ | |
1311 | 1311 | |||
1312 | packet.saveBuf = adbBuffer; | 1312 | packet.saveBuf = adbBuffer; | |
1313 | packet.compRout = adbCompRout; | 1313 | packet.compRout = adbCompRout; | |
1314 | packet.compData = adbCompData; | 1314 | packet.compData = adbCompData; | |
1315 | packet.unsol = 0; | 1315 | packet.unsol = 0; | |
1316 | packet.ack_only = 0; | 1316 | packet.ack_only = 0; | |
1317 | adb_pass_up(&packet); | 1317 | adb_pass_up(&packet); | |
1318 | 1318 | |||
1319 | adbWaitingCmd = 0; /* reset "waiting" vars */ | 1319 | adbWaitingCmd = 0; /* reset "waiting" vars */ | |
1320 | adbWaiting = 0; | 1320 | adbWaiting = 0; | |
1321 | adbBuffer = (long)0; | 1321 | adbBuffer = (long)0; | |
1322 | adbCompRout = (long)0; | 1322 | adbCompRout = (long)0; | |
1323 | adbCompData = (long)0; | 1323 | adbCompData = (long)0; | |
1324 | } else { | 1324 | } else { | |
1325 | packet.unsol = 1; | 1325 | packet.unsol = 1; | |
1326 | packet.ack_only = 0; | 1326 | packet.ack_only = 0; | |
1327 | adb_pass_up(&packet); | 1327 | adb_pass_up(&packet); | |
1328 | } | 1328 | } | |
1329 | 1329 | |||
1330 | adbActionState = ADB_ACTION_IDLE; | 1330 | adbActionState = ADB_ACTION_IDLE; | |
1331 | adbInputBuffer[0] = 0; /* reset length */ | 1331 | adbInputBuffer[0] = 0; /* reset length */ | |
1332 | 1332 | |||
1333 | if (adbWriteDelay == 1) { /* were we waiting to | 1333 | if (adbWriteDelay == 1) { /* were we waiting to | |
1334 | * write? */ | 1334 | * write? */ | |
1335 | adbSentChars = 0; /* nothing sent yet */ | 1335 | adbSentChars = 0; /* nothing sent yet */ | |
1336 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 1336 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
1337 | 1337 | |||
1338 | delay(ADB_DELAY); /* delay */ | 1338 | delay(ADB_DELAY); /* delay */ | |
1339 | adb_process_serial_intrs(); | 1339 | adb_process_serial_intrs(); | |
1340 | 1340 | |||
1341 | if (ADB_INTR_IS_ON) { /* ADB intr low during | 1341 | if (ADB_INTR_IS_ON) { /* ADB intr low during | |
1342 | * write */ | 1342 | * write */ | |
1343 | ADB_SET_STATE_IDLE_IISI(); /* reset */ | 1343 | ADB_SET_STATE_IDLE_IISI(); /* reset */ | |
1344 | ADB_SET_SR_INPUT(); /* make sure SR is set | 1344 | ADB_SET_SR_INPUT(); /* make sure SR is set | |
1345 | * to IN */ | 1345 | * to IN */ | |
1346 | adbSentChars = 0; /* must start all over */ | 1346 | adbSentChars = 0; /* must start all over */ | |
1347 | adbActionState = ADB_ACTION_IDLE; /* new state */ | 1347 | adbActionState = ADB_ACTION_IDLE; /* new state */ | |
1348 | adbInputBuffer[0] = 0; | 1348 | adbInputBuffer[0] = 0; | |
1349 | /* may be able to take this out later */ | 1349 | /* may be able to take this out later */ | |
1350 | delay(ADB_DELAY); /* delay */ | 1350 | delay(ADB_DELAY); /* delay */ | |
1351 | break; | 1351 | break; | |
1352 | } | 1352 | } | |
1353 | ADB_SET_STATE_ACTIVE(); /* tell ADB that we want | 1353 | ADB_SET_STATE_ACTIVE(); /* tell ADB that we want | |
1354 | * to send */ | 1354 | * to send */ | |
1355 | ADB_SET_STATE_ACKOFF(); /* make sure */ | 1355 | ADB_SET_STATE_ACKOFF(); /* make sure */ | |
1356 | ADB_SET_SR_OUTPUT(); /* set shift register | 1356 | ADB_SET_SR_OUTPUT(); /* set shift register | |
1357 | * for OUT */ | 1357 | * for OUT */ | |
1358 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; | 1358 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; | |
1359 | ADB_SET_STATE_ACKON(); /* tell ADB byte ready | 1359 | ADB_SET_STATE_ACKON(); /* tell ADB byte ready | |
1360 | * to shift */ | 1360 | * to shift */ | |
1361 | } | 1361 | } | |
1362 | } | 1362 | } | |
1363 | break; | 1363 | break; | |
1364 | 1364 | |||
1365 | case ADB_ACTION_OUT: | 1365 | case ADB_ACTION_OUT: | |
1366 | (void)ADB_SR(); /* reset SR-intr in IFR */ | 1366 | (void)ADB_SR(); /* reset SR-intr in IFR */ | |
1367 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 1367 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
1368 | 1368 | |||
1369 | ADB_SET_STATE_ACKOFF(); /* finish ACK */ | 1369 | ADB_SET_STATE_ACKOFF(); /* finish ACK */ | |
1370 | adbSentChars++; | 1370 | adbSentChars++; | |
1371 | if (ADB_INTR_IS_ON) { /* ADB intr low during write */ | 1371 | if (ADB_INTR_IS_ON) { /* ADB intr low during write */ | |
1372 | ADB_SET_STATE_IDLE_IISI(); /* reset */ | 1372 | ADB_SET_STATE_IDLE_IISI(); /* reset */ | |
1373 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 1373 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
1374 | adbSentChars = 0; /* must start all over */ | 1374 | adbSentChars = 0; /* must start all over */ | |
1375 | adbActionState = ADB_ACTION_IDLE; /* new state */ | 1375 | adbActionState = ADB_ACTION_IDLE; /* new state */ | |
1376 | adbInputBuffer[0] = 0; | 1376 | adbInputBuffer[0] = 0; | |
1377 | adbWriteDelay = 1; /* must retry when done with | 1377 | adbWriteDelay = 1; /* must retry when done with | |
1378 | * read */ | 1378 | * read */ | |
1379 | delay(ADB_DELAY); /* delay */ | 1379 | delay(ADB_DELAY); /* delay */ | |
1380 | adb_process_serial_intrs(); | 1380 | adb_process_serial_intrs(); | |
1381 | goto switch_start; /* process next state right | 1381 | goto switch_start; /* process next state right | |
1382 | * now */ | 1382 | * now */ | |
1383 | break; | 1383 | break; | |
1384 | } | 1384 | } | |
1385 | delay(ADB_DELAY); /* required delay */ | 1385 | delay(ADB_DELAY); /* required delay */ | |
1386 | adb_process_serial_intrs(); | 1386 | adb_process_serial_intrs(); | |
1387 | 1387 | |||
1388 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | 1388 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | |
1389 | if (0 == adb_cmd_result(adbOutputBuffer)) { /* do we expect data | 1389 | if (0 == adb_cmd_result(adbOutputBuffer)) { /* do we expect data | |
1390 | * back? */ | 1390 | * back? */ | |
1391 | adbWaiting = 1; /* signal waiting for return */ | 1391 | adbWaiting = 1; /* signal waiting for return */ | |
1392 | adbWaitingCmd = adbOutputBuffer[2]; /* save waiting command */ | 1392 | adbWaitingCmd = adbOutputBuffer[2]; /* save waiting command */ | |
1393 | } else {/* no talk, so done */ | 1393 | } else {/* no talk, so done */ | |
1394 | /* set up stuff for adb_pass_up */ | 1394 | /* set up stuff for adb_pass_up */ | |
1395 | memcpy(packet.data, adbInputBuffer, | 1395 | memcpy(packet.data, adbInputBuffer, | |
1396 | adbInputBuffer[0] + 1); | 1396 | adbInputBuffer[0] + 1); | |
1397 | packet.saveBuf = adbBuffer; | 1397 | packet.saveBuf = adbBuffer; | |
1398 | packet.compRout = adbCompRout; | 1398 | packet.compRout = adbCompRout; | |
1399 | packet.compData = adbCompData; | 1399 | packet.compData = adbCompData; | |
1400 | packet.cmd = adbWaitingCmd; | 1400 | packet.cmd = adbWaitingCmd; | |
1401 | packet.unsol = 0; | 1401 | packet.unsol = 0; | |
1402 | packet.ack_only = 1; | 1402 | packet.ack_only = 1; | |
1403 | adb_pass_up(&packet); | 1403 | adb_pass_up(&packet); | |
1404 | 1404 | |||
1405 | /* reset "waiting" vars, just in case */ | 1405 | /* reset "waiting" vars, just in case */ | |
1406 | adbWaitingCmd = 0; | 1406 | adbWaitingCmd = 0; | |
1407 | adbBuffer = (long)0; | 1407 | adbBuffer = (long)0; | |
1408 | adbCompRout = (long)0; | 1408 | adbCompRout = (long)0; | |
1409 | adbCompData = (long)0; | 1409 | adbCompData = (long)0; | |
1410 | } | 1410 | } | |
1411 | 1411 | |||
1412 | adbWriteDelay = 0; /* done writing */ | 1412 | adbWriteDelay = 0; /* done writing */ | |
1413 | adbActionState = ADB_ACTION_IDLE; /* signal bus is idle */ | 1413 | adbActionState = ADB_ACTION_IDLE; /* signal bus is idle */ | |
1414 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 1414 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
1415 | ADB_SET_STATE_INACTIVE(); /* end of frame */ | 1415 | ADB_SET_STATE_INACTIVE(); /* end of frame */ | |
1416 | } else { | 1416 | } else { | |
1417 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* send next byte */ | 1417 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* send next byte */ | |
1418 | ADB_SET_STATE_ACKON(); /* signal byte ready to shift */ | 1418 | ADB_SET_STATE_ACKON(); /* signal byte ready to shift */ | |
1419 | } | 1419 | } | |
1420 | break; | 1420 | break; | |
1421 | 1421 | |||
1422 | case ADB_ACTION_NOTREADY: | 1422 | case ADB_ACTION_NOTREADY: | |
1423 | #ifdef ADB_DEBUG | 1423 | #ifdef ADB_DEBUG | |
1424 | if (adb_debug) | 1424 | if (adb_debug) | |
1425 | printf_intr("adb: not yet initialized\n"); | 1425 | printf_intr("adb: not yet initialized\n"); | |
1426 | #endif | 1426 | #endif | |
1427 | break; | 1427 | break; | |
1428 | 1428 | |||
1429 | default: | 1429 | default: | |
1430 | #ifdef ADB_DEBUG | 1430 | #ifdef ADB_DEBUG | |
1431 | if (adb_debug) | 1431 | if (adb_debug) | |
1432 | printf_intr("intr: unknown ADB state\n"); | 1432 | printf_intr("intr: unknown ADB state\n"); | |
1433 | #endif | 1433 | #endif | |
1434 | break; | 1434 | break; | |
1435 | } | 1435 | } | |
1436 | 1436 | |||
1437 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | 1437 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | |
1438 | 1438 | |||
1439 | splx(s); /* restore */ | 1439 | splx(s); /* restore */ | |
1440 | 1440 | |||
1441 | return; | 1441 | return; | |
1442 | } /* end adb_intr_IIsi */ | 1442 | } /* end adb_intr_IIsi */ | |
1443 | 1443 | |||
1444 | 1444 | |||
1445 | /***************************************************************************** | 1445 | /***************************************************************************** | |
1446 | * if the device is currently busy, and there is no data waiting to go out, then | 1446 | * if the device is currently busy, and there is no data waiting to go out, then | |
1447 | * the data is "queued" in the outgoing buffer. If we are already waiting, then | 1447 | * the data is "queued" in the outgoing buffer. If we are already waiting, then | |
1448 | * we return. | 1448 | * we return. | |
1449 | * in: if (in == 0) then the command string is built from command and buffer | 1449 | * in: if (in == 0) then the command string is built from command and buffer | |
1450 | * if (in != 0) then in is used as the command string | 1450 | * if (in != 0) then in is used as the command string | |
1451 | * buffer: additional data to be sent (used only if in == 0) | 1451 | * buffer: additional data to be sent (used only if in == 0) | |
1452 | * this is also where return data is stored | 1452 | * this is also where return data is stored | |
1453 | * compRout: the completion routine that is called when then return value | 1453 | * compRout: the completion routine that is called when then return value | |
1454 | * is received (if a return value is expected) | 1454 | * is received (if a return value is expected) | |
1455 | * data: a data pointer that can be used by the completion routine | 1455 | * data: a data pointer that can be used by the completion routine | |
1456 | * command: an ADB command to be sent (used only if in == 0) | 1456 | * command: an ADB command to be sent (used only if in == 0) | |
1457 | * | 1457 | * | |
1458 | */ | 1458 | */ | |
1459 | int | 1459 | int | |
1460 | send_adb_IIsi(u_char *in, u_char *buffer, void *compRout, void *data, int | 1460 | send_adb_IIsi(u_char *in, u_char *buffer, void *compRout, void *data, int | |
1461 | command) | 1461 | command) | |
1462 | { | 1462 | { | |
1463 | int s, len; | 1463 | int s, len; | |
1464 | 1464 | |||
1465 | if (adbActionState == ADB_ACTION_NOTREADY) | 1465 | if (adbActionState == ADB_ACTION_NOTREADY) | |
1466 | return 1; | 1466 | return 1; | |
1467 | 1467 | |||
1468 | /* Don't interrupt while we are messing with the ADB */ | 1468 | /* Don't interrupt while we are messing with the ADB */ | |
1469 | s = splhigh(); | 1469 | s = splhigh(); | |
1470 | 1470 | |||
1471 | if ((adbActionState == ADB_ACTION_IDLE) && /* ADB available? */ | 1471 | if ((adbActionState == ADB_ACTION_IDLE) && /* ADB available? */ | |
1472 | (ADB_INTR_IS_OFF)) {/* and no incoming interrupt? */ | 1472 | (ADB_INTR_IS_OFF)) {/* and no incoming interrupt? */ | |
1473 | 1473 | |||
1474 | } else | 1474 | } else | |
1475 | if (adbWriteDelay == 0) /* it's busy, but is anything waiting? */ | 1475 | if (adbWriteDelay == 0) /* it's busy, but is anything waiting? */ | |
1476 | adbWriteDelay = 1; /* if no, then we'll "queue" | 1476 | adbWriteDelay = 1; /* if no, then we'll "queue" | |
1477 | * it up */ | 1477 | * it up */ | |
1478 | else { | 1478 | else { | |
1479 | splx(s); | 1479 | splx(s); | |
1480 | return 1; /* really busy! */ | 1480 | return 1; /* really busy! */ | |
1481 | } | 1481 | } | |
1482 | 1482 | |||
1483 | if ((long)in == (long)0) { /* need to convert? */ | 1483 | if ((long)in == (long)0) { /* need to convert? */ | |
1484 | /* | 1484 | /* | |
1485 | * Don't need to use adb_cmd_extra here because this section | 1485 | * Don't need to use adb_cmd_extra here because this section | |
1486 | * will be called ONLY when it is an ADB command (no RTC or | 1486 | * will be called ONLY when it is an ADB command (no RTC or | |
1487 | * PRAM) | 1487 | * PRAM) | |
1488 | */ | 1488 | */ | |
1489 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | 1489 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | |
1490 | * doing a listen! */ | 1490 | * doing a listen! */ | |
1491 | len = buffer[0]; /* length of additional data */ | 1491 | len = buffer[0]; /* length of additional data */ | |
1492 | else | 1492 | else | |
1493 | len = 0;/* no additional data */ | 1493 | len = 0;/* no additional data */ | |
1494 | 1494 | |||
1495 | adbOutputBuffer[0] = 2 + len; /* dev. type + command + addl. | 1495 | adbOutputBuffer[0] = 2 + len; /* dev. type + command + addl. | |
1496 | * data */ | 1496 | * data */ | |
1497 | adbOutputBuffer[1] = 0x00; /* mark as an ADB command */ | 1497 | adbOutputBuffer[1] = 0x00; /* mark as an ADB command */ | |
1498 | adbOutputBuffer[2] = (u_char)command; /* load command */ | 1498 | adbOutputBuffer[2] = (u_char)command; /* load command */ | |
1499 | 1499 | |||
1500 | /* copy additional output data, if any */ | 1500 | /* copy additional output data, if any */ | |
1501 | memcpy(adbOutputBuffer + 3, buffer + 1, len); | 1501 | memcpy(adbOutputBuffer + 3, buffer + 1, len); | |
1502 | } else | 1502 | } else | |
1503 | /* if data ready, just copy over */ | 1503 | /* if data ready, just copy over */ | |
1504 | memcpy(adbOutputBuffer, in, in[0] + 2); | 1504 | memcpy(adbOutputBuffer, in, in[0] + 2); | |
1505 | 1505 | |||
1506 | adbSentChars = 0; /* nothing sent yet */ | 1506 | adbSentChars = 0; /* nothing sent yet */ | |
1507 | adbBuffer = buffer; /* save buffer to know where to save result */ | 1507 | adbBuffer = buffer; /* save buffer to know where to save result */ | |
1508 | adbCompRout = compRout; /* save completion routine pointer */ | 1508 | adbCompRout = compRout; /* save completion routine pointer */ | |
1509 | adbCompData = data; /* save completion routine data pointer */ | 1509 | adbCompData = data; /* save completion routine data pointer */ | |
1510 | adbWaitingCmd = adbOutputBuffer[2]; /* save wait command */ | 1510 | adbWaitingCmd = adbOutputBuffer[2]; /* save wait command */ | |
1511 | 1511 | |||
1512 | if (adbWriteDelay != 1) { /* start command now? */ | 1512 | if (adbWriteDelay != 1) { /* start command now? */ | |
1513 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 1513 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
1514 | 1514 | |||
1515 | ADB_SET_STATE_ACTIVE(); /* tell ADB that we want to send */ | 1515 | ADB_SET_STATE_ACTIVE(); /* tell ADB that we want to send */ | |
1516 | ADB_SET_STATE_ACKOFF(); /* make sure */ | 1516 | ADB_SET_STATE_ACKOFF(); /* make sure */ | |
1517 | 1517 | |||
1518 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 1518 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
1519 | 1519 | |||
1520 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* load byte for output */ | 1520 | ADB_SR() = adbOutputBuffer[adbSentChars + 1]; /* load byte for output */ | |
1521 | 1521 | |||
1522 | ADB_SET_STATE_ACKON(); /* tell ADB byte ready to shift */ | 1522 | ADB_SET_STATE_ACKON(); /* tell ADB byte ready to shift */ | |
1523 | } | 1523 | } | |
1524 | adbWriteDelay = 1; /* something in the write "queue" */ | 1524 | adbWriteDelay = 1; /* something in the write "queue" */ | |
1525 | 1525 | |||
1526 | splx(s); | 1526 | splx(s); | |
1527 | 1527 | |||
1528 | if (0x0100 <= (s & 0x0700)) /* were VIA1 interrupts blocked? */ | 1528 | if (0x0100 <= (s & 0x0700)) /* were VIA1 interrupts blocked? */ | |
1529 | /* poll until byte done */ | 1529 | /* poll until byte done */ | |
1530 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | 1530 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | |
1531 | || (adbWaiting == 1)) | 1531 | || (adbWaiting == 1)) | |
1532 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | 1532 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | |
1533 | adb_intr_IIsi(NULL); /* go process it */ | 1533 | adb_intr_IIsi(NULL); /* go process it */ | |
1534 | if (adb_polling) | 1534 | if (adb_polling) | |
1535 | adb_soft_intr(); | 1535 | adb_soft_intr(); | |
1536 | } | 1536 | } | |
1537 | 1537 | |||
1538 | return 0; | 1538 | return 0; | |
1539 | } /* send_adb_IIsi */ | 1539 | } /* send_adb_IIsi */ | |
1540 | 1540 | |||
1541 | void | 1541 | void | |
1542 | adb_iop_recv(IOP *iop, struct iop_msg *msg) | 1542 | adb_iop_recv(IOP *iop, struct iop_msg *msg) | |
1543 | { | 1543 | { | |
1544 | struct adbCommand pkt; | 1544 | struct adbCommand pkt; | |
1545 | unsigned flags; | 1545 | unsigned flags; | |
1546 | 1546 | |||
1547 | if (adbActionState != ADB_ACTION_RUNNING) | 1547 | if (adbActionState != ADB_ACTION_RUNNING) | |
1548 | return; | 1548 | return; | |
1549 | 1549 | |||
1550 | switch (msg->status) { | 1550 | switch (msg->status) { | |
1551 | case IOP_MSGSTAT_SENT: | 1551 | case IOP_MSGSTAT_SENT: | |
1552 | if (0 == adb_cmd_result(msg->msg + 1)) { | 1552 | if (0 == adb_cmd_result(msg->msg + 1)) { | |
1553 | adbWaiting = 1; | 1553 | adbWaiting = 1; | |
1554 | adbWaitingCmd = msg->msg[2]; | 1554 | adbWaitingCmd = msg->msg[2]; | |
1555 | } | 1555 | } | |
1556 | break; | 1556 | break; | |
1557 | case IOP_MSGSTAT_RECEIVED: | 1557 | case IOP_MSGSTAT_RECEIVED: | |
1558 | case IOP_MSGSTAT_UNEXPECTED: | 1558 | case IOP_MSGSTAT_UNEXPECTED: | |
1559 | flags = msg->msg[0]; | 1559 | flags = msg->msg[0]; | |
1560 | if (flags != 0) { | 1560 | if (flags != 0) { | |
1561 | printf("ADB FLAGS 0x%x", flags); | 1561 | printf("ADB FLAGS 0x%x", flags); | |
1562 | break; | 1562 | break; | |
1563 | } | 1563 | } | |
1564 | if (adbWaiting && | 1564 | if (adbWaiting && | |
1565 | (msg->msg[2] == adbWaitingCmd)) { | 1565 | (msg->msg[2] == adbWaitingCmd)) { | |
1566 | pkt.saveBuf = msg->msg + 1; | 1566 | pkt.saveBuf = msg->msg + 1; | |
1567 | pkt.compRout = adbCompRout; | 1567 | pkt.compRout = adbCompRout; | |
1568 | pkt.compData = adbCompData; | 1568 | pkt.compData = adbCompData; | |
1569 | pkt.unsol = 0; | 1569 | pkt.unsol = 0; | |
1570 | pkt.ack_only = 0; | 1570 | pkt.ack_only = 0; | |
1571 | adb_pass_up(&pkt); | 1571 | adb_pass_up(&pkt); | |
1572 | 1572 | |||
1573 | adbWaitingCmd = 0; | 1573 | adbWaitingCmd = 0; | |
1574 | adbWaiting = 0; | 1574 | adbWaiting = 0; | |
1575 | } else { | 1575 | } else { | |
1576 | pkt.unsol = 1; | 1576 | pkt.unsol = 1; | |
1577 | pkt.ack_only = 0; | 1577 | pkt.ack_only = 0; | |
1578 | adb_pass_up(&pkt); | 1578 | adb_pass_up(&pkt); | |
1579 | } | 1579 | } | |
1580 | break; | 1580 | break; | |
1581 | default: | 1581 | default: | |
1582 | return; | 1582 | return; | |
1583 | } | 1583 | } | |
1584 | } | 1584 | } | |
1585 | 1585 | |||
1586 | int | 1586 | int | |
1587 | send_adb_iop(int cmd, u_char * buffer, void *compRout, void *data) | 1587 | send_adb_iop(int cmd, u_char * buffer, void *compRout, void *data) | |
1588 | { | 1588 | { | |
1589 | u_char buff[32]; | 1589 | u_char buff[32]; | |
1590 | int cnt; | 1590 | int cnt; | |
1591 | 1591 | |||
1592 | if (adbActionState != ADB_ACTION_RUNNING) | 1592 | if (adbActionState != ADB_ACTION_RUNNING) | |
1593 | return -1; | 1593 | return -1; | |
1594 | 1594 | |||
1595 | buff[0] = IOP_ADB_FL_EXPLICIT; | 1595 | buff[0] = IOP_ADB_FL_EXPLICIT; | |
1596 | buff[1] = buffer[0]; | 1596 | buff[1] = buffer[0]; | |
1597 | buff[2] = cmd; | 1597 | buff[2] = cmd; | |
1598 | cnt = (int) buff[1]; | 1598 | cnt = (int) buff[1]; | |
1599 | memcpy(buff + 3, buffer + 1, cnt); | 1599 | memcpy(buff + 3, buffer + 1, cnt); | |
1600 | return iop_send_msg(ISM_IOP, IOP_CHAN_ADB, buff, cnt+3, | 1600 | return iop_send_msg(ISM_IOP, IOP_CHAN_ADB, buff, cnt+3, | |
1601 | adb_iop_recv, NULL); | 1601 | adb_iop_recv, NULL); | |
1602 | } | 1602 | } | |
1603 | 1603 | |||
1604 | /* | 1604 | /* | |
1605 | * adb_pass_up is called by the interrupt-time routines. | 1605 | * adb_pass_up is called by the interrupt-time routines. | |
1606 | * It takes the raw packet data that was received from the | 1606 | * It takes the raw packet data that was received from the | |
1607 | * device and puts it into the queue that the upper half | 1607 | * device and puts it into the queue that the upper half | |
1608 | * processes. It then signals for a soft ADB interrupt which | 1608 | * processes. It then signals for a soft ADB interrupt which | |
1609 | * will eventually call the upper half routine (adb_soft_intr). | 1609 | * will eventually call the upper half routine (adb_soft_intr). | |
1610 | * | 1610 | * | |
1611 | * If in->unsol is 0, then this is either the notification | 1611 | * If in->unsol is 0, then this is either the notification | |
1612 | * that the packet was sent (on a LISTEN, for example), or the | 1612 | * that the packet was sent (on a LISTEN, for example), or the | |
1613 | * response from the device (on a TALK). The completion routine | 1613 | * response from the device (on a TALK). The completion routine | |
1614 | * is called only if the user specified one. | 1614 | * is called only if the user specified one. | |
1615 | * | 1615 | * | |
1616 | * If in->unsol is 1, then this packet was unsolicited and | 1616 | * If in->unsol is 1, then this packet was unsolicited and | |
1617 | * so we look up the device in the ADB device table to determine | 1617 | * so we look up the device in the ADB device table to determine | |
1618 | * what its default service routine is. | 1618 | * what its default service routine is. | |
1619 | * | 1619 | * | |
1620 | * If in->ack_only is 1, then we really only need to call | 1620 | * If in->ack_only is 1, then we really only need to call | |
1621 | * the completion routine, so don't do any other stuff. | 1621 | * the completion routine, so don't do any other stuff. | |
1622 | * | 1622 | * | |
1623 | * Note that in->data contains the packet header AND data, | 1623 | * Note that in->data contains the packet header AND data, | |
1624 | * while adbInbound[]->data contains ONLY data. | 1624 | * while adbInbound[]->data contains ONLY data. | |
1625 | * | 1625 | * | |
1626 | * Note: Called only at interrupt time. Assumes this. | 1626 | * Note: Called only at interrupt time. Assumes this. | |
1627 | */ | 1627 | */ | |
1628 | void | 1628 | void | |
1629 | adb_pass_up(struct adbCommand *in) | 1629 | adb_pass_up(struct adbCommand *in) | |
1630 | { | 1630 | { | |
1631 | int start = 0, len = 0, cmd = 0; | 1631 | int start = 0, len = 0, cmd = 0; | |
1632 | ADBDataBlock block; | 1632 | ADBDataBlock block; | |
1633 | 1633 | |||
1634 | /* temp for testing */ | 1634 | /* temp for testing */ | |
1635 | /*u_char *buffer = 0;*/ | 1635 | /*u_char *buffer = 0;*/ | |
1636 | /*u_char *compdata = 0;*/ | 1636 | /*u_char *compdata = 0;*/ | |
1637 | /*u_char *comprout = 0;*/ | 1637 | /*u_char *comprout = 0;*/ | |
1638 | 1638 | |||
1639 | if (adbInCount >= ADB_QUEUE) { | 1639 | if (adbInCount >= ADB_QUEUE) { | |
1640 | #ifdef ADB_DEBUG | 1640 | #ifdef ADB_DEBUG | |
1641 | if (adb_debug) | 1641 | if (adb_debug) | |
1642 | printf_intr("adb: ring buffer overflow\n"); | 1642 | printf_intr("adb: ring buffer overflow\n"); | |
1643 | #endif | 1643 | #endif | |
1644 | return; | 1644 | return; | |
1645 | } | 1645 | } | |
1646 | 1646 | |||
1647 | if (in->ack_only) { | 1647 | if (in->ack_only) { | |
1648 | len = in->data[0]; | 1648 | len = in->data[0]; | |
1649 | cmd = in->cmd; | 1649 | cmd = in->cmd; | |
1650 | start = 0; | 1650 | start = 0; | |
1651 | } else { | 1651 | } else { | |
1652 | switch (adbHardware) { | 1652 | switch (adbHardware) { | |
1653 | case ADB_HW_IOP: | 1653 | case ADB_HW_IOP: | |
1654 | case ADB_HW_II: | 1654 | case ADB_HW_II: | |
1655 | cmd = in->data[1]; | 1655 | cmd = in->data[1]; | |
1656 | if (in->data[0] < 2) | 1656 | if (in->data[0] < 2) | |
1657 | len = 0; | 1657 | len = 0; | |
1658 | else | 1658 | else | |
1659 | len = in->data[0]-1; | 1659 | len = in->data[0]-1; | |
1660 | start = 1; | 1660 | start = 1; | |
1661 | break; | 1661 | break; | |
1662 | 1662 | |||
1663 | case ADB_HW_IISI: | 1663 | case ADB_HW_IISI: | |
1664 | case ADB_HW_CUDA: | 1664 | case ADB_HW_CUDA: | |
1665 | /* If it's unsolicited, accept only ADB data for now */ | 1665 | /* If it's unsolicited, accept only ADB data for now */ | |
1666 | if (in->unsol) | 1666 | if (in->unsol) | |
1667 | if (0 != in->data[2]) | 1667 | if (0 != in->data[2]) | |
1668 | return; | 1668 | return; | |
1669 | cmd = in->data[4]; | 1669 | cmd = in->data[4]; | |
1670 | if (in->data[0] < 5) | 1670 | if (in->data[0] < 5) | |
1671 | len = 0; | 1671 | len = 0; | |
1672 | else | 1672 | else | |
1673 | len = in->data[0]-4; | 1673 | len = in->data[0]-4; | |
1674 | start = 4; | 1674 | start = 4; | |
1675 | break; | 1675 | break; | |
1676 | 1676 | |||
1677 | case ADB_HW_PB: | 1677 | case ADB_HW_PB: | |
1678 | cmd = in->data[1]; | 1678 | cmd = in->data[1]; | |
1679 | if (in->data[0] < 2) | 1679 | if (in->data[0] < 2) | |
1680 | len = 0; | 1680 | len = 0; | |
1681 | else | 1681 | else | |
1682 | len = in->data[0]-1; | 1682 | len = in->data[0]-1; | |
1683 | start = 1; | 1683 | start = 1; | |
1684 | break; | 1684 | break; | |
1685 | 1685 | |||
1686 | case ADB_HW_UNKNOWN: | 1686 | case ADB_HW_UNKNOWN: | |
1687 | return; | 1687 | return; | |
1688 | } | 1688 | } | |
1689 | 1689 | |||
1690 | /* Make sure there is a valid device entry for this device */ | 1690 | /* Make sure there is a valid device entry for this device */ | |
1691 | if (in->unsol) { | 1691 | if (in->unsol) { | |
1692 | /* ignore unsolicited data during adbreinit */ | 1692 | /* ignore unsolicited data during adbreinit */ | |
1693 | if (adbStarting) | 1693 | if (adbStarting) | |
1694 | return; | 1694 | return; | |
1695 | /* get device's comp. routine and data area */ | 1695 | /* get device's comp. routine and data area */ | |
1696 | if (-1 == get_adb_info(&block, ADB_CMDADDR(cmd))) | 1696 | if (-1 == get_adb_info(&block, ADB_CMDADDR(cmd))) | |
1697 | return; | 1697 | return; | |
1698 | } | 1698 | } | |
1699 | } | 1699 | } | |
1700 | 1700 | |||
1701 | /* | 1701 | /* | |
1702 | * If this is an unsolicited packet, we need to fill in | 1702 | * If this is an unsolicited packet, we need to fill in | |
1703 | * some info so adb_soft_intr can process this packet | 1703 | * some info so adb_soft_intr can process this packet | |
1704 | * properly. If it's not unsolicited, then use what | 1704 | * properly. If it's not unsolicited, then use what | |
1705 | * the caller sent us. | 1705 | * the caller sent us. | |
1706 | */ | 1706 | */ | |
1707 | if (in->unsol) { | 1707 | if (in->unsol) { | |
1708 | if (in->ack_only) panic("invalid ack-only pkg"); | 1708 | if (in->ack_only) panic("invalid ack-only pkg"); | |
1709 | 1709 | |||
1710 | adbInbound[adbInTail].compRout = (void *)block.dbServiceRtPtr; | 1710 | adbInbound[adbInTail].compRout = (void *)block.dbServiceRtPtr; | |
1711 | adbInbound[adbInTail].compData = (void *)block.dbDataAreaAddr; | 1711 | adbInbound[adbInTail].compData = (void *)block.dbDataAreaAddr; | |
1712 | adbInbound[adbInTail].saveBuf = (void *)adbInbound[adbInTail].data; | 1712 | adbInbound[adbInTail].saveBuf = (void *)adbInbound[adbInTail].data; | |
1713 | } else { | 1713 | } else { | |
1714 | adbInbound[adbInTail].compRout = (void *)in->compRout; | 1714 | adbInbound[adbInTail].compRout = (void *)in->compRout; | |
1715 | adbInbound[adbInTail].compData = (void *)in->compData; | 1715 | adbInbound[adbInTail].compData = (void *)in->compData; | |
1716 | adbInbound[adbInTail].saveBuf = (void *)in->saveBuf; | 1716 | adbInbound[adbInTail].saveBuf = (void *)in->saveBuf; | |
1717 | } | 1717 | } | |
1718 | 1718 | |||
1719 | #ifdef ADB_DEBUG | 1719 | #ifdef ADB_DEBUG | |
1720 | if (adb_debug && in->data[1] == 2) | 1720 | if (adb_debug && in->data[1] == 2) | |
1721 | printf_intr("adb: caught error\n"); | 1721 | printf_intr("adb: caught error\n"); | |
1722 | #endif | 1722 | #endif | |
1723 | 1723 | |||
1724 | /* copy the packet data over */ | 1724 | /* copy the packet data over */ | |
1725 | /* | 1725 | /* | |
1726 | * TO DO: If the *_intr routines fed their incoming data | 1726 | * TO DO: If the *_intr routines fed their incoming data | |
1727 | * directly into an adbCommand struct, which is passed to | 1727 | * directly into an adbCommand struct, which is passed to | |
1728 | * this routine, then we could eliminate this copy. | 1728 | * this routine, then we could eliminate this copy. | |
1729 | */ | 1729 | */ | |
1730 | memcpy(adbInbound[adbInTail].data + 1, in->data + start + 1, len); | 1730 | memcpy(adbInbound[adbInTail].data + 1, in->data + start + 1, len); | |
1731 | adbInbound[adbInTail].data[0] = len; | 1731 | adbInbound[adbInTail].data[0] = len; | |
1732 | adbInbound[adbInTail].cmd = cmd; | 1732 | adbInbound[adbInTail].cmd = cmd; | |
1733 | 1733 | |||
1734 | adbInCount++; | 1734 | adbInCount++; | |
1735 | if (++adbInTail >= ADB_QUEUE) | 1735 | if (++adbInTail >= ADB_QUEUE) | |
1736 | adbInTail = 0; | 1736 | adbInTail = 0; | |
1737 | 1737 | |||
1738 | /* | 1738 | /* | |
1739 | * If the debugger is running, call upper half manually. | 1739 | * If the debugger is running, call upper half manually. | |
1740 | * Otherwise, trigger a soft interrupt to handle the rest later. | 1740 | * Otherwise, trigger a soft interrupt to handle the rest later. | |
1741 | */ | 1741 | */ | |
1742 | if (adb_polling) | 1742 | if (adb_polling) | |
1743 | adb_soft_intr(); | 1743 | adb_soft_intr(); | |
1744 | else | 1744 | else | |
1745 | softint_schedule(adb_softintr_cookie); | 1745 | softint_schedule(adb_softintr_cookie); | |
1746 | 1746 | |||
1747 | return; | 1747 | return; | |
1748 | } | 1748 | } | |
1749 | 1749 | |||
1750 | 1750 | |||
1751 | /* | 1751 | /* | |
1752 | * Called to process the packets after they have been | 1752 | * Called to process the packets after they have been | |
1753 | * placed in the incoming queue. | 1753 | * placed in the incoming queue. | |
1754 | * | 1754 | * | |
1755 | */ | 1755 | */ | |
1756 | void | 1756 | void | |
1757 | adb_soft_intr(void) | 1757 | adb_soft_intr(void) | |
1758 | { | 1758 | { | |
1759 | int s; | 1759 | int s; | |
1760 | int cmd = 0; | 1760 | int cmd = 0; | |
1761 | u_char *buffer = 0; | 1761 | u_char *buffer = 0; | |
1762 | u_char *comprout = 0; | 1762 | u_char *comprout = 0; | |
1763 | u_char *compdata = 0; | 1763 | u_char *compdata = 0; | |
1764 | 1764 | |||
1765 | #if 0 | 1765 | #if 0 | |
1766 | s = splhigh(); | 1766 | s = splhigh(); | |
1767 | printf_intr("sr: %x\n", (s & 0x0700)); | 1767 | printf_intr("sr: %x\n", (s & 0x0700)); | |
1768 | splx(s); | 1768 | splx(s); | |
1769 | #endif | 1769 | #endif | |
1770 | 1770 | |||
1771 | /*delay(2*ADB_DELAY);*/ | 1771 | /*delay(2*ADB_DELAY);*/ | |
1772 | 1772 | |||
1773 | while (adbInCount) { | 1773 | while (adbInCount) { | |
1774 | #ifdef ADB_DEBUG | 1774 | #ifdef ADB_DEBUG | |
1775 | if (adb_debug & 0x80) | 1775 | if (adb_debug & 0x80) | |
1776 | printf_intr("%x %x %x ", | 1776 | printf_intr("%x %x %x ", | |
1777 | adbInCount, adbInHead, adbInTail); | 1777 | adbInCount, adbInHead, adbInTail); | |
1778 | #endif | 1778 | #endif | |
1779 | /* get the data we need from the queue */ | 1779 | /* get the data we need from the queue */ | |
1780 | buffer = adbInbound[adbInHead].saveBuf; | 1780 | buffer = adbInbound[adbInHead].saveBuf; | |
1781 | comprout = adbInbound[adbInHead].compRout; | 1781 | comprout = adbInbound[adbInHead].compRout; | |
1782 | compdata = adbInbound[adbInHead].compData; | 1782 | compdata = adbInbound[adbInHead].compData; | |
1783 | cmd = adbInbound[adbInHead].cmd; | 1783 | cmd = adbInbound[adbInHead].cmd; | |
1784 | 1784 | |||
1785 | /* copy over data to data area if it's valid */ | 1785 | /* copy over data to data area if it's valid */ | |
1786 | /* | 1786 | /* | |
1787 | * Note that for unsol packets we don't want to copy the | 1787 | * Note that for unsol packets we don't want to copy the | |
1788 | * data anywhere, so buffer was already set to 0. | 1788 | * data anywhere, so buffer was already set to 0. | |
1789 | * For ack_only buffer was set to 0, so don't copy. | 1789 | * For ack_only buffer was set to 0, so don't copy. | |
1790 | */ | 1790 | */ | |
1791 | if (buffer) | 1791 | if (buffer) | |
1792 | memcpy(buffer, adbInbound[adbInHead].data, | 1792 | memcpy(buffer, adbInbound[adbInHead].data, | |
1793 | adbInbound[adbInHead].data[0] + 1); | 1793 | adbInbound[adbInHead].data[0] + 1); | |
1794 | 1794 | |||
1795 | #ifdef ADB_DEBUG | 1795 | #ifdef ADB_DEBUG | |
1796 | if (adb_debug & 0x80) { | 1796 | if (adb_debug & 0x80) { | |
1797 | printf_intr("%p %p %p %x ", | 1797 | printf_intr("%p %p %p %x ", | |
1798 | buffer, comprout, compdata, (short)cmd); | 1798 | buffer, comprout, compdata, (short)cmd); | |
1799 | printf_intr("buf: "); | 1799 | printf_intr("buf: "); | |
1800 | print_single(adbInbound[adbInHead].data); | 1800 | print_single(adbInbound[adbInHead].data); | |
1801 | } | 1801 | } | |
1802 | #endif | 1802 | #endif | |
1803 | 1803 | |||
1804 | /* call default completion routine if it's valid */ | 1804 | /* call default completion routine if it's valid */ | |
1805 | if (comprout) { | 1805 | if (comprout) { | |
1806 | #ifdef __NetBSD__ | 1806 | #ifdef __NetBSD__ | |
1807 | __asm volatile ( | 1807 | __asm volatile ( | |
1808 | " movml #0xffff,%%sp@- \n" /* save all regs */ | 1808 | " movml #0xffff,%%sp@- \n" /* save all regs */ | |
1809 | " movl %0,%%a2 \n" /* compdata */ | 1809 | " movl %0,%%a2 \n" /* compdata */ | |
1810 | " movl %1,%%a1 \n" /* comprout */ | 1810 | " movl %1,%%a1 \n" /* comprout */ | |
1811 | " movl %2,%%a0 \n" /* buffer */ | 1811 | " movl %2,%%a0 \n" /* buffer */ | |
1812 | " movl %3,%%d0 \n" /* cmd */ | 1812 | " movl %3,%%d0 \n" /* cmd */ | |
1813 | " jbsr %%a1@ \n" /* go call routine */ | 1813 | " jbsr %%a1@ \n" /* go call routine */ | |
1814 | " movml %%sp@+,#0xffff" /* restore all regs */ | 1814 | " movml %%sp@+,#0xffff" /* restore all regs */ | |
1815 | : | 1815 | : | |
1816 | : "g"(compdata), "g"(comprout), | 1816 | : "g"(compdata), "g"(comprout), | |
1817 | "g"(buffer), "g"(cmd) | 1817 | "g"(buffer), "g"(cmd) | |
1818 | : "d0", "a0", "a1", "a2"); | 1818 | : "d0", "a0", "a1", "a2"); | |
1819 | #else /* for macos based testing */ | 1819 | #else /* for macos based testing */ | |
1820 | asm | 1820 | asm | |
1821 | { | 1821 | { | |
1822 | movem.l a0/a1/a2/d0, -(a7) | 1822 | movem.l a0/a1/a2/d0, -(a7) | |
1823 | move.l compdata, a2 | 1823 | move.l compdata, a2 | |
1824 | move.l comprout, a1 | 1824 | move.l comprout, a1 | |
1825 | move.l buffer, a0 | 1825 | move.l buffer, a0 | |
1826 | move.w cmd, d0 | 1826 | move.w cmd, d0 | |
1827 | jsr(a1) | 1827 | jsr(a1) | |
1828 | movem.l(a7)+, d0/a2/a1/a0 | 1828 | movem.l(a7)+, d0/a2/a1/a0 | |
1829 | } | 1829 | } | |
1830 | #endif | 1830 | #endif | |
1831 | 1831 | |||
1832 | } | 1832 | } | |
1833 | 1833 | |||
1834 | s = splhigh(); | 1834 | s = splhigh(); | |
1835 | adbInCount--; | 1835 | adbInCount--; | |
1836 | if (++adbInHead >= ADB_QUEUE) | 1836 | if (++adbInHead >= ADB_QUEUE) | |
1837 | adbInHead = 0; | 1837 | adbInHead = 0; | |
1838 | splx(s); | 1838 | splx(s); | |
1839 | 1839 | |||
1840 | } | 1840 | } | |
1841 | return; | 1841 | return; | |
1842 | } | 1842 | } | |
1843 | 1843 | |||
1844 | 1844 | |||
1845 | /* | 1845 | /* | |
1846 | * This is my version of the ADBOp routine. It mainly just calls the | 1846 | * This is my version of the ADBOp routine. It mainly just calls the | |
1847 | * hardware-specific routine. | 1847 | * hardware-specific routine. | |
1848 | * | 1848 | * | |
1849 | * data : pointer to data area to be used by compRout | 1849 | * data : pointer to data area to be used by compRout | |
1850 | * compRout : completion routine | 1850 | * compRout : completion routine | |
1851 | * buffer : for LISTEN: points to data to send - MAX 8 data bytes, | 1851 | * buffer : for LISTEN: points to data to send - MAX 8 data bytes, | |
1852 | * byte 0 = # of bytes | 1852 | * byte 0 = # of bytes | |
1853 | * : for TALK: points to place to save return data | 1853 | * : for TALK: points to place to save return data | |
1854 | * command : the adb command to send | 1854 | * command : the adb command to send | |
1855 | * result : 0 = success | 1855 | * result : 0 = success | |
1856 | * : -1 = could not complete | 1856 | * : -1 = could not complete | |
1857 | */ | 1857 | */ | |
1858 | int | 1858 | int | |
1859 | adb_op(Ptr buffer, Ptr compRout, Ptr data, short command) | 1859 | adb_op(Ptr buffer, Ptr compRout, Ptr data, short command) | |
1860 | { | 1860 | { | |
1861 | int result; | 1861 | int result; | |
1862 | 1862 | |||
1863 | switch (adbHardware) { | 1863 | switch (adbHardware) { | |
1864 | case ADB_HW_II: | 1864 | case ADB_HW_II: | |
1865 | result = send_adb_II((u_char *)0, (u_char *)buffer, | 1865 | result = send_adb_II((u_char *)0, (u_char *)buffer, | |
1866 | (void *)compRout, (void *)data, (int)command); | 1866 | (void *)compRout, (void *)data, (int)command); | |
1867 | if (result == 0) | 1867 | if (result == 0) | |
1868 | return 0; | 1868 | return 0; | |
1869 | else | 1869 | else | |
1870 | return -1; | 1870 | return -1; | |
1871 | break; | 1871 | break; | |
1872 | 1872 | |||
1873 | case ADB_HW_IOP: | 1873 | case ADB_HW_IOP: | |
1874 | #ifdef __notyet__ | 1874 | #ifdef __notyet__ | |
1875 | result = send_adb_iop((int)command, (u_char *)buffer, | 1875 | result = send_adb_iop((int)command, (u_char *)buffer, | |
1876 | (void *)compRout, (void *)data); | 1876 | (void *)compRout, (void *)data); | |
1877 | if (result == 0) | 1877 | if (result == 0) | |
1878 | return 0; | 1878 | return 0; | |
1879 | else | 1879 | else | |
1880 | #endif | 1880 | #endif | |
1881 | return -1; | 1881 | return -1; | |
1882 | break; | 1882 | break; | |
1883 | 1883 | |||
1884 | case ADB_HW_IISI: | 1884 | case ADB_HW_IISI: | |
1885 | result = send_adb_IIsi((u_char *)0, (u_char *)buffer, | 1885 | result = send_adb_IIsi((u_char *)0, (u_char *)buffer, | |
1886 | (void *)compRout, (void *)data, (int)command); | 1886 | (void *)compRout, (void *)data, (int)command); | |
1887 | /* | 1887 | /* | |
1888 | * I wish I knew why this delay is needed. It usually needs to | 1888 | * I wish I knew why this delay is needed. It usually needs to | |
1889 | * be here when several commands are sent in close succession, | 1889 | * be here when several commands are sent in close succession, | |
1890 | * especially early in device probes when doing collision | 1890 | * especially early in device probes when doing collision | |
1891 | * detection. It must be some race condition. Sigh. - jpw | 1891 | * detection. It must be some race condition. Sigh. - jpw | |
1892 | */ | 1892 | */ | |
1893 | delay(100); | 1893 | delay(100); | |
1894 | if (result == 0) | 1894 | if (result == 0) | |
1895 | return 0; | 1895 | return 0; | |
1896 | else | 1896 | else | |
1897 | return -1; | 1897 | return -1; | |
1898 | break; | 1898 | break; | |
1899 | 1899 | |||
1900 | case ADB_HW_PB: | 1900 | case ADB_HW_PB: | |
1901 | result = pm_adb_op((u_char *)buffer, (void *)compRout, | 1901 | result = pm_adb_op((u_char *)buffer, (void *)compRout, | |
1902 | (void *)data, (int)command); | 1902 | (void *)data, (int)command); | |
1903 | 1903 | |||
1904 | if (result == 0) | 1904 | if (result == 0) | |
1905 | return 0; | 1905 | return 0; | |
1906 | else | 1906 | else | |
1907 | return -1; | 1907 | return -1; | |
1908 | break; | 1908 | break; | |
1909 | 1909 | |||
1910 | case ADB_HW_CUDA: | 1910 | case ADB_HW_CUDA: | |
1911 | result = send_adb_cuda((u_char *)0, (u_char *)buffer, | 1911 | result = send_adb_cuda((u_char *)0, (u_char *)buffer, | |
1912 | (void *)compRout, (void *)data, (int)command); | 1912 | (void *)compRout, (void *)data, (int)command); | |
1913 | if (result == 0) | 1913 | if (result == 0) | |
1914 | return 0; | 1914 | return 0; | |
1915 | else | 1915 | else | |
1916 | return -1; | 1916 | return -1; | |
1917 | break; | 1917 | break; | |
1918 | 1918 | |||
1919 | case ADB_HW_UNKNOWN: | 1919 | case ADB_HW_UNKNOWN: | |
1920 | default: | 1920 | default: | |
1921 | return -1; | 1921 | return -1; | |
1922 | } | 1922 | } | |
1923 | } | 1923 | } | |
1924 | 1924 | |||
1925 | 1925 | |||
1926 | /* | 1926 | /* | |
1927 | * adb_hw_setup | 1927 | * adb_hw_setup | |
1928 | * This routine sets up the possible machine specific hardware | 1928 | * This routine sets up the possible machine specific hardware | |
1929 | * config (mainly VIA settings) for the various models. | 1929 | * config (mainly VIA settings) for the various models. | |
1930 | */ | 1930 | */ | |
1931 | void | 1931 | void | |
1932 | adb_hw_setup(void) | 1932 | adb_hw_setup(void) | |
1933 | { | 1933 | { | |
1934 | volatile int i; | 1934 | volatile int i; | |
1935 | u_char send_string[ADB_MAX_MSG_LENGTH]; | 1935 | u_char send_string[ADB_MAX_MSG_LENGTH]; | |
1936 | 1936 | |||
1937 | switch (adbHardware) { | 1937 | switch (adbHardware) { | |
1938 | case ADB_HW_II: | 1938 | case ADB_HW_II: | |
1939 | via1_register_irq(2, adb_intr_II, NULL); | 1939 | via1_register_irq(2, adb_intr_II, NULL); | |
1940 | 1940 | |||
1941 | via_reg(VIA1, vDirB) |= 0x30; /* register B bits 4 and 5: | 1941 | via_reg(VIA1, vDirB) |= 0x30; /* register B bits 4 and 5: | |
1942 | * outputs */ | 1942 | * outputs */ | |
1943 | via_reg(VIA1, vDirB) &= 0xf7; /* register B bit 3: input */ | 1943 | via_reg(VIA1, vDirB) &= 0xf7; /* register B bit 3: input */ | |
1944 | via_reg(VIA1, vACR) &= ~vSR_OUT; /* make sure SR is set | 1944 | via_reg(VIA1, vACR) &= ~vSR_OUT; /* make sure SR is set | |
1945 | * to IN (II, IIsi) */ | 1945 | * to IN (II, IIsi) */ | |
1946 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | 1946 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | |
1947 | * hardware (II, IIsi) */ | 1947 | * hardware (II, IIsi) */ | |
1948 | adbBusState = ADB_BUS_IDLE; /* this var. used in II-series | 1948 | adbBusState = ADB_BUS_IDLE; /* this var. used in II-series | |
1949 | * code only */ | 1949 | * code only */ | |
1950 | via_reg(VIA1, vIER) = 0x84; /* make sure VIA interrupts | 1950 | via_reg(VIA1, vIER) = 0x84; /* make sure VIA interrupts | |
1951 | * are on (II, IIsi) */ | 1951 | * are on (II, IIsi) */ | |
1952 | ADB_SET_STATE_IDLE_II(); /* set ADB bus state to idle */ | 1952 | ADB_SET_STATE_IDLE_II(); /* set ADB bus state to idle */ | |
1953 | 1953 | |||
1954 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | 1954 | ADB_VIA_CLR_INTR(); /* clear interrupt */ | |
1955 | break; | 1955 | break; | |
1956 | 1956 | |||
1957 | case ADB_HW_IOP: | 1957 | case ADB_HW_IOP: | |
1958 | via_reg(VIA1, vIER) = 0x84; | 1958 | via_reg(VIA1, vIER) = 0x84; | |
1959 | via_reg(VIA1, vIFR) = 0x04; | 1959 | via_reg(VIA1, vIFR) = 0x04; | |
1960 | #ifdef __notyet__ | 1960 | #ifdef __notyet__ | |
1961 | adbActionState = ADB_ACTION_RUNNING; | 1961 | adbActionState = ADB_ACTION_RUNNING; | |
1962 | #endif | 1962 | #endif | |
1963 | break; | 1963 | break; | |
1964 | 1964 | |||
1965 | case ADB_HW_IISI: | 1965 | case ADB_HW_IISI: | |
1966 | via1_register_irq(2, adb_intr_IIsi, NULL); | 1966 | via1_register_irq(2, adb_intr_IIsi, NULL); | |
1967 | via_reg(VIA1, vDirB) |= 0x30; /* register B bits 4 and 5: | 1967 | via_reg(VIA1, vDirB) |= 0x30; /* register B bits 4 and 5: | |
1968 | * outputs */ | 1968 | * outputs */ | |
1969 | via_reg(VIA1, vDirB) &= 0xf7; /* register B bit 3: input */ | 1969 | via_reg(VIA1, vDirB) &= 0xf7; /* register B bit 3: input */ | |
1970 | via_reg(VIA1, vACR) &= ~vSR_OUT; /* make sure SR is set | 1970 | via_reg(VIA1, vACR) &= ~vSR_OUT; /* make sure SR is set | |
1971 | * to IN (II, IIsi) */ | 1971 | * to IN (II, IIsi) */ | |
1972 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | 1972 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | |
1973 | * hardware (II, IIsi) */ | 1973 | * hardware (II, IIsi) */ | |
1974 | adbBusState = ADB_BUS_IDLE; /* this var. used in II-series | 1974 | adbBusState = ADB_BUS_IDLE; /* this var. used in II-series | |
1975 | * code only */ | 1975 | * code only */ | |
1976 | via_reg(VIA1, vIER) = 0x84; /* make sure VIA interrupts | 1976 | via_reg(VIA1, vIER) = 0x84; /* make sure VIA interrupts | |
1977 | * are on (II, IIsi) */ | 1977 | * are on (II, IIsi) */ | |
1978 | ADB_SET_STATE_IDLE_IISI(); /* set ADB bus state to idle */ | 1978 | ADB_SET_STATE_IDLE_IISI(); /* set ADB bus state to idle */ | |
1979 | 1979 | |||
1980 | /* get those pesky clock ticks we missed while booting */ | 1980 | /* get those pesky clock ticks we missed while booting */ | |
1981 | for (i = 0; i < 30; i++) { | 1981 | for (i = 0; i < 30; i++) { | |
1982 | delay(ADB_DELAY); | 1982 | delay(ADB_DELAY); | |
1983 | adb_hw_setup_IIsi(send_string); | 1983 | adb_hw_setup_IIsi(send_string); | |
1984 | #ifdef ADB_DEBUG | 1984 | #ifdef ADB_DEBUG | |
1985 | if (adb_debug) { | 1985 | if (adb_debug) { | |
1986 | printf_intr("adb: cleanup: "); | 1986 | printf_intr("adb: cleanup: "); | |
1987 | print_single(send_string); | 1987 | print_single(send_string); | |
1988 | } | 1988 | } | |
1989 | #endif | 1989 | #endif | |
1990 | delay(ADB_DELAY); | 1990 | delay(ADB_DELAY); | |
1991 | if (ADB_INTR_IS_OFF) | 1991 | if (ADB_INTR_IS_OFF) | |
1992 | break; | 1992 | break; | |
1993 | } | 1993 | } | |
1994 | break; | 1994 | break; | |
1995 | 1995 | |||
1996 | case ADB_HW_PB: | 1996 | case ADB_HW_PB: | |
1997 | /* | 1997 | /* | |
1998 | * XXX - really PM_VIA_CLR_INTR - should we put it in | 1998 | * XXX - really PM_VIA_CLR_INTR - should we put it in | |
1999 | * pm_direct.h? | 1999 | * pm_direct.h? | |
2000 | */ | 2000 | */ | |
2001 | pm_hw_setup(); | 2001 | pm_hw_setup(); | |
2002 | break; | 2002 | break; | |
2003 | 2003 | |||
2004 | case ADB_HW_CUDA: | 2004 | case ADB_HW_CUDA: | |
2005 | via1_register_irq(2, adb_intr_cuda, NULL); | 2005 | via1_register_irq(2, adb_intr_cuda, NULL); | |
2006 | via_reg(VIA1, vDirB) |= 0x30; /* register B bits 4 and 5: | 2006 | via_reg(VIA1, vDirB) |= 0x30; /* register B bits 4 and 5: | |
2007 | * outputs */ | 2007 | * outputs */ | |
2008 | via_reg(VIA1, vDirB) &= 0xf7; /* register B bit 3: input */ | 2008 | via_reg(VIA1, vDirB) &= 0xf7; /* register B bit 3: input */ | |
2009 | via_reg(VIA1, vACR) &= ~vSR_OUT; /* make sure SR is set | 2009 | via_reg(VIA1, vACR) &= ~vSR_OUT; /* make sure SR is set | |
2010 | * to IN */ | 2010 | * to IN */ | |
2011 | via_reg(VIA1, vACR) = (via_reg(VIA1, vACR) | 0x0c) & ~0x10; | 2011 | via_reg(VIA1, vACR) = (via_reg(VIA1, vACR) | 0x0c) & ~0x10; | |
2012 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | 2012 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | |
2013 | * hardware */ | 2013 | * hardware */ | |
2014 | adbBusState = ADB_BUS_IDLE; /* this var. used in II-series | 2014 | adbBusState = ADB_BUS_IDLE; /* this var. used in II-series | |
2015 | * code only */ | 2015 | * code only */ | |
2016 | via_reg(VIA1, vIER) = 0x84; /* make sure VIA interrupts | 2016 | via_reg(VIA1, vIER) = 0x84; /* make sure VIA interrupts | |
2017 | * are on */ | 2017 | * are on */ | |
2018 | ADB_SET_STATE_IDLE_CUDA(); /* set ADB bus state to idle */ | 2018 | ADB_SET_STATE_IDLE_CUDA(); /* set ADB bus state to idle */ | |
2019 | 2019 | |||
2020 | /* sort of a device reset */ | 2020 | /* sort of a device reset */ | |
2021 | i = ADB_SR(); /* clear interrupt */ | 2021 | i = ADB_SR(); /* clear interrupt */ | |
2022 | ADB_VIA_INTR_DISABLE(); /* no interrupts while clearing */ | 2022 | ADB_VIA_INTR_DISABLE(); /* no interrupts while clearing */ | |
2023 | ADB_SET_STATE_IDLE_CUDA(); /* reset state to idle */ | 2023 | ADB_SET_STATE_IDLE_CUDA(); /* reset state to idle */ | |
2024 | delay(ADB_DELAY); | 2024 | delay(ADB_DELAY); | |
2025 | ADB_SET_STATE_TIP(); /* signal start of frame */ | 2025 | ADB_SET_STATE_TIP(); /* signal start of frame */ | |
2026 | delay(ADB_DELAY); | 2026 | delay(ADB_DELAY); | |
2027 | ADB_TOGGLE_STATE_ACK_CUDA(); | 2027 | ADB_TOGGLE_STATE_ACK_CUDA(); | |
2028 | delay(ADB_DELAY); | 2028 | delay(ADB_DELAY); | |
2029 | ADB_CLR_STATE_TIP(); | 2029 | ADB_CLR_STATE_TIP(); | |
2030 | delay(ADB_DELAY); | 2030 | delay(ADB_DELAY); | |
2031 | ADB_SET_STATE_IDLE_CUDA(); /* back to idle state */ | 2031 | ADB_SET_STATE_IDLE_CUDA(); /* back to idle state */ | |
2032 | i = ADB_SR(); /* clear interrupt */ | 2032 | i = ADB_SR(); /* clear interrupt */ | |
2033 | ADB_VIA_INTR_ENABLE(); /* ints ok now */ | 2033 | ADB_VIA_INTR_ENABLE(); /* ints ok now */ | |
2034 | break; | 2034 | break; | |
2035 | 2035 | |||
2036 | case ADB_HW_UNKNOWN: | 2036 | case ADB_HW_UNKNOWN: | |
2037 | default: | 2037 | default: | |
2038 | via_reg(VIA1, vIER) = 0x04; /* turn interrupts off - TO | 2038 | via_reg(VIA1, vIER) = 0x04; /* turn interrupts off - TO | |
2039 | * DO: turn PB ints off? */ | 2039 | * DO: turn PB ints off? */ | |
2040 | return; | 2040 | return; | |
2041 | break; | 2041 | break; | |
2042 | } | 2042 | } | |
2043 | } | 2043 | } | |
2044 | 2044 | |||
2045 | 2045 | |||
2046 | /* | 2046 | /* | |
2047 | * adb_hw_setup_IIsi | 2047 | * adb_hw_setup_IIsi | |
2048 | * This is sort of a "read" routine that forces the adb hardware through a read cycle | 2048 | * This is sort of a "read" routine that forces the adb hardware through a read cycle | |
2049 | * if there is something waiting. This helps "clean up" any commands that may have gotten | 2049 | * if there is something waiting. This helps "clean up" any commands that may have gotten | |
2050 | * stuck or stopped during the boot process. | 2050 | * stuck or stopped during the boot process. | |
2051 | * | 2051 | * | |
2052 | */ | 2052 | */ | |
2053 | void | 2053 | void | |
2054 | adb_hw_setup_IIsi(u_char *buffer) | 2054 | adb_hw_setup_IIsi(u_char *buffer) | |
2055 | { | 2055 | { | |
2056 | int i; | 2056 | int i; | |
2057 | int s; | 2057 | int s; | |
2058 | long my_time; | 2058 | long my_time; | |
2059 | int endofframe; | 2059 | int endofframe; | |
2060 | 2060 | |||
2061 | delay(ADB_DELAY); | 2061 | delay(ADB_DELAY); | |
2062 | 2062 | |||
2063 | i = 1; /* skip over [0] */ | 2063 | i = 1; /* skip over [0] */ | |
2064 | s = splhigh(); /* block ALL interrupts while we are working */ | 2064 | s = splhigh(); /* block ALL interrupts while we are working */ | |
2065 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 2065 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
2066 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | 2066 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | |
2067 | /* this is required, especially on faster machines */ | 2067 | /* this is required, especially on faster machines */ | |
2068 | delay(ADB_DELAY); | 2068 | delay(ADB_DELAY); | |
2069 | 2069 | |||
2070 | if (ADB_INTR_IS_ON) { | 2070 | if (ADB_INTR_IS_ON) { | |
2071 | ADB_SET_STATE_ACTIVE(); /* signal start of data frame */ | 2071 | ADB_SET_STATE_ACTIVE(); /* signal start of data frame */ | |
2072 | 2072 | |||
2073 | endofframe = 0; | 2073 | endofframe = 0; | |
2074 | while (0 == endofframe) { | 2074 | while (0 == endofframe) { | |
2075 | /* | 2075 | /* | |
2076 | * Poll for ADB interrupt and watch for timeout. | 2076 | * Poll for ADB interrupt and watch for timeout. | |
2077 | * If time out, keep going in hopes of not hanging | 2077 | * If time out, keep going in hopes of not hanging | |
2078 | * the ADB chip - I think | 2078 | * the ADB chip - I think | |
2079 | */ | 2079 | */ | |
2080 | my_time = ADB_DELAY * 5; | 2080 | my_time = ADB_DELAY * 5; | |
2081 | while ((ADB_SR_INTR_IS_OFF) && (my_time-- > 0)) | 2081 | while ((ADB_SR_INTR_IS_OFF) && (my_time-- > 0)) | |
2082 | (void)via_reg(VIA1, vBufB); | 2082 | (void)via_reg(VIA1, vBufB); | |
2083 | 2083 | |||
2084 | buffer[i++] = ADB_SR(); /* reset interrupt flag by | 2084 | buffer[i++] = ADB_SR(); /* reset interrupt flag by | |
2085 | * reading vSR */ | 2085 | * reading vSR */ | |
2086 | /* | 2086 | /* | |
2087 | * Perhaps put in a check here that ignores all data | 2087 | * Perhaps put in a check here that ignores all data | |
2088 | * after the first ADB_MAX_MSG_LENGTH bytes ??? | 2088 | * after the first ADB_MAX_MSG_LENGTH bytes ??? | |
2089 | */ | 2089 | */ | |
2090 | if (ADB_INTR_IS_OFF) /* check for end of frame */ | 2090 | if (ADB_INTR_IS_OFF) /* check for end of frame */ | |
2091 | endofframe = 1; | 2091 | endofframe = 1; | |
2092 | 2092 | |||
2093 | ADB_SET_STATE_ACKON(); /* send ACK to ADB chip */ | 2093 | ADB_SET_STATE_ACKON(); /* send ACK to ADB chip */ | |
2094 | delay(ADB_DELAY); /* delay */ | 2094 | delay(ADB_DELAY); /* delay */ | |
2095 | ADB_SET_STATE_ACKOFF(); /* send ACK to ADB chip */ | 2095 | ADB_SET_STATE_ACKOFF(); /* send ACK to ADB chip */ | |
2096 | } | 2096 | } | |
2097 | ADB_SET_STATE_INACTIVE(); /* signal end of frame and | 2097 | ADB_SET_STATE_INACTIVE(); /* signal end of frame and | |
2098 | * delay */ | 2098 | * delay */ | |
2099 | 2099 | |||
2100 | /* probably don't need to delay this long */ | 2100 | /* probably don't need to delay this long */ | |
2101 | delay(ADB_DELAY); | 2101 | delay(ADB_DELAY); | |
2102 | } | 2102 | } | |
2103 | buffer[0] = --i; /* [0] is length of message */ | 2103 | buffer[0] = --i; /* [0] is length of message */ | |
2104 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | 2104 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | |
2105 | splx(s); /* restore interrupts */ | 2105 | splx(s); /* restore interrupts */ | |
2106 | 2106 | |||
2107 | return; | 2107 | return; | |
2108 | } /* adb_hw_setup_IIsi */ | 2108 | } /* adb_hw_setup_IIsi */ | |
2109 | 2109 | |||
2110 | 2110 | |||
2111 | 2111 | |||
2112 | /* | 2112 | /* | |
2113 | * adb_reinit sets up the adb stuff | 2113 | * adb_reinit sets up the adb stuff | |
2114 | * | 2114 | * | |
2115 | */ | 2115 | */ | |
2116 | void | 2116 | void | |
2117 | adb_reinit(void) | 2117 | adb_reinit(void) | |
2118 | { | 2118 | { | |
2119 | u_char send_string[ADB_MAX_MSG_LENGTH]; | 2119 | u_char send_string[ADB_MAX_MSG_LENGTH]; | |
2120 | ADBDataBlock data; /* temp. holder for getting device info */ | 2120 | ADBDataBlock data; /* temp. holder for getting device info */ | |
2121 | volatile int i, x; | 2121 | volatile int i, x; | |
2122 | int s; | 2122 | int s; | |
2123 | int command; | 2123 | int command; | |
2124 | int result; | 2124 | int result; | |
2125 | int saveptr; /* point to next free relocation address */ | 2125 | int saveptr; /* point to next free relocation address */ | |
2126 | int device; | 2126 | int device; | |
2127 | int nonewtimes; /* times thru loop w/o any new devices */ | 2127 | int nonewtimes; /* times thru loop w/o any new devices */ | |
2128 | static bool again; | 2128 | static bool again; | |
2129 | 2129 | |||
2130 | if (!again) { | 2130 | if (!again) { | |
2131 | callout_init(&adb_cuda_tickle_ch, 0); | 2131 | callout_init(&adb_cuda_tickle_ch, 0); | |
2132 | again = true; | 2132 | again = true; | |
2133 | } | 2133 | } | |
2134 | 2134 | |||
2135 | adb_setup_hw_type(); /* setup hardware type */ | 2135 | adb_setup_hw_type(); /* setup hardware type */ | |
2136 | 2136 | |||
2137 | /* Make sure we are not interrupted while building the table. */ | 2137 | /* Make sure we are not interrupted while building the table. */ | |
2138 | /* ints must be on for PB & IOP (at least, for now) */ | 2138 | /* ints must be on for PB & IOP (at least, for now) */ | |
2139 | if (adbHardware != ADB_HW_PB && adbHardware != ADB_HW_IOP) | 2139 | if (adbHardware != ADB_HW_PB && adbHardware != ADB_HW_IOP) | |
2140 | s = splhigh(); | 2140 | s = splhigh(); | |
2141 | else | 2141 | else | |
2142 | s = 0; /* XXX shut the compiler up*/ | 2142 | s = 0; /* XXX shut the compiler up*/ | |
2143 | 2143 | |||
2144 | ADBNumDevices = 0; /* no devices yet */ | 2144 | ADBNumDevices = 0; /* no devices yet */ | |
2145 | 2145 | |||
2146 | /* Let intr routines know we are running reinit */ | 2146 | /* Let intr routines know we are running reinit */ | |
2147 | adbStarting = 1; | 2147 | adbStarting = 1; | |
2148 | 2148 | |||
2149 | /* | 2149 | /* | |
2150 | * Initialize the ADB table. For now, we'll always use the same table | 2150 | * Initialize the ADB table. For now, we'll always use the same table | |
2151 | * that is defined at the beginning of this file - no mallocs. | 2151 | * that is defined at the beginning of this file - no mallocs. | |
2152 | */ | 2152 | */ | |
2153 | for (i = 0; i < 16; i++) { | 2153 | for (i = 0; i < 16; i++) { | |
2154 | ADBDevTable[i].devType = 0; | 2154 | ADBDevTable[i].devType = 0; | |
2155 | ADBDevTable[i].origAddr = ADBDevTable[i].currentAddr = 0; | 2155 | ADBDevTable[i].origAddr = ADBDevTable[i].currentAddr = 0; | |
2156 | } | 2156 | } | |
2157 | 2157 | |||
2158 | adb_hw_setup(); /* init the VIA bits and hard reset ADB */ | 2158 | adb_hw_setup(); /* init the VIA bits and hard reset ADB */ | |
2159 | 2159 | |||
2160 | delay(1000); | 2160 | delay(1000); | |
2161 | 2161 | |||
2162 | /* send an ADB reset first */ | 2162 | /* send an ADB reset first */ | |
2163 | (void)adb_op_sync((Ptr)0, (Ptr)0, (Ptr)0, (short)0x00); | 2163 | (void)adb_op_sync((Ptr)0, (Ptr)0, (Ptr)0, (short)0x00); | |
2164 | delay(3000); | 2164 | delay(3000); | |
2165 | 2165 | |||
2166 | /* | 2166 | /* | |
2167 | * Probe for ADB devices. Probe devices 1-15 quickly to determine | 2167 | * Probe for ADB devices. Probe devices 1-15 quickly to determine | |
2168 | * which device addresses are in use and which are free. For each | 2168 | * which device addresses are in use and which are free. For each | |
2169 | * address that is in use, move the device at that address to a higher | 2169 | * address that is in use, move the device at that address to a higher | |
2170 | * free address. Continue doing this at that address until no device | 2170 | * free address. Continue doing this at that address until no device | |
2171 | * responds at that address. Then move the last device that was moved | 2171 | * responds at that address. Then move the last device that was moved | |
2172 | * back to the original address. Do this for the remaining addresses | 2172 | * back to the original address. Do this for the remaining addresses | |
2173 | * that we determined were in use. | 2173 | * that we determined were in use. | |
2174 | * | 2174 | * | |
2175 | * When finished, do this entire process over again with the updated | 2175 | * When finished, do this entire process over again with the updated | |
2176 | * list of in use addresses. Do this until no new devices have been | 2176 | * list of in use addresses. Do this until no new devices have been | |
2177 | * found in 20 passes through the in use address list. (This probably | 2177 | * found in 20 passes through the in use address list. (This probably |
2178 | * seems long and complicated, but it's the best way to detect multiple | 2178 | * seems long and complicated, but it's the best way to detect multiple | |
2179 | * devices at the same address - sometimes it takes a couple of tries | 2179 | * devices at the same address - sometimes it takes a couple of tries | |
2180 | * before the collision is detected.) | 2180 | * before the collision is detected.) | |
2181 | */ | 2181 | */ | |
2182 | 2182 | |||
2183 | /* initial scan through the devices */ | 2183 | /* initial scan through the devices */ | |
2184 | for (i = 1; i < 16; i++) { | 2184 | for (i = 1; i < 16; i++) { | |
2185 | command = ADBTALK(i, 3); | 2185 | command = ADBTALK(i, 3); | |
2186 | result = adb_op_sync((Ptr)send_string, (Ptr)0, | 2186 | result = adb_op_sync((Ptr)send_string, (Ptr)0, | |
2187 | (Ptr)0, (short)command); | 2187 | (Ptr)0, (short)command); | |
2188 | 2188 | |||
2189 | if (result == 0 && send_string[0] != 0) { | 2189 | if (result == 0 && send_string[0] != 0) { | |
2190 | /* found a device */ | 2190 | /* found a device */ | |
2191 | ++ADBNumDevices; | 2191 | ++ADBNumDevices; | |
2192 | KASSERT(ADBNumDevices < 16); | 2192 | KASSERT(ADBNumDevices < 16); | |
2193 | ADBDevTable[ADBNumDevices].devType = | 2193 | ADBDevTable[ADBNumDevices].devType = | |
2194 | (int)(send_string[2]); | 2194 | (int)(send_string[2]); | |
2195 | ADBDevTable[ADBNumDevices].origAddr = i; | 2195 | ADBDevTable[ADBNumDevices].origAddr = i; | |
2196 | ADBDevTable[ADBNumDevices].currentAddr = i; | 2196 | ADBDevTable[ADBNumDevices].currentAddr = i; | |
2197 | ADBDevTable[ADBNumDevices].DataAreaAddr = | 2197 | ADBDevTable[ADBNumDevices].DataAreaAddr = | |
2198 | (long)0; | 2198 | (long)0; | |
2199 | ADBDevTable[ADBNumDevices].ServiceRtPtr = (void *)0; | 2199 | ADBDevTable[ADBNumDevices].ServiceRtPtr = (void *)0; | |
2200 | pm_check_adb_devices(i); /* tell pm driver device | 2200 | pm_check_adb_devices(i); /* tell pm driver device | |
2201 | * is here */ | 2201 | * is here */ | |
2202 | } | 2202 | } | |
2203 | } | 2203 | } | |
2204 | 2204 | |||
2205 | /* find highest unused address */ | 2205 | /* find highest unused address */ | |
2206 | for (saveptr = 15; saveptr > 0; saveptr--) | 2206 | for (saveptr = 15; saveptr > 0; saveptr--) | |
2207 | if (-1 == get_adb_info(&data, saveptr)) | 2207 | if (-1 == get_adb_info(&data, saveptr)) | |
2208 | break; | 2208 | break; | |
2209 | 2209 | |||
2210 | #ifdef ADB_DEBUG | 2210 | #ifdef ADB_DEBUG | |
2211 | if (adb_debug & 0x80) { | 2211 | if (adb_debug & 0x80) { | |
2212 | printf_intr("first free is: 0x%02x\n", saveptr); | 2212 | printf_intr("first free is: 0x%02x\n", saveptr); | |
2213 | printf_intr("devices: %i\n", ADBNumDevices); | 2213 | printf_intr("devices: %i\n", ADBNumDevices); | |
2214 | } | 2214 | } | |
2215 | #endif | 2215 | #endif | |
2216 | 2216 | |||
2217 | nonewtimes = 0; /* no loops w/o new devices */ | 2217 | nonewtimes = 0; /* no loops w/o new devices */ | |
2218 | while (saveptr > 0 && nonewtimes++ < 11) { | 2218 | while (saveptr > 0 && nonewtimes++ < 11) { | |
2219 | for (i = 1;saveptr > 0 && i <= ADBNumDevices; i++) { | 2219 | for (i = 1;saveptr > 0 && i <= ADBNumDevices; i++) { | |
2220 | device = ADBDevTable[i].currentAddr; | 2220 | device = ADBDevTable[i].currentAddr; | |
2221 | #ifdef ADB_DEBUG | 2221 | #ifdef ADB_DEBUG | |
2222 | if (adb_debug & 0x80) | 2222 | if (adb_debug & 0x80) | |
2223 | printf_intr("moving device 0x%02x to 0x%02x " | 2223 | printf_intr("moving device 0x%02x to 0x%02x " | |
2224 | "(index 0x%02x) ", device, saveptr, i); | 2224 | "(index 0x%02x) ", device, saveptr, i); | |
2225 | #endif | 2225 | #endif | |
2226 | 2226 | |||
2227 | /* send TALK R3 to address */ | 2227 | /* send TALK R3 to address */ | |
2228 | command = ADBTALK(device, 3); | 2228 | command = ADBTALK(device, 3); | |
2229 | (void)adb_op_sync((Ptr)send_string, (Ptr)0, | 2229 | (void)adb_op_sync((Ptr)send_string, (Ptr)0, | |
2230 | (Ptr)0, (short)command); | 2230 | (Ptr)0, (short)command); | |
2231 | 2231 | |||
2232 | /* move device to higher address */ | 2232 | /* move device to higher address */ | |
2233 | command = ADBLISTEN(device, 3); | 2233 | command = ADBLISTEN(device, 3); | |
2234 | send_string[0] = 2; | 2234 | send_string[0] = 2; | |
2235 | send_string[1] = (u_char)(saveptr | 0x60); | 2235 | send_string[1] = (u_char)(saveptr | 0x60); | |
2236 | send_string[2] = 0xfe; | 2236 | send_string[2] = 0xfe; | |
2237 | (void)adb_op_sync((Ptr)send_string, (Ptr)0, | 2237 | (void)adb_op_sync((Ptr)send_string, (Ptr)0, | |
2238 | (Ptr)0, (short)command); | 2238 | (Ptr)0, (short)command); | |
2239 | delay(1000); | 2239 | delay(1000); | |
2240 | 2240 | |||
2241 | /* send TALK R3 - anything at new address? */ | 2241 | /* send TALK R3 - anything at new address? */ | |
2242 | command = ADBTALK(saveptr, 3); | 2242 | command = ADBTALK(saveptr, 3); | |
2243 | send_string[0] = 0; | 2243 | send_string[0] = 0; |
--- src/sys/arch/macppc/dev/adb_direct.c 2023/09/21 09:31:49 1.45
+++ src/sys/arch/macppc/dev/adb_direct.c 2024/03/05 20:58:05 1.46
@@ -1,1321 +1,1321 @@ | @@ -1,1321 +1,1321 @@ | |||
1 | /* $NetBSD: adb_direct.c,v 1.45 2023/09/21 09:31:49 msaitoh Exp $ */ | 1 | /* $NetBSD: adb_direct.c,v 1.46 2024/03/05 20:58:05 andvar Exp $ */ | |
2 | 2 | |||
3 | /* From: adb_direct.c 2.02 4/18/97 jpw */ | 3 | /* From: adb_direct.c 2.02 4/18/97 jpw */ | |
4 | 4 | |||
5 | /* | 5 | /* | |
6 | * Copyright (C) 1996, 1997 John P. Wittkoski | 6 | * Copyright (C) 1996, 1997 John P. Wittkoski | |
7 | * All rights reserved. | 7 | * All rights reserved. | |
8 | * | 8 | * | |
9 | * Redistribution and use in source and binary forms, with or without | 9 | * Redistribution and use in source and binary forms, with or without | |
10 | * modification, are permitted provided that the following conditions | 10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | 11 | * are met: | |
12 | * 1. Redistributions of source code must retain the above copyright | 12 | * 1. Redistributions of source code must retain the above copyright | |
13 | * notice, this list of conditions and the following disclaimer. | 13 | * notice, this list of conditions and the following disclaimer. | |
14 | * 2. Redistributions in binary form must reproduce the above copyright | 14 | * 2. Redistributions in binary form must reproduce the above copyright | |
15 | * notice, this list of conditions and the following disclaimer in the | 15 | * notice, this list of conditions and the following disclaimer in the | |
16 | * documentation and/or other materials provided with the distribution. | 16 | * documentation and/or other materials provided with the distribution. | |
17 | * 3. All advertising materials mentioning features or use of this software | 17 | * 3. All advertising materials mentioning features or use of this software | |
18 | * must display the following acknowledgement: | 18 | * must display the following acknowledgement: | |
19 | * This product includes software developed by John P. Wittkoski. | 19 | * This product includes software developed by John P. Wittkoski. | |
20 | * 4. The name of the author may not be used to endorse or promote products | 20 | * 4. The name of the author may not be used to endorse or promote products | |
21 | * derived from this software without specific prior written permission. | 21 | * derived from this software without specific prior written permission. | |
22 | * | 22 | * | |
23 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | 23 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
24 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | 24 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
25 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | 25 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
26 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | 26 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | 27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
29 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 29 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
30 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 30 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
32 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 32 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
33 | */ | 33 | */ | |
34 | 34 | |||
35 | /* | 35 | /* | |
36 | * This code is rather messy, but I don't have time right now | 36 | * This code is rather messy, but I don't have time right now | |
37 | * to clean it up as much as I would like. | 37 | * to clean it up as much as I would like. | |
38 | * But it works, so I'm happy. :-) jpw | 38 | * But it works, so I'm happy. :-) jpw | |
39 | */ | 39 | */ | |
40 | 40 | |||
41 | /* | 41 | /* | |
42 | * TO DO: | 42 | * TO DO: | |
43 | * - We could reduce the time spent in the adb_intr_* routines | 43 | * - We could reduce the time spent in the adb_intr_* routines | |
44 | * by having them save the incoming and outgoing data directly | 44 | * by having them save the incoming and outgoing data directly | |
45 | * in the adbInbound and adbOutbound queues, as it would reduce | 45 | * in the adbInbound and adbOutbound queues, as it would reduce | |
46 | * the number of times we need to copy the data around. It | 46 | * the number of times we need to copy the data around. It | |
47 | * would also make the code more readable and easier to follow. | 47 | * would also make the code more readable and easier to follow. | |
48 | * - (Related to above) Use the header part of adbCommand to | 48 | * - (Related to above) Use the header part of adbCommand to | |
49 | * reduce the number of copies we have to do of the data. | 49 | * reduce the number of copies we have to do of the data. | |
50 | * - (Related to above) Actually implement the adbOutbound queue. | 50 | * - (Related to above) Actually implement the adbOutbound queue. | |
51 | * This is fairly easy once you switch all the intr routines | 51 | * This is fairly easy once you switch all the intr routines | |
52 | * over to using adbCommand structs directly. | 52 | * over to using adbCommand structs directly. | |
53 | * - There is a bug in the state machine of adb_intr_cuda | 53 | * - There is a bug in the state machine of adb_intr_cuda | |
54 | * code that causes hangs, especially on 030 machines, probably | 54 | * code that causes hangs, especially on 030 machines, probably | |
55 | * because of some timing issues. Because I have been unable to | 55 | * because of some timing issues. Because I have been unable to | |
56 | * determine the exact cause of this bug, I used the timeout function | 56 | * determine the exact cause of this bug, I used the timeout function | |
57 | * to check for and recover from this condition. If anyone finds | 57 | * to check for and recover from this condition. If anyone finds | |
58 | * the actual cause of this bug, the calls to timeout and the | 58 | * the actual cause of this bug, the calls to timeout and the | |
59 | * adb_cuda_tickle routine can be removed. | 59 | * adb_cuda_tickle routine can be removed. | |
60 | */ | 60 | */ | |
61 | 61 | |||
62 | #include <sys/cdefs.h> | 62 | #include <sys/cdefs.h> | |
63 | __KERNEL_RCSID(0, "$NetBSD: adb_direct.c,v 1.45 2023/09/21 09:31:49 msaitoh Exp $"); | 63 | __KERNEL_RCSID(0, "$NetBSD: adb_direct.c,v 1.46 2024/03/05 20:58:05 andvar Exp $"); | |
64 | 64 | |||
65 | #include <sys/param.h> | 65 | #include <sys/param.h> | |
66 | #include <sys/systm.h> | 66 | #include <sys/systm.h> | |
67 | #include <sys/callout.h> | 67 | #include <sys/callout.h> | |
68 | #include <sys/device.h> | 68 | #include <sys/device.h> | |
69 | 69 | |||
70 | #include <machine/cpu.h> | 70 | #include <machine/cpu.h> | |
71 | #include <machine/autoconf.h> | 71 | #include <machine/autoconf.h> | |
72 | #include <machine/adbsys.h> | 72 | #include <machine/adbsys.h> | |
73 | #include <machine/pio.h> | 73 | #include <machine/pio.h> | |
74 | 74 | |||
75 | #include <macppc/dev/viareg.h> | 75 | #include <macppc/dev/viareg.h> | |
76 | #include <macppc/dev/adbvar.h> | 76 | #include <macppc/dev/adbvar.h> | |
77 | #include <macppc/dev/pm_direct.h> | 77 | #include <macppc/dev/pm_direct.h> | |
78 | 78 | |||
79 | #define printf_intr printf | 79 | #define printf_intr printf | |
80 | 80 | |||
81 | #ifdef DEBUG | 81 | #ifdef DEBUG | |
82 | #ifndef ADB_DEBUG | 82 | #ifndef ADB_DEBUG | |
83 | #define ADB_DEBUG | 83 | #define ADB_DEBUG | |
84 | #endif | 84 | #endif | |
85 | #endif | 85 | #endif | |
86 | 86 | |||
87 | /* some misc. leftovers */ | 87 | /* some misc. leftovers */ | |
88 | #define vPB 0x0000 | 88 | #define vPB 0x0000 | |
89 | #define vPB3 0x08 | 89 | #define vPB3 0x08 | |
90 | #define vPB4 0x10 | 90 | #define vPB4 0x10 | |
91 | #define vPB5 0x20 | 91 | #define vPB5 0x20 | |
92 | #define vSR_INT 0x04 | 92 | #define vSR_INT 0x04 | |
93 | #define vSR_OUT 0x10 | 93 | #define vSR_OUT 0x10 | |
94 | 94 | |||
95 | /* the type of ADB action that we are currently preforming */ | 95 | /* the type of ADB action that we are currently preforming */ | |
96 | #define ADB_ACTION_NOTREADY 0x1 /* has not been initialized yet */ | 96 | #define ADB_ACTION_NOTREADY 0x1 /* has not been initialized yet */ | |
97 | #define ADB_ACTION_IDLE 0x2 /* the bus is currently idle */ | 97 | #define ADB_ACTION_IDLE 0x2 /* the bus is currently idle */ | |
98 | #define ADB_ACTION_OUT 0x3 /* sending out a command */ | 98 | #define ADB_ACTION_OUT 0x3 /* sending out a command */ | |
99 | #define ADB_ACTION_IN 0x4 /* receiving data */ | 99 | #define ADB_ACTION_IN 0x4 /* receiving data */ | |
100 | #define ADB_ACTION_POLLING 0x5 /* polling - II only */ | 100 | #define ADB_ACTION_POLLING 0x5 /* polling - II only */ | |
101 | 101 | |||
102 | /* | 102 | /* | |
103 | * These describe the state of the ADB bus itself, although they | 103 | * These describe the state of the ADB bus itself, although they | |
104 | * don't necessarily correspond directly to ADB states. | 104 | * don't necessarily correspond directly to ADB states. | |
105 | * Note: these are not really used in the IIsi code. | 105 | * Note: these are not really used in the IIsi code. | |
106 | */ | 106 | */ | |
107 | #define ADB_BUS_UNKNOWN 0x1 /* we don't know yet - all models */ | 107 | #define ADB_BUS_UNKNOWN 0x1 /* we don't know yet - all models */ | |
108 | #define ADB_BUS_IDLE 0x2 /* bus is idle - all models */ | 108 | #define ADB_BUS_IDLE 0x2 /* bus is idle - all models */ | |
109 | #define ADB_BUS_CMD 0x3 /* starting a command - II models */ | 109 | #define ADB_BUS_CMD 0x3 /* starting a command - II models */ | |
110 | #define ADB_BUS_ODD 0x4 /* the "odd" state - II models */ | 110 | #define ADB_BUS_ODD 0x4 /* the "odd" state - II models */ | |
111 | #define ADB_BUS_EVEN 0x5 /* the "even" state - II models */ | 111 | #define ADB_BUS_EVEN 0x5 /* the "even" state - II models */ | |
112 | #define ADB_BUS_ACTIVE 0x6 /* active state - IIsi models */ | 112 | #define ADB_BUS_ACTIVE 0x6 /* active state - IIsi models */ | |
113 | #define ADB_BUS_ACK 0x7 /* currently ACKing - IIsi models */ | 113 | #define ADB_BUS_ACK 0x7 /* currently ACKing - IIsi models */ | |
114 | 114 | |||
115 | /* | 115 | /* | |
116 | * Shortcuts for setting or testing the VIA bit states. | 116 | * Shortcuts for setting or testing the VIA bit states. | |
117 | * Not all shortcuts are used for every type of ADB hardware. | 117 | * Not all shortcuts are used for every type of ADB hardware. | |
118 | */ | 118 | */ | |
119 | #define ADB_SET_STATE_IDLE_CUDA() via_reg_or(VIA1, vBufB, (vPB4 | vPB5)) | 119 | #define ADB_SET_STATE_IDLE_CUDA() via_reg_or(VIA1, vBufB, (vPB4 | vPB5)) | |
120 | #define ADB_SET_STATE_TIP() via_reg_and(VIA1, vBufB, ~vPB5) | 120 | #define ADB_SET_STATE_TIP() via_reg_and(VIA1, vBufB, ~vPB5) | |
121 | #define ADB_CLR_STATE_TIP() via_reg_or(VIA1, vBufB, vPB5) | 121 | #define ADB_CLR_STATE_TIP() via_reg_or(VIA1, vBufB, vPB5) | |
122 | #define ADB_TOGGLE_STATE_ACK_CUDA() via_reg_xor(VIA1, vBufB, vPB4) | 122 | #define ADB_TOGGLE_STATE_ACK_CUDA() via_reg_xor(VIA1, vBufB, vPB4) | |
123 | #define ADB_SET_STATE_ACKOFF_CUDA() via_reg_or(VIA1, vBufB, vPB4) | 123 | #define ADB_SET_STATE_ACKOFF_CUDA() via_reg_or(VIA1, vBufB, vPB4) | |
124 | #define ADB_SET_SR_INPUT() via_reg_and(VIA1, vACR, ~vSR_OUT) | 124 | #define ADB_SET_SR_INPUT() via_reg_and(VIA1, vACR, ~vSR_OUT) | |
125 | #define ADB_SET_SR_OUTPUT() via_reg_or(VIA1, vACR, vSR_OUT) | 125 | #define ADB_SET_SR_OUTPUT() via_reg_or(VIA1, vACR, vSR_OUT) | |
126 | #define ADB_SR() read_via_reg(VIA1, vSR) | 126 | #define ADB_SR() read_via_reg(VIA1, vSR) | |
127 | #define ADB_VIA_INTR_ENABLE() write_via_reg(VIA1, vIER, 0x84) | 127 | #define ADB_VIA_INTR_ENABLE() write_via_reg(VIA1, vIER, 0x84) | |
128 | #define ADB_VIA_INTR_DISABLE() write_via_reg(VIA1, vIER, 0x04) | 128 | #define ADB_VIA_INTR_DISABLE() write_via_reg(VIA1, vIER, 0x04) | |
129 | #define ADB_INTR_IS_OFF (vPB3 == (read_via_reg(VIA1, vBufB) & vPB3)) | 129 | #define ADB_INTR_IS_OFF (vPB3 == (read_via_reg(VIA1, vBufB) & vPB3)) | |
130 | #define ADB_INTR_IS_ON (0 == (read_via_reg(VIA1, vBufB) & vPB3)) | 130 | #define ADB_INTR_IS_ON (0 == (read_via_reg(VIA1, vBufB) & vPB3)) | |
131 | #define ADB_SR_INTR_IS_OFF (0 == (read_via_reg(VIA1, vIFR) & vSR_INT)) | 131 | #define ADB_SR_INTR_IS_OFF (0 == (read_via_reg(VIA1, vIFR) & vSR_INT)) | |
132 | #define ADB_SR_INTR_IS_ON (vSR_INT == (read_via_reg(VIA1, \ | 132 | #define ADB_SR_INTR_IS_ON (vSR_INT == (read_via_reg(VIA1, \ | |
133 | vIFR) & vSR_INT)) | 133 | vIFR) & vSR_INT)) | |
134 | 134 | |||
135 | /* | 135 | /* | |
136 | * This is the delay that is required (in uS) between certain | 136 | * This is the delay that is required (in uS) between certain | |
137 | * ADB transactions. The actual timing delay for each uS is | 137 | * ADB transactions. The actual timing delay for each uS is | |
138 | * calculated at boot time to account for differences in machine speed. | 138 | * calculated at boot time to account for differences in machine speed. | |
139 | */ | 139 | */ | |
140 | #define ADB_DELAY 150 | 140 | #define ADB_DELAY 150 | |
141 | 141 | |||
142 | /* | 142 | /* | |
143 | * Maximum ADB message length; includes space for data, result, and | 143 | * Maximum ADB message length; includes space for data, result, and | |
144 | * device code - plus a little for safety. | 144 | * device code - plus a little for safety. | |
145 | */ | 145 | */ | |
146 | #define ADB_MAX_MSG_LENGTH 16 | 146 | #define ADB_MAX_MSG_LENGTH 16 | |
147 | #define ADB_MAX_HDR_LENGTH 8 | 147 | #define ADB_MAX_HDR_LENGTH 8 | |
148 | 148 | |||
149 | #define ADB_QUEUE 32 | 149 | #define ADB_QUEUE 32 | |
150 | #define ADB_TICKLE_TICKS 4 | 150 | #define ADB_TICKLE_TICKS 4 | |
151 | 151 | |||
152 | /* | 152 | /* | |
153 | * A structure for storing information about each ADB device. | 153 | * A structure for storing information about each ADB device. | |
154 | */ | 154 | */ | |
155 | struct ADBDevEntry { | 155 | struct ADBDevEntry { | |
156 | void (*ServiceRtPtr)(void); | 156 | void (*ServiceRtPtr)(void); | |
157 | void *DataAreaAddr; | 157 | void *DataAreaAddr; | |
158 | int devType; | 158 | int devType; | |
159 | int origAddr; | 159 | int origAddr; | |
160 | int currentAddr; | 160 | int currentAddr; | |
161 | }; | 161 | }; | |
162 | 162 | |||
163 | /* | 163 | /* | |
164 | * Used to hold ADB commands that are waiting to be sent out. | 164 | * Used to hold ADB commands that are waiting to be sent out. | |
165 | */ | 165 | */ | |
166 | struct adbCmdHoldEntry { | 166 | struct adbCmdHoldEntry { | |
167 | u_char outBuf[ADB_MAX_MSG_LENGTH]; /* our message */ | 167 | u_char outBuf[ADB_MAX_MSG_LENGTH]; /* our message */ | |
168 | u_char *saveBuf; /* buffer to know where to save result */ | 168 | u_char *saveBuf; /* buffer to know where to save result */ | |
169 | adbComp *compRout; /* completion routine pointer */ | 169 | adbComp *compRout; /* completion routine pointer */ | |
170 | int *data; /* completion routine data pointer */ | 170 | int *data; /* completion routine data pointer */ | |
171 | }; | 171 | }; | |
172 | 172 | |||
173 | /* | 173 | /* | |
174 | * Eventually used for two separate queues, the queue between | 174 | * Eventually used for two separate queues, the queue between | |
175 | * the upper and lower halves, and the outgoing packet queue. | 175 | * the upper and lower halves, and the outgoing packet queue. | |
176 | * TO DO: adbCommand can replace all of adbCmdHoldEntry eventually | 176 | * TO DO: adbCommand can replace all of adbCmdHoldEntry eventually | |
177 | */ | 177 | */ | |
178 | struct adbCommand { | 178 | struct adbCommand { | |
179 | u_char header[ADB_MAX_HDR_LENGTH]; /* not used yet */ | 179 | u_char header[ADB_MAX_HDR_LENGTH]; /* not used yet */ | |
180 | u_char data[ADB_MAX_MSG_LENGTH]; /* packet data only */ | 180 | u_char data[ADB_MAX_MSG_LENGTH]; /* packet data only */ | |
181 | u_char *saveBuf; /* where to save result */ | 181 | u_char *saveBuf; /* where to save result */ | |
182 | adbComp *compRout; /* completion routine pointer */ | 182 | adbComp *compRout; /* completion routine pointer */ | |
183 | volatile int *compData; /* completion routine data pointer */ | 183 | volatile int *compData; /* completion routine data pointer */ | |
184 | u_int cmd; /* the original command for this data */ | 184 | u_int cmd; /* the original command for this data */ | |
185 | u_int unsol; /* 1 if packet was unsolicited */ | 185 | u_int unsol; /* 1 if packet was unsolicited */ | |
186 | u_int ack_only; /* 1 for no special processing */ | 186 | u_int ack_only; /* 1 for no special processing */ | |
187 | }; | 187 | }; | |
188 | 188 | |||
189 | /* | 189 | /* | |
190 | * A few variables that we need and their initial values. | 190 | * A few variables that we need and their initial values. | |
191 | */ | 191 | */ | |
192 | int adbHardware = ADB_HW_UNKNOWN; | 192 | int adbHardware = ADB_HW_UNKNOWN; | |
193 | int adbActionState = ADB_ACTION_NOTREADY; | 193 | int adbActionState = ADB_ACTION_NOTREADY; | |
194 | int adbWaiting = 0; /* waiting for return data from the device */ | 194 | int adbWaiting = 0; /* waiting for return data from the device */ | |
195 | int adbWriteDelay = 0; /* working on (or waiting to do) a write */ | 195 | int adbWriteDelay = 0; /* working on (or waiting to do) a write */ | |
196 | 196 | |||
197 | int adbWaitingCmd = 0; /* ADB command we are waiting for */ | 197 | int adbWaitingCmd = 0; /* ADB command we are waiting for */ | |
198 | u_char *adbBuffer = (long)0; /* pointer to user data area */ | 198 | u_char *adbBuffer = (long)0; /* pointer to user data area */ | |
199 | adbComp *adbCompRout = NULL; /* pointer to the completion routine */ | 199 | adbComp *adbCompRout = NULL; /* pointer to the completion routine */ | |
200 | volatile int *adbCompData = NULL; /* pointer to the completion routine data */ | 200 | volatile int *adbCompData = NULL; /* pointer to the completion routine data */ | |
201 | int adbStarting = 1; /* doing ADBReInit so do polling differently */ | 201 | int adbStarting = 1; /* doing ADBReInit so do polling differently */ | |
202 | 202 | |||
203 | u_char adbInputBuffer[ADB_MAX_MSG_LENGTH]; /* data input buffer */ | 203 | u_char adbInputBuffer[ADB_MAX_MSG_LENGTH]; /* data input buffer */ | |
204 | u_char adbOutputBuffer[ADB_MAX_MSG_LENGTH]; /* data output buffer */ | 204 | u_char adbOutputBuffer[ADB_MAX_MSG_LENGTH]; /* data output buffer */ | |
205 | 205 | |||
206 | int adbSentChars = 0; /* how many characters we have sent */ | 206 | int adbSentChars = 0; /* how many characters we have sent */ | |
207 | 207 | |||
208 | struct ADBDevEntry ADBDevTable[16]; /* our ADB device table */ | 208 | struct ADBDevEntry ADBDevTable[16]; /* our ADB device table */ | |
209 | int ADBNumDevices; /* num. of ADB devices found with ADBReInit */ | 209 | int ADBNumDevices; /* num. of ADB devices found with ADBReInit */ | |
210 | 210 | |||
211 | struct adbCommand adbInbound[ADB_QUEUE]; /* incoming queue */ | 211 | struct adbCommand adbInbound[ADB_QUEUE]; /* incoming queue */ | |
212 | int adbInCount = 0; /* how many packets in in queue */ | 212 | int adbInCount = 0; /* how many packets in in queue */ | |
213 | int adbInHead = 0; /* head of in queue */ | 213 | int adbInHead = 0; /* head of in queue */ | |
214 | int adbInTail = 0; /* tail of in queue */ | 214 | int adbInTail = 0; /* tail of in queue */ | |
215 | struct adbCommand adbOutbound[ADB_QUEUE]; /* outgoing queue - not used yet */ | 215 | struct adbCommand adbOutbound[ADB_QUEUE]; /* outgoing queue - not used yet */ | |
216 | int adbOutCount = 0; /* how many packets in out queue */ | 216 | int adbOutCount = 0; /* how many packets in out queue */ | |
217 | int adbOutHead = 0; /* head of out queue */ | 217 | int adbOutHead = 0; /* head of out queue */ | |
218 | int adbOutTail = 0; /* tail of out queue */ | 218 | int adbOutTail = 0; /* tail of out queue */ | |
219 | 219 | |||
220 | int tickle_count = 0; /* how many tickles seen for this packet? */ | 220 | int tickle_count = 0; /* how many tickles seen for this packet? */ | |
221 | int tickle_serial = 0; /* the last packet tickled */ | 221 | int tickle_serial = 0; /* the last packet tickled */ | |
222 | int adb_cuda_serial = 0; /* the current packet */ | 222 | int adb_cuda_serial = 0; /* the current packet */ | |
223 | 223 | |||
224 | struct callout adb_cuda_tickle_ch; | 224 | struct callout adb_cuda_tickle_ch; | |
225 | struct callout adb_soft_intr_ch; | 225 | struct callout adb_soft_intr_ch; | |
226 | 226 | |||
227 | volatile uint8_t *Via1Base; | 227 | volatile uint8_t *Via1Base; | |
228 | extern int adb_polling; /* Are we polling? */ | 228 | extern int adb_polling; /* Are we polling? */ | |
229 | 229 | |||
230 | void pm_setup_adb(void); | 230 | void pm_setup_adb(void); | |
231 | void pm_check_adb_devices(int); | 231 | void pm_check_adb_devices(int); | |
232 | int pm_adb_op(u_char *, void *, volatile void *, int); | 232 | int pm_adb_op(u_char *, void *, volatile void *, int); | |
233 | void pm_init_adb_device(void); | 233 | void pm_init_adb_device(void); | |
234 | 234 | |||
235 | /* | 235 | /* | |
236 | * The following are private routines. | 236 | * The following are private routines. | |
237 | */ | 237 | */ | |
238 | #ifdef ADB_DEBUG | 238 | #ifdef ADB_DEBUG | |
239 | void print_single(u_char *); | 239 | void print_single(u_char *); | |
240 | #endif | 240 | #endif | |
241 | void adb_soft_intr(void); | 241 | void adb_soft_intr(void); | |
242 | int send_adb_cuda(u_char *, u_char *, adbComp *, volatile void *, int); | 242 | int send_adb_cuda(u_char *, u_char *, adbComp *, volatile void *, int); | |
243 | void adb_intr_cuda_test(void); | 243 | void adb_intr_cuda_test(void); | |
244 | void adb_cuda_tickle(void); | 244 | void adb_cuda_tickle(void); | |
245 | void adb_pass_up(struct adbCommand *); | 245 | void adb_pass_up(struct adbCommand *); | |
246 | void adb_op_comprout(void *, volatile int *, int); | 246 | void adb_op_comprout(void *, volatile int *, int); | |
247 | void adb_reinit(void); | 247 | void adb_reinit(void); | |
248 | int count_adbs(void); | 248 | int count_adbs(void); | |
249 | int get_ind_adb_info(ADBDataBlock *, int); | 249 | int get_ind_adb_info(ADBDataBlock *, int); | |
250 | int get_adb_info(ADBDataBlock *, int); | 250 | int get_adb_info(ADBDataBlock *, int); | |
251 | int set_adb_info(ADBSetInfoBlock *, int); | 251 | int set_adb_info(ADBSetInfoBlock *, int); | |
252 | void adb_setup_hw_type(void); | 252 | void adb_setup_hw_type(void); | |
253 | int adb_op (Ptr, adbComp *, volatile void *, short); | 253 | int adb_op (Ptr, adbComp *, volatile void *, short); | |
254 | int adb_op_sync(Ptr, adbComp *, Ptr, short); | 254 | int adb_op_sync(Ptr, adbComp *, Ptr, short); | |
255 | void adb_hw_setup(void); | 255 | void adb_hw_setup(void); | |
256 | int adb_cmd_result(u_char *); | 256 | int adb_cmd_result(u_char *); | |
257 | int adb_cmd_extra(u_char *); | 257 | int adb_cmd_extra(u_char *); | |
258 | /* we should create this and it will be the public version */ | 258 | /* we should create this and it will be the public version */ | |
259 | int send_adb(u_char *, void *, void *); | 259 | int send_adb(u_char *, void *, void *); | |
260 | 260 | |||
261 | int setsoftadb(void); | 261 | int setsoftadb(void); | |
262 | 262 | |||
263 | #ifdef ADB_DEBUG | 263 | #ifdef ADB_DEBUG | |
264 | /* | 264 | /* | |
265 | * print_single | 265 | * print_single | |
266 | * Diagnostic display routine. Displays the hex values of the | 266 | * Diagnostic display routine. Displays the hex values of the | |
267 | * specified elements of the u_char. The length of the "string" | 267 | * specified elements of the u_char. The length of the "string" | |
268 | * is in [0]. | 268 | * is in [0]. | |
269 | */ | 269 | */ | |
270 | void | 270 | void | |
271 | print_single(u_char *str) | 271 | print_single(u_char *str) | |
272 | { | 272 | { | |
273 | int x; | 273 | int x; | |
274 | 274 | |||
275 | if (str == 0) { | 275 | if (str == 0) { | |
276 | printf_intr("no data - null pointer\n"); | 276 | printf_intr("no data - null pointer\n"); | |
277 | return; | 277 | return; | |
278 | } | 278 | } | |
279 | if (*str == 0) { | 279 | if (*str == 0) { | |
280 | printf_intr("nothing returned\n"); | 280 | printf_intr("nothing returned\n"); | |
281 | return; | 281 | return; | |
282 | } | 282 | } | |
283 | if (*str > 20) { | 283 | if (*str > 20) { | |
284 | printf_intr("ADB: ACK > 20 no way!\n"); | 284 | printf_intr("ADB: ACK > 20 no way!\n"); | |
285 | *str = 20; | 285 | *str = 20; | |
286 | } | 286 | } | |
287 | printf_intr("(length=0x%x):", *str); | 287 | printf_intr("(length=0x%x):", *str); | |
288 | for (x = 1; x <= *str; x++) | 288 | for (x = 1; x <= *str; x++) | |
289 | printf_intr(" 0x%02x", str[x]); | 289 | printf_intr(" 0x%02x", str[x]); | |
290 | printf_intr("\n"); | 290 | printf_intr("\n"); | |
291 | } | 291 | } | |
292 | #endif | 292 | #endif | |
293 | 293 | |||
294 | void | 294 | void | |
295 | adb_cuda_tickle(void) | 295 | adb_cuda_tickle(void) | |
296 | { | 296 | { | |
297 | volatile int s; | 297 | volatile int s; | |
298 | 298 | |||
299 | if (adbActionState == ADB_ACTION_IN) { | 299 | if (adbActionState == ADB_ACTION_IN) { | |
300 | if (tickle_serial == adb_cuda_serial) { | 300 | if (tickle_serial == adb_cuda_serial) { | |
301 | if (++tickle_count > 0) { | 301 | if (++tickle_count > 0) { | |
302 | s = splhigh(); | 302 | s = splhigh(); | |
303 | adbActionState = ADB_ACTION_IDLE; | 303 | adbActionState = ADB_ACTION_IDLE; | |
304 | adbInputBuffer[0] = 0; | 304 | adbInputBuffer[0] = 0; | |
305 | ADB_SET_STATE_IDLE_CUDA(); | 305 | ADB_SET_STATE_IDLE_CUDA(); | |
306 | splx(s); | 306 | splx(s); | |
307 | } | 307 | } | |
308 | } else { | 308 | } else { | |
309 | tickle_serial = adb_cuda_serial; | 309 | tickle_serial = adb_cuda_serial; | |
310 | tickle_count = 0; | 310 | tickle_count = 0; | |
311 | } | 311 | } | |
312 | } else { | 312 | } else { | |
313 | tickle_serial = adb_cuda_serial; | 313 | tickle_serial = adb_cuda_serial; | |
314 | tickle_count = 0; | 314 | tickle_count = 0; | |
315 | } | 315 | } | |
316 | 316 | |||
317 | callout_reset(&adb_cuda_tickle_ch, ADB_TICKLE_TICKS, | 317 | callout_reset(&adb_cuda_tickle_ch, ADB_TICKLE_TICKS, | |
318 | (void *)adb_cuda_tickle, NULL); | 318 | (void *)adb_cuda_tickle, NULL); | |
319 | } | 319 | } | |
320 | 320 | |||
321 | /* | 321 | /* | |
322 | * called when when an adb interrupt happens | 322 | * called when an adb interrupt happens | |
323 | * | 323 | * | |
324 | * Cuda version of adb_intr | 324 | * Cuda version of adb_intr | |
325 | * TO DO: do we want to add some calls to intr_dispatch() here to | 325 | * TO DO: do we want to add some calls to intr_dispatch() here to | |
326 | * grab serial interrupts? | 326 | * grab serial interrupts? | |
327 | */ | 327 | */ | |
328 | int | 328 | int | |
329 | adb_intr_cuda(void *arg) | 329 | adb_intr_cuda(void *arg) | |
330 | { | 330 | { | |
331 | volatile int i, ending; | 331 | volatile int i, ending; | |
332 | volatile unsigned int s; | 332 | volatile unsigned int s; | |
333 | struct adbCommand packet; | 333 | struct adbCommand packet; | |
334 | uint8_t reg; | 334 | uint8_t reg; | |
335 | 335 | |||
336 | s = splhigh(); /* can't be too careful - might be called */ | 336 | s = splhigh(); /* can't be too careful - might be called */ | |
337 | /* from a routine, NOT an interrupt */ | 337 | /* from a routine, NOT an interrupt */ | |
338 | 338 | |||
339 | reg = read_via_reg(VIA1, vIFR); /* Read the interrupts */ | 339 | reg = read_via_reg(VIA1, vIFR); /* Read the interrupts */ | |
340 | if ((reg & 0x80) == 0) { | 340 | if ((reg & 0x80) == 0) { | |
341 | splx(s); | 341 | splx(s); | |
342 | return 0; /* No interrupts to process */ | 342 | return 0; /* No interrupts to process */ | |
343 | } | 343 | } | |
344 | 344 | |||
345 | write_via_reg(VIA1, vIFR, reg & 0x7f); /* Clear 'em */ | 345 | write_via_reg(VIA1, vIFR, reg & 0x7f); /* Clear 'em */ | |
346 | 346 | |||
347 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | 347 | ADB_VIA_INTR_DISABLE(); /* disable ADB interrupt on IIs. */ | |
348 | 348 | |||
349 | switch_start: | 349 | switch_start: | |
350 | switch (adbActionState) { | 350 | switch (adbActionState) { | |
351 | case ADB_ACTION_IDLE: | 351 | case ADB_ACTION_IDLE: | |
352 | /* | 352 | /* | |
353 | * This is an unexpected packet, so grab the first (dummy) | 353 | * This is an unexpected packet, so grab the first (dummy) | |
354 | * byte, set up the proper vars, and tell the chip we are | 354 | * byte, set up the proper vars, and tell the chip we are | |
355 | * starting to receive the packet by setting the TIP bit. | 355 | * starting to receive the packet by setting the TIP bit. | |
356 | */ | 356 | */ | |
357 | adbInputBuffer[1] = ADB_SR(); | 357 | adbInputBuffer[1] = ADB_SR(); | |
358 | adb_cuda_serial++; | 358 | adb_cuda_serial++; | |
359 | if (ADB_INTR_IS_OFF) /* must have been a fake start */ | 359 | if (ADB_INTR_IS_OFF) /* must have been a fake start */ | |
360 | break; | 360 | break; | |
361 | 361 | |||
362 | ADB_SET_SR_INPUT(); | 362 | ADB_SET_SR_INPUT(); | |
363 | ADB_SET_STATE_TIP(); | 363 | ADB_SET_STATE_TIP(); | |
364 | 364 | |||
365 | adbInputBuffer[0] = 1; | 365 | adbInputBuffer[0] = 1; | |
366 | adbActionState = ADB_ACTION_IN; | 366 | adbActionState = ADB_ACTION_IN; | |
367 | #ifdef ADB_DEBUG | 367 | #ifdef ADB_DEBUG | |
368 | if (adb_debug) | 368 | if (adb_debug) | |
369 | printf_intr("idle 0x%02x ", adbInputBuffer[1]); | 369 | printf_intr("idle 0x%02x ", adbInputBuffer[1]); | |
370 | #endif | 370 | #endif | |
371 | break; | 371 | break; | |
372 | 372 | |||
373 | case ADB_ACTION_IN: | 373 | case ADB_ACTION_IN: | |
374 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); | 374 | adbInputBuffer[++adbInputBuffer[0]] = ADB_SR(); | |
375 | /* intr off means this is the last byte (end of frame) */ | 375 | /* intr off means this is the last byte (end of frame) */ | |
376 | if (ADB_INTR_IS_OFF) | 376 | if (ADB_INTR_IS_OFF) | |
377 | ending = 1; | 377 | ending = 1; | |
378 | else | 378 | else | |
379 | ending = 0; | 379 | ending = 0; | |
380 | 380 | |||
381 | if (1 == ending) { /* end of message? */ | 381 | if (1 == ending) { /* end of message? */ | |
382 | #ifdef ADB_DEBUG | 382 | #ifdef ADB_DEBUG | |
383 | if (adb_debug) { | 383 | if (adb_debug) { | |
384 | printf_intr("in end 0x%02x ", | 384 | printf_intr("in end 0x%02x ", | |
385 | adbInputBuffer[adbInputBuffer[0]]); | 385 | adbInputBuffer[adbInputBuffer[0]]); | |
386 | print_single(adbInputBuffer); | 386 | print_single(adbInputBuffer); | |
387 | } | 387 | } | |
388 | #endif | 388 | #endif | |
389 | 389 | |||
390 | /* | 390 | /* | |
391 | * Are we waiting AND does this packet match what we | 391 | * Are we waiting AND does this packet match what we | |
392 | * are waiting for AND is it coming from either the | 392 | * are waiting for AND is it coming from either the | |
393 | * ADB or RTC/PRAM sub-device? This section _should_ | 393 | * ADB or RTC/PRAM sub-device? This section _should_ | |
394 | * recognize all ADB and RTC/PRAM type commands, but | 394 | * recognize all ADB and RTC/PRAM type commands, but | |
395 | * there may be more... NOTE: commands are always at | 395 | * there may be more... NOTE: commands are always at | |
396 | * [4], even for RTC/PRAM commands. | 396 | * [4], even for RTC/PRAM commands. | |
397 | */ | 397 | */ | |
398 | /* set up data for adb_pass_up */ | 398 | /* set up data for adb_pass_up */ | |
399 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | 399 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | |
400 | 400 | |||
401 | if ((adbWaiting == 1) && | 401 | if ((adbWaiting == 1) && | |
402 | (adbInputBuffer[4] == adbWaitingCmd) && | 402 | (adbInputBuffer[4] == adbWaitingCmd) && | |
403 | ((adbInputBuffer[2] == 0x00) || | 403 | ((adbInputBuffer[2] == 0x00) || | |
404 | (adbInputBuffer[2] == 0x01))) { | 404 | (adbInputBuffer[2] == 0x01))) { | |
405 | packet.saveBuf = adbBuffer; | 405 | packet.saveBuf = adbBuffer; | |
406 | packet.compRout = adbCompRout; | 406 | packet.compRout = adbCompRout; | |
407 | packet.compData = adbCompData; | 407 | packet.compData = adbCompData; | |
408 | packet.unsol = 0; | 408 | packet.unsol = 0; | |
409 | packet.ack_only = 0; | 409 | packet.ack_only = 0; | |
410 | adb_pass_up(&packet); | 410 | adb_pass_up(&packet); | |
411 | 411 | |||
412 | adbWaitingCmd = 0; /* reset "waiting" vars */ | 412 | adbWaitingCmd = 0; /* reset "waiting" vars */ | |
413 | adbWaiting = 0; | 413 | adbWaiting = 0; | |
414 | adbBuffer = (long)0; | 414 | adbBuffer = (long)0; | |
415 | adbCompRout = (long)0; | 415 | adbCompRout = (long)0; | |
416 | adbCompData = (long)0; | 416 | adbCompData = (long)0; | |
417 | } else { | 417 | } else { | |
418 | packet.unsol = 1; | 418 | packet.unsol = 1; | |
419 | packet.ack_only = 0; | 419 | packet.ack_only = 0; | |
420 | adb_pass_up(&packet); | 420 | adb_pass_up(&packet); | |
421 | } | 421 | } | |
422 | 422 | |||
423 | 423 | |||
424 | /* reset vars and signal the end of this frame */ | 424 | /* reset vars and signal the end of this frame */ | |
425 | adbActionState = ADB_ACTION_IDLE; | 425 | adbActionState = ADB_ACTION_IDLE; | |
426 | adbInputBuffer[0] = 0; | 426 | adbInputBuffer[0] = 0; | |
427 | ADB_SET_STATE_IDLE_CUDA(); | 427 | ADB_SET_STATE_IDLE_CUDA(); | |
428 | /*ADB_SET_SR_INPUT();*/ | 428 | /*ADB_SET_SR_INPUT();*/ | |
429 | 429 | |||
430 | /* | 430 | /* | |
431 | * If there is something waiting to be sent out, | 431 | * If there is something waiting to be sent out, | |
 432 | * then set everything up and send the first byte. | 432 | * then set everything up and send the first byte. | |
433 | */ | 433 | */ | |
434 | if (adbWriteDelay == 1) { | 434 | if (adbWriteDelay == 1) { | |
435 | delay(ADB_DELAY); /* required */ | 435 | delay(ADB_DELAY); /* required */ | |
436 | adbSentChars = 0; | 436 | adbSentChars = 0; | |
437 | adbActionState = ADB_ACTION_OUT; | 437 | adbActionState = ADB_ACTION_OUT; | |
438 | /* | 438 | /* | |
439 | * If the interrupt is on, we were too slow | 439 | * If the interrupt is on, we were too slow | |
440 | * and the chip has already started to send | 440 | * and the chip has already started to send | |
441 | * something to us, so back out of the write | 441 | * something to us, so back out of the write | |
442 | * and start a read cycle. | 442 | * and start a read cycle. | |
443 | */ | 443 | */ | |
444 | if (ADB_INTR_IS_ON) { | 444 | if (ADB_INTR_IS_ON) { | |
445 | ADB_SET_SR_INPUT(); | 445 | ADB_SET_SR_INPUT(); | |
446 | ADB_SET_STATE_IDLE_CUDA(); | 446 | ADB_SET_STATE_IDLE_CUDA(); | |
447 | adbSentChars = 0; | 447 | adbSentChars = 0; | |
448 | adbActionState = ADB_ACTION_IDLE; | 448 | adbActionState = ADB_ACTION_IDLE; | |
449 | adbInputBuffer[0] = 0; | 449 | adbInputBuffer[0] = 0; | |
450 | break; | 450 | break; | |
451 | } | 451 | } | |
452 | /* | 452 | /* | |
453 | * If we got here, it's ok to start sending | 453 | * If we got here, it's ok to start sending | |
454 | * so load the first byte and tell the chip | 454 | * so load the first byte and tell the chip | |
455 | * we want to send. | 455 | * we want to send. | |
456 | */ | 456 | */ | |
457 | ADB_SET_STATE_TIP(); | 457 | ADB_SET_STATE_TIP(); | |
458 | ADB_SET_SR_OUTPUT(); | 458 | ADB_SET_SR_OUTPUT(); | |
459 | write_via_reg(VIA1, vSR, adbOutputBuffer[adbSentChars + 1]); | 459 | write_via_reg(VIA1, vSR, adbOutputBuffer[adbSentChars + 1]); | |
460 | } | 460 | } | |
461 | } else { | 461 | } else { | |
462 | ADB_TOGGLE_STATE_ACK_CUDA(); | 462 | ADB_TOGGLE_STATE_ACK_CUDA(); | |
463 | #ifdef ADB_DEBUG | 463 | #ifdef ADB_DEBUG | |
464 | if (adb_debug) | 464 | if (adb_debug) | |
465 | printf_intr("in 0x%02x ", | 465 | printf_intr("in 0x%02x ", | |
466 | adbInputBuffer[adbInputBuffer[0]]); | 466 | adbInputBuffer[adbInputBuffer[0]]); | |
467 | #endif | 467 | #endif | |
468 | } | 468 | } | |
469 | break; | 469 | break; | |
470 | 470 | |||
471 | case ADB_ACTION_OUT: | 471 | case ADB_ACTION_OUT: | |
472 | i = ADB_SR(); /* reset SR-intr in IFR */ | 472 | i = ADB_SR(); /* reset SR-intr in IFR */ | |
473 | #ifdef ADB_DEBUG | 473 | #ifdef ADB_DEBUG | |
474 | if (adb_debug) | 474 | if (adb_debug) | |
475 | printf_intr("intr out 0x%02x ", i); | 475 | printf_intr("intr out 0x%02x ", i); | |
476 | #endif | 476 | #endif | |
477 | 477 | |||
478 | adbSentChars++; | 478 | adbSentChars++; | |
479 | if (ADB_INTR_IS_ON) { /* ADB intr low during write */ | 479 | if (ADB_INTR_IS_ON) { /* ADB intr low during write */ | |
480 | #ifdef ADB_DEBUG | 480 | #ifdef ADB_DEBUG | |
481 | if (adb_debug) | 481 | if (adb_debug) | |
482 | printf_intr("intr was on "); | 482 | printf_intr("intr was on "); | |
483 | #endif | 483 | #endif | |
484 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | 484 | ADB_SET_SR_INPUT(); /* make sure SR is set to IN */ | |
485 | ADB_SET_STATE_IDLE_CUDA(); | 485 | ADB_SET_STATE_IDLE_CUDA(); | |
486 | adbSentChars = 0; /* must start all over */ | 486 | adbSentChars = 0; /* must start all over */ | |
487 | adbActionState = ADB_ACTION_IDLE; /* new state */ | 487 | adbActionState = ADB_ACTION_IDLE; /* new state */ | |
488 | adbInputBuffer[0] = 0; | 488 | adbInputBuffer[0] = 0; | |
489 | adbWriteDelay = 1; /* must retry when done with | 489 | adbWriteDelay = 1; /* must retry when done with | |
490 | * read */ | 490 | * read */ | |
491 | delay(ADB_DELAY); | 491 | delay(ADB_DELAY); | |
492 | goto switch_start; /* process next state right | 492 | goto switch_start; /* process next state right | |
493 | * now */ | 493 | * now */ | |
494 | break; | 494 | break; | |
495 | } | 495 | } | |
496 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | 496 | if (adbOutputBuffer[0] == adbSentChars) { /* check for done */ | |
497 | if (0 == adb_cmd_result(adbOutputBuffer)) { /* do we expect data | 497 | if (0 == adb_cmd_result(adbOutputBuffer)) { /* do we expect data | |
498 | * back? */ | 498 | * back? */ | |
499 | adbWaiting = 1; /* signal waiting for return */ | 499 | adbWaiting = 1; /* signal waiting for return */ | |
500 | adbWaitingCmd = adbOutputBuffer[2]; /* save waiting command */ | 500 | adbWaitingCmd = adbOutputBuffer[2]; /* save waiting command */ | |
501 | } else { /* no talk, so done */ | 501 | } else { /* no talk, so done */ | |
502 | /* set up stuff for adb_pass_up */ | 502 | /* set up stuff for adb_pass_up */ | |
503 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | 503 | memcpy(packet.data, adbInputBuffer, adbInputBuffer[0] + 1); | |
504 | packet.saveBuf = adbBuffer; | 504 | packet.saveBuf = adbBuffer; | |
505 | packet.compRout = adbCompRout; | 505 | packet.compRout = adbCompRout; | |
506 | packet.compData = adbCompData; | 506 | packet.compData = adbCompData; | |
507 | packet.cmd = adbWaitingCmd; | 507 | packet.cmd = adbWaitingCmd; | |
508 | packet.unsol = 0; | 508 | packet.unsol = 0; | |
509 | packet.ack_only = 1; | 509 | packet.ack_only = 1; | |
510 | adb_pass_up(&packet); | 510 | adb_pass_up(&packet); | |
511 | 511 | |||
512 | /* reset "waiting" vars, just in case */ | 512 | /* reset "waiting" vars, just in case */ | |
513 | adbWaitingCmd = 0; | 513 | adbWaitingCmd = 0; | |
514 | adbBuffer = (long)0; | 514 | adbBuffer = (long)0; | |
515 | adbCompRout = NULL; | 515 | adbCompRout = NULL; | |
516 | adbCompData = NULL; | 516 | adbCompData = NULL; | |
517 | } | 517 | } | |
518 | 518 | |||
519 | adbWriteDelay = 0; /* done writing */ | 519 | adbWriteDelay = 0; /* done writing */ | |
520 | adbActionState = ADB_ACTION_IDLE; /* signal bus is idle */ | 520 | adbActionState = ADB_ACTION_IDLE; /* signal bus is idle */ | |
521 | ADB_SET_SR_INPUT(); | 521 | ADB_SET_SR_INPUT(); | |
522 | ADB_SET_STATE_IDLE_CUDA(); | 522 | ADB_SET_STATE_IDLE_CUDA(); | |
523 | #ifdef ADB_DEBUG | 523 | #ifdef ADB_DEBUG | |
524 | if (adb_debug) | 524 | if (adb_debug) | |
525 | printf_intr("write done "); | 525 | printf_intr("write done "); | |
526 | #endif | 526 | #endif | |
527 | } else { | 527 | } else { | |
528 | write_via_reg(VIA1, vSR, adbOutputBuffer[adbSentChars + 1]); /* send next byte */ | 528 | write_via_reg(VIA1, vSR, adbOutputBuffer[adbSentChars + 1]); /* send next byte */ | |
529 | ADB_TOGGLE_STATE_ACK_CUDA(); /* signal byte ready to | 529 | ADB_TOGGLE_STATE_ACK_CUDA(); /* signal byte ready to | |
530 | * shift */ | 530 | * shift */ | |
531 | #ifdef ADB_DEBUG | 531 | #ifdef ADB_DEBUG | |
532 | if (adb_debug) | 532 | if (adb_debug) | |
533 | printf_intr("toggle "); | 533 | printf_intr("toggle "); | |
534 | #endif | 534 | #endif | |
535 | } | 535 | } | |
536 | break; | 536 | break; | |
537 | 537 | |||
538 | case ADB_ACTION_NOTREADY: | 538 | case ADB_ACTION_NOTREADY: | |
539 | #ifdef ADB_DEBUG | 539 | #ifdef ADB_DEBUG | |
540 | if (adb_debug) | 540 | if (adb_debug) | |
541 | printf_intr("adb: not yet initialized\n"); | 541 | printf_intr("adb: not yet initialized\n"); | |
542 | #endif | 542 | #endif | |
543 | break; | 543 | break; | |
544 | 544 | |||
545 | default: | 545 | default: | |
546 | #ifdef ADB_DEBUG | 546 | #ifdef ADB_DEBUG | |
547 | if (adb_debug) | 547 | if (adb_debug) | |
548 | printf_intr("intr: unknown ADB state\n"); | 548 | printf_intr("intr: unknown ADB state\n"); | |
549 | #endif | 549 | #endif | |
550 | break; | 550 | break; | |
551 | } | 551 | } | |
552 | 552 | |||
553 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | 553 | ADB_VIA_INTR_ENABLE(); /* enable ADB interrupt on IIs. */ | |
554 | 554 | |||
555 | splx(s); /* restore */ | 555 | splx(s); /* restore */ | |
556 | 556 | |||
557 | return 1; | 557 | return 1; | |
558 | } /* end adb_intr_cuda */ | 558 | } /* end adb_intr_cuda */ | |
559 | 559 | |||
560 | 560 | |||
561 | int | 561 | int | |
562 | send_adb_cuda(u_char * in, u_char * buffer, adbComp *compRout, | 562 | send_adb_cuda(u_char * in, u_char * buffer, adbComp *compRout, | |
563 | volatile void *data, int command) | 563 | volatile void *data, int command) | |
564 | { | 564 | { | |
565 | int s, len; | 565 | int s, len; | |
566 | 566 | |||
567 | #ifdef ADB_DEBUG | 567 | #ifdef ADB_DEBUG | |
568 | if (adb_debug) | 568 | if (adb_debug) | |
569 | printf_intr("SEND\n"); | 569 | printf_intr("SEND\n"); | |
570 | #endif | 570 | #endif | |
571 | 571 | |||
572 | if (adbActionState == ADB_ACTION_NOTREADY) | 572 | if (adbActionState == ADB_ACTION_NOTREADY) | |
573 | return 1; | 573 | return 1; | |
574 | 574 | |||
575 | /* Don't interrupt while we are messing with the ADB */ | 575 | /* Don't interrupt while we are messing with the ADB */ | |
576 | s = splhigh(); | 576 | s = splhigh(); | |
577 | 577 | |||
578 | if ((adbActionState == ADB_ACTION_IDLE) && /* ADB available? */ | 578 | if ((adbActionState == ADB_ACTION_IDLE) && /* ADB available? */ | |
579 | (ADB_INTR_IS_OFF)) { /* and no incoming interrupt? */ | 579 | (ADB_INTR_IS_OFF)) { /* and no incoming interrupt? */ | |
580 | } else | 580 | } else | |
581 | if (adbWriteDelay == 0) /* it's busy, but is anything waiting? */ | 581 | if (adbWriteDelay == 0) /* it's busy, but is anything waiting? */ | |
582 | adbWriteDelay = 1; /* if no, then we'll "queue" | 582 | adbWriteDelay = 1; /* if no, then we'll "queue" | |
583 | * it up */ | 583 | * it up */ | |
584 | else { | 584 | else { | |
585 | splx(s); | 585 | splx(s); | |
586 | return 1; /* really busy! */ | 586 | return 1; /* really busy! */ | |
587 | } | 587 | } | |
588 | 588 | |||
589 | #ifdef ADB_DEBUG | 589 | #ifdef ADB_DEBUG | |
590 | if (adb_debug) | 590 | if (adb_debug) | |
591 | printf_intr("QUEUE\n"); | 591 | printf_intr("QUEUE\n"); | |
592 | #endif | 592 | #endif | |
593 | if ((long)in == (long)0) { /* need to convert? */ | 593 | if ((long)in == (long)0) { /* need to convert? */ | |
594 | /* | 594 | /* | |
595 | * Don't need to use adb_cmd_extra here because this section | 595 | * Don't need to use adb_cmd_extra here because this section | |
596 | * will be called ONLY when it is an ADB command (no RTC or | 596 | * will be called ONLY when it is an ADB command (no RTC or | |
597 | * PRAM) | 597 | * PRAM) | |
598 | */ | 598 | */ | |
599 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | 599 | if ((command & 0x0c) == 0x08) /* copy addl data ONLY if | |
600 | * doing a listen! */ | 600 | * doing a listen! */ | |
601 | len = buffer[0]; /* length of additional data */ | 601 | len = buffer[0]; /* length of additional data */ | |
602 | else | 602 | else | |
603 | len = 0;/* no additional data */ | 603 | len = 0;/* no additional data */ | |
604 | 604 | |||
605 | adbOutputBuffer[0] = 2 + len; /* dev. type + command + addl. | 605 | adbOutputBuffer[0] = 2 + len; /* dev. type + command + addl. | |
606 | * data */ | 606 | * data */ | |
607 | adbOutputBuffer[1] = 0x00; /* mark as an ADB command */ | 607 | adbOutputBuffer[1] = 0x00; /* mark as an ADB command */ | |
608 | adbOutputBuffer[2] = (u_char)command; /* load command */ | 608 | adbOutputBuffer[2] = (u_char)command; /* load command */ | |
609 | 609 | |||
610 | /* copy additional output data, if any */ | 610 | /* copy additional output data, if any */ | |
611 | memcpy(adbOutputBuffer + 3, buffer + 1, len); | 611 | memcpy(adbOutputBuffer + 3, buffer + 1, len); | |
612 | } else | 612 | } else | |
613 | /* if data ready, just copy over */ | 613 | /* if data ready, just copy over */ | |
614 | memcpy(adbOutputBuffer, in, in[0] + 2); | 614 | memcpy(adbOutputBuffer, in, in[0] + 2); | |
615 | 615 | |||
616 | adbSentChars = 0; /* nothing sent yet */ | 616 | adbSentChars = 0; /* nothing sent yet */ | |
617 | adbBuffer = buffer; /* save buffer to know where to save result */ | 617 | adbBuffer = buffer; /* save buffer to know where to save result */ | |
618 | adbCompRout = compRout; /* save completion routine pointer */ | 618 | adbCompRout = compRout; /* save completion routine pointer */ | |
619 | adbCompData = data; /* save completion routine data pointer */ | 619 | adbCompData = data; /* save completion routine data pointer */ | |
620 | adbWaitingCmd = adbOutputBuffer[2]; /* save wait command */ | 620 | adbWaitingCmd = adbOutputBuffer[2]; /* save wait command */ | |
621 | 621 | |||
622 | if (adbWriteDelay != 1) { /* start command now? */ | 622 | if (adbWriteDelay != 1) { /* start command now? */ | |
623 | #ifdef ADB_DEBUG | 623 | #ifdef ADB_DEBUG | |
624 | if (adb_debug) | 624 | if (adb_debug) | |
625 | printf_intr("out start NOW"); | 625 | printf_intr("out start NOW"); | |
626 | #endif | 626 | #endif | |
627 | delay(ADB_DELAY); | 627 | delay(ADB_DELAY); | |
628 | adbActionState = ADB_ACTION_OUT; /* set next state */ | 628 | adbActionState = ADB_ACTION_OUT; /* set next state */ | |
629 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | 629 | ADB_SET_SR_OUTPUT(); /* set shift register for OUT */ | |
630 | write_via_reg(VIA1, vSR, adbOutputBuffer[adbSentChars + 1]); /* load byte for output */ | 630 | write_via_reg(VIA1, vSR, adbOutputBuffer[adbSentChars + 1]); /* load byte for output */ | |
631 | ADB_SET_STATE_ACKOFF_CUDA(); | 631 | ADB_SET_STATE_ACKOFF_CUDA(); | |
632 | ADB_SET_STATE_TIP(); /* tell ADB that we want to send */ | 632 | ADB_SET_STATE_TIP(); /* tell ADB that we want to send */ | |
633 | } | 633 | } | |
634 | adbWriteDelay = 1; /* something in the write "queue" */ | 634 | adbWriteDelay = 1; /* something in the write "queue" */ | |
635 | 635 | |||
636 | splx(s); | 636 | splx(s); | |
637 | 637 | |||
638 | if ((s & (1 << 18)) || adb_polling) /* XXX were VIA1 interrupts blocked ? */ | 638 | if ((s & (1 << 18)) || adb_polling) /* XXX were VIA1 interrupts blocked ? */ | |
639 | /* poll until byte done */ | 639 | /* poll until byte done */ | |
640 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | 640 | while ((adbActionState != ADB_ACTION_IDLE) || (ADB_INTR_IS_ON) | |
641 | || (adbWaiting == 1)) | 641 | || (adbWaiting == 1)) | |
642 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | 642 | if (ADB_SR_INTR_IS_ON) { /* wait for "interrupt" */ | |
643 | adb_intr_cuda(NULL); /* process it */ | 643 | adb_intr_cuda(NULL); /* process it */ | |
644 | adb_soft_intr(); | 644 | adb_soft_intr(); | |
645 | } | 645 | } | |
646 | 646 | |||
647 | return 0; | 647 | return 0; | |
648 | } /* send_adb_cuda */ | 648 | } /* send_adb_cuda */ | |
649 | 649 | |||
650 | int | 650 | int | |
651 | adb_intr(void *arg) | 651 | adb_intr(void *arg) | |
652 | { | 652 | { | |
653 | switch (adbHardware) { | 653 | switch (adbHardware) { | |
654 | case ADB_HW_PMU: | 654 | case ADB_HW_PMU: | |
655 | return pm_intr(arg); | 655 | return pm_intr(arg); | |
656 | break; | 656 | break; | |
657 | 657 | |||
658 | case ADB_HW_CUDA: | 658 | case ADB_HW_CUDA: | |
659 | return adb_intr_cuda(arg); | 659 | return adb_intr_cuda(arg); | |
660 | break; | 660 | break; | |
661 | 661 | |||
662 | case ADB_HW_UNKNOWN: | 662 | case ADB_HW_UNKNOWN: | |
663 | break; | 663 | break; | |
664 | } | 664 | } | |
665 | return 0; | 665 | return 0; | |
666 | } | 666 | } | |
667 | 667 | |||
668 | 668 | |||
669 | /* | 669 | /* | |
670 | * adb_pass_up is called by the interrupt-time routines. | 670 | * adb_pass_up is called by the interrupt-time routines. | |
671 | * It takes the raw packet data that was received from the | 671 | * It takes the raw packet data that was received from the | |
672 | * device and puts it into the queue that the upper half | 672 | * device and puts it into the queue that the upper half | |
673 | * processes. It then signals for a soft ADB interrupt which | 673 | * processes. It then signals for a soft ADB interrupt which | |
674 | * will eventually call the upper half routine (adb_soft_intr). | 674 | * will eventually call the upper half routine (adb_soft_intr). | |
675 | * | 675 | * | |
676 | * If in->unsol is 0, then this is either the notification | 676 | * If in->unsol is 0, then this is either the notification | |
677 | * that the packet was sent (on a LISTEN, for example), or the | 677 | * that the packet was sent (on a LISTEN, for example), or the | |
678 | * response from the device (on a TALK). The completion routine | 678 | * response from the device (on a TALK). The completion routine | |
679 | * is called only if the user specified one. | 679 | * is called only if the user specified one. | |
680 | * | 680 | * | |
681 | * If in->unsol is 1, then this packet was unsolicited and | 681 | * If in->unsol is 1, then this packet was unsolicited and | |
682 | * so we look up the device in the ADB device table to determine | 682 | * so we look up the device in the ADB device table to determine | |
683 | * what its default service routine is. | 683 | * what its default service routine is. | |
684 | * | 684 | * | |
685 | * If in->ack_only is 1, then we really only need to call | 685 | * If in->ack_only is 1, then we really only need to call | |
686 | * the completion routine, so don't do any other stuff. | 686 | * the completion routine, so don't do any other stuff. | |
687 | * | 687 | * | |
688 | * Note that in->data contains the packet header AND data, | 688 | * Note that in->data contains the packet header AND data, | |
689 | * while adbInbound[]->data contains ONLY data. | 689 | * while adbInbound[]->data contains ONLY data. | |
690 | * | 690 | * | |
691 | * Note: Called only at interrupt time. Assumes this. | 691 | * Note: Called only at interrupt time. Assumes this. | |
692 | */ | 692 | */ | |
693 | void | 693 | void | |
694 | adb_pass_up(struct adbCommand *in) | 694 | adb_pass_up(struct adbCommand *in) | |
695 | { | 695 | { | |
696 | int start = 0, len = 0, cmd = 0; | 696 | int start = 0, len = 0, cmd = 0; | |
697 | ADBDataBlock block; | 697 | ADBDataBlock block; | |
698 | 698 | |||
699 | /* temp for testing */ | 699 | /* temp for testing */ | |
700 | /*u_char *buffer = 0;*/ | 700 | /*u_char *buffer = 0;*/ | |
701 | /*u_char *compdata = 0;*/ | 701 | /*u_char *compdata = 0;*/ | |
702 | /*u_char *comprout = 0;*/ | 702 | /*u_char *comprout = 0;*/ | |
703 | 703 | |||
704 | if (adbInCount >= ADB_QUEUE) { | 704 | if (adbInCount >= ADB_QUEUE) { | |
705 | #ifdef ADB_DEBUG | 705 | #ifdef ADB_DEBUG | |
706 | if (adb_debug) | 706 | if (adb_debug) | |
707 | printf_intr("adb: ring buffer overflow\n"); | 707 | printf_intr("adb: ring buffer overflow\n"); | |
708 | #endif | 708 | #endif | |
709 | return; | 709 | return; | |
710 | } | 710 | } | |
711 | 711 | |||
712 | if (in->ack_only) { | 712 | if (in->ack_only) { | |
713 | len = in->data[0]; | 713 | len = in->data[0]; | |
714 | cmd = in->cmd; | 714 | cmd = in->cmd; | |
715 | start = 0; | 715 | start = 0; | |
716 | } else { | 716 | } else { | |
717 | switch (adbHardware) { | 717 | switch (adbHardware) { | |
718 | case ADB_HW_CUDA: | 718 | case ADB_HW_CUDA: | |
719 | /* If it's unsolicited, accept only ADB data for now */ | 719 | /* If it's unsolicited, accept only ADB data for now */ | |
720 | if (in->unsol) | 720 | if (in->unsol) | |
721 | if (0 != in->data[2]) | 721 | if (0 != in->data[2]) | |
722 | return; | 722 | return; | |
723 | cmd = in->data[4]; | 723 | cmd = in->data[4]; | |
724 | if (in->data[0] < 5) | 724 | if (in->data[0] < 5) | |
725 | len = 0; | 725 | len = 0; | |
726 | else | 726 | else | |
727 | len = in->data[0]-4; | 727 | len = in->data[0]-4; | |
728 | start = 4; | 728 | start = 4; | |
729 | break; | 729 | break; | |
730 | 730 | |||
731 | case ADB_HW_PMU: | 731 | case ADB_HW_PMU: | |
732 | cmd = in->data[1]; | 732 | cmd = in->data[1]; | |
733 | if (in->data[0] < 2) | 733 | if (in->data[0] < 2) | |
734 | len = 0; | 734 | len = 0; | |
735 | else | 735 | else | |
736 | len = in->data[0]-1; | 736 | len = in->data[0]-1; | |
737 | start = 1; | 737 | start = 1; | |
738 | break; | 738 | break; | |
739 | 739 | |||
740 | case ADB_HW_UNKNOWN: | 740 | case ADB_HW_UNKNOWN: | |
741 | return; | 741 | return; | |
742 | } | 742 | } | |
743 | 743 | |||
744 | /* Make sure there is a valid device entry for this device */ | 744 | /* Make sure there is a valid device entry for this device */ | |
745 | if (in->unsol) { | 745 | if (in->unsol) { | |
746 | /* ignore unsolicited data during adbreinit */ | 746 | /* ignore unsolicited data during adbreinit */ | |
747 | if (adbStarting) | 747 | if (adbStarting) | |
748 | return; | 748 | return; | |
749 | /* get device's comp. routine and data area */ | 749 | /* get device's comp. routine and data area */ | |
750 | if (-1 == get_adb_info(&block, ADB_CMDADDR(cmd))) | 750 | if (-1 == get_adb_info(&block, ADB_CMDADDR(cmd))) | |
751 | return; | 751 | return; | |
752 | } | 752 | } | |
753 | } | 753 | } | |
754 | 754 | |||
755 | /* | 755 | /* | |
756 | * If this is an unsolicited packet, we need to fill in | 756 | * If this is an unsolicited packet, we need to fill in | |
757 | * some info so adb_soft_intr can process this packet | 757 | * some info so adb_soft_intr can process this packet | |
758 | * properly. If it's not unsolicited, then use what | 758 | * properly. If it's not unsolicited, then use what | |
759 | * the caller sent us. | 759 | * the caller sent us. | |
760 | */ | 760 | */ | |
761 | if (in->unsol) { | 761 | if (in->unsol) { | |
762 | adbInbound[adbInTail].compRout = (void *)block.dbServiceRtPtr; | 762 | adbInbound[adbInTail].compRout = (void *)block.dbServiceRtPtr; | |
763 | adbInbound[adbInTail].compData = (void *)block.dbDataAreaAddr; | 763 | adbInbound[adbInTail].compData = (void *)block.dbDataAreaAddr; | |
764 | adbInbound[adbInTail].saveBuf = (void *)adbInbound[adbInTail].data; | 764 | adbInbound[adbInTail].saveBuf = (void *)adbInbound[adbInTail].data; | |
765 | } else { | 765 | } else { | |
766 | adbInbound[adbInTail].compRout = in->compRout; | 766 | adbInbound[adbInTail].compRout = in->compRout; | |
767 | adbInbound[adbInTail].compData = in->compData; | 767 | adbInbound[adbInTail].compData = in->compData; | |
768 | adbInbound[adbInTail].saveBuf = in->saveBuf; | 768 | adbInbound[adbInTail].saveBuf = in->saveBuf; | |
769 | } | 769 | } | |
770 | 770 | |||
771 | #ifdef ADB_DEBUG | 771 | #ifdef ADB_DEBUG | |
772 | if (adb_debug && in->data[1] == 2) | 772 | if (adb_debug && in->data[1] == 2) | |
773 | printf_intr("adb: caught error\n"); | 773 | printf_intr("adb: caught error\n"); | |
774 | #endif | 774 | #endif | |
775 | 775 | |||
776 | /* copy the packet data over */ | 776 | /* copy the packet data over */ | |
777 | /* | 777 | /* | |
778 | * TO DO: If the *_intr routines fed their incoming data | 778 | * TO DO: If the *_intr routines fed their incoming data | |
779 | * directly into an adbCommand struct, which is passed to | 779 | * directly into an adbCommand struct, which is passed to | |
780 | * this routine, then we could eliminate this copy. | 780 | * this routine, then we could eliminate this copy. | |
781 | */ | 781 | */ | |
782 | memcpy(adbInbound[adbInTail].data + 1, in->data + start + 1, len); | 782 | memcpy(adbInbound[adbInTail].data + 1, in->data + start + 1, len); | |
783 | adbInbound[adbInTail].data[0] = len; | 783 | adbInbound[adbInTail].data[0] = len; | |
784 | adbInbound[adbInTail].cmd = cmd; | 784 | adbInbound[adbInTail].cmd = cmd; | |
785 | 785 | |||
786 | adbInCount++; | 786 | adbInCount++; | |
787 | if (++adbInTail >= ADB_QUEUE) | 787 | if (++adbInTail >= ADB_QUEUE) | |
788 | adbInTail = 0; | 788 | adbInTail = 0; | |
789 | 789 | |||
790 | /* | 790 | /* | |
791 | * If the debugger is running, call upper half manually. | 791 | * If the debugger is running, call upper half manually. | |
792 | * Otherwise, trigger a soft interrupt to handle the rest later. | 792 | * Otherwise, trigger a soft interrupt to handle the rest later. | |
793 | */ | 793 | */ | |
794 | if (adb_polling) | 794 | if (adb_polling) | |
795 | adb_soft_intr(); | 795 | adb_soft_intr(); | |
796 | else | 796 | else | |
797 | setsoftadb(); | 797 | setsoftadb(); | |
798 | 798 | |||
799 | return; | 799 | return; | |
800 | } | 800 | } | |
801 | 801 | |||
802 | 802 | |||
803 | /* | 803 | /* | |
804 | * Called to process the packets after they have been | 804 | * Called to process the packets after they have been | |
805 | * placed in the incoming queue. | 805 | * placed in the incoming queue. | |
806 | * | 806 | * | |
807 | */ | 807 | */ | |
808 | void | 808 | void | |
809 | adb_soft_intr(void) | 809 | adb_soft_intr(void) | |
810 | { | 810 | { | |
811 | int s; | 811 | int s; | |
812 | int cmd = 0; | 812 | int cmd = 0; | |
813 | u_char *buffer = 0; | 813 | u_char *buffer = 0; | |
814 | adbComp *comprout = NULL; | 814 | adbComp *comprout = NULL; | |
815 | volatile int *compdata = 0; | 815 | volatile int *compdata = 0; | |
816 | 816 | |||
817 | #if 0 | 817 | #if 0 | |
818 | s = splhigh(); | 818 | s = splhigh(); | |
819 | printf_intr("sr: %x\n", (s & 0x0700)); | 819 | printf_intr("sr: %x\n", (s & 0x0700)); | |
820 | splx(s); | 820 | splx(s); | |
821 | #endif | 821 | #endif | |
822 | 822 | |||
823 | /*delay(2*ADB_DELAY);*/ | 823 | /*delay(2*ADB_DELAY);*/ | |
824 | 824 | |||
825 | while (adbInCount) { | 825 | while (adbInCount) { | |
826 | #ifdef ADB_DEBUG | 826 | #ifdef ADB_DEBUG | |
827 | if (adb_debug & 0x80) | 827 | if (adb_debug & 0x80) | |
828 | printf_intr("%x %x %x ", | 828 | printf_intr("%x %x %x ", | |
829 | adbInCount, adbInHead, adbInTail); | 829 | adbInCount, adbInHead, adbInTail); | |
830 | #endif | 830 | #endif | |
831 | /* get the data we need from the queue */ | 831 | /* get the data we need from the queue */ | |
832 | buffer = adbInbound[adbInHead].saveBuf; | 832 | buffer = adbInbound[adbInHead].saveBuf; | |
833 | comprout = adbInbound[adbInHead].compRout; | 833 | comprout = adbInbound[adbInHead].compRout; | |
834 | compdata = adbInbound[adbInHead].compData; | 834 | compdata = adbInbound[adbInHead].compData; | |
835 | cmd = adbInbound[adbInHead].cmd; | 835 | cmd = adbInbound[adbInHead].cmd; | |
836 | 836 | |||
837 | /* copy over data to data area if it's valid */ | 837 | /* copy over data to data area if it's valid */ | |
838 | /* | 838 | /* | |
839 | * Note that for unsol packets we don't want to copy the | 839 | * Note that for unsol packets we don't want to copy the | |
840 | * data anywhere, so buffer was already set to 0. | 840 | * data anywhere, so buffer was already set to 0. | |
841 | * For ack_only buffer was set to 0, so don't copy. | 841 | * For ack_only buffer was set to 0, so don't copy. | |
842 | */ | 842 | */ | |
843 | if (buffer) | 843 | if (buffer) | |
844 | memcpy(buffer, adbInbound[adbInHead].data, | 844 | memcpy(buffer, adbInbound[adbInHead].data, | |
845 | adbInbound[adbInHead].data[0] + 1); | 845 | adbInbound[adbInHead].data[0] + 1); | |
846 | 846 | |||
847 | #ifdef ADB_DEBUG | 847 | #ifdef ADB_DEBUG | |
848 | if (adb_debug & 0x80) { | 848 | if (adb_debug & 0x80) { | |
849 | printf_intr("%p %p %p %x ", | 849 | printf_intr("%p %p %p %x ", | |
850 | buffer, comprout, compdata, (short)cmd); | 850 | buffer, comprout, compdata, (short)cmd); | |
851 | printf_intr("buf: "); | 851 | printf_intr("buf: "); | |
852 | print_single(adbInbound[adbInHead].data); | 852 | print_single(adbInbound[adbInHead].data); | |
853 | } | 853 | } | |
854 | #endif | 854 | #endif | |
855 | /* Remove the packet from the queue before calling | 855 | /* Remove the packet from the queue before calling | |
856 | * the completion routine, so that the completion | 856 | * the completion routine, so that the completion | |
857 | * routine can reentrantly process the queue. For | 857 | * routine can reentrantly process the queue. For | |
858 | * example, this happens when polling is turned on | 858 | * example, this happens when polling is turned on | |
 859 | * by entering the debugger by keystroke. | 859 | * by entering the debugger by keystroke. | |
860 | */ | 860 | */ | |
861 | s = splhigh(); | 861 | s = splhigh(); | |
862 | adbInCount--; | 862 | adbInCount--; | |
863 | if (++adbInHead >= ADB_QUEUE) | 863 | if (++adbInHead >= ADB_QUEUE) | |
864 | adbInHead = 0; | 864 | adbInHead = 0; | |
865 | splx(s); | 865 | splx(s); | |
866 | 866 | |||
867 | /* call default completion routine if it's valid */ | 867 | /* call default completion routine if it's valid */ | |
868 | if (comprout) | 868 | if (comprout) | |
869 | (*comprout)(buffer, compdata, cmd); | 869 | (*comprout)(buffer, compdata, cmd); | |
870 | } | 870 | } | |
871 | return; | 871 | return; | |
872 | } | 872 | } | |
873 | 873 | |||
874 | 874 | |||
875 | /* | 875 | /* | |
876 | * This is my version of the ADBOp routine. It mainly just calls the | 876 | * This is my version of the ADBOp routine. It mainly just calls the | |
877 | * hardware-specific routine. | 877 | * hardware-specific routine. | |
878 | * | 878 | * | |
879 | * data : pointer to data area to be used by compRout | 879 | * data : pointer to data area to be used by compRout | |
880 | * compRout : completion routine | 880 | * compRout : completion routine | |
881 | * buffer : for LISTEN: points to data to send - MAX 8 data bytes, | 881 | * buffer : for LISTEN: points to data to send - MAX 8 data bytes, | |
882 | * byte 0 = # of bytes | 882 | * byte 0 = # of bytes | |
883 | * : for TALK: points to place to save return data | 883 | * : for TALK: points to place to save return data | |
884 | * command : the adb command to send | 884 | * command : the adb command to send | |
885 | * result : 0 = success | 885 | * result : 0 = success | |
886 | * : -1 = could not complete | 886 | * : -1 = could not complete | |
887 | */ | 887 | */ | |
888 | int | 888 | int | |
889 | adb_op(Ptr buffer, adbComp *compRout, volatile void *data, short command) | 889 | adb_op(Ptr buffer, adbComp *compRout, volatile void *data, short command) | |
890 | { | 890 | { | |
891 | int result; | 891 | int result; | |
892 | 892 | |||
893 | switch (adbHardware) { | 893 | switch (adbHardware) { | |
894 | case ADB_HW_PMU: | 894 | case ADB_HW_PMU: | |
895 | result = pm_adb_op((u_char *)buffer, compRout, | 895 | result = pm_adb_op((u_char *)buffer, compRout, | |
896 | data, (int)command); | 896 | data, (int)command); | |
897 | 897 | |||
898 | if (result == 0) | 898 | if (result == 0) | |
899 | return 0; | 899 | return 0; | |
900 | else | 900 | else | |
901 | return -1; | 901 | return -1; | |
902 | break; | 902 | break; | |
903 | 903 | |||
904 | case ADB_HW_CUDA: | 904 | case ADB_HW_CUDA: | |
905 | result = send_adb_cuda((u_char *)0, (u_char *)buffer, | 905 | result = send_adb_cuda((u_char *)0, (u_char *)buffer, | |
906 | compRout, data, (int)command); | 906 | compRout, data, (int)command); | |
907 | if (result == 0) | 907 | if (result == 0) | |
908 | return 0; | 908 | return 0; | |
909 | else | 909 | else | |
910 | return -1; | 910 | return -1; | |
911 | break; | 911 | break; | |
912 | 912 | |||
913 | case ADB_HW_UNKNOWN: | 913 | case ADB_HW_UNKNOWN: | |
914 | default: | 914 | default: | |
915 | return -1; | 915 | return -1; | |
916 | } | 916 | } | |
917 | } | 917 | } | |
918 | 918 | |||
919 | 919 | |||
920 | /* | 920 | /* | |
921 | * adb_hw_setup | 921 | * adb_hw_setup | |
922 | * This routine sets up the possible machine specific hardware | 922 | * This routine sets up the possible machine specific hardware | |
923 | * config (mainly VIA settings) for the various models. | 923 | * config (mainly VIA settings) for the various models. | |
924 | */ | 924 | */ | |
925 | void | 925 | void | |
926 | adb_hw_setup(void) | 926 | adb_hw_setup(void) | |
927 | { | 927 | { | |
928 | volatile int i; | 928 | volatile int i; | |
929 | 929 | |||
930 | switch (adbHardware) { | 930 | switch (adbHardware) { | |
931 | case ADB_HW_PMU: | 931 | case ADB_HW_PMU: | |
932 | /* | 932 | /* | |
933 | * XXX - really PM_VIA_CLR_INTR - should we put it in | 933 | * XXX - really PM_VIA_CLR_INTR - should we put it in | |
934 | * pm_direct.h? | 934 | * pm_direct.h? | |
935 | */ | 935 | */ | |
936 | write_via_reg(VIA1, vIFR, 0x90); /* clear interrupt */ | 936 | write_via_reg(VIA1, vIFR, 0x90); /* clear interrupt */ | |
937 | break; | 937 | break; | |
938 | 938 | |||
939 | case ADB_HW_CUDA: | 939 | case ADB_HW_CUDA: | |
940 | via_reg_or(VIA1, vDirB, 0x30); /* register B bits 4 and 5: | 940 | via_reg_or(VIA1, vDirB, 0x30); /* register B bits 4 and 5: | |
941 | * outputs */ | 941 | * outputs */ | |
942 | via_reg_and(VIA1, vDirB, 0xf7); /* register B bit 3: input */ | 942 | via_reg_and(VIA1, vDirB, 0xf7); /* register B bit 3: input */ | |
943 | via_reg_and(VIA1, vACR, ~vSR_OUT); /* make sure SR is set | 943 | via_reg_and(VIA1, vACR, ~vSR_OUT); /* make sure SR is set | |
944 | * to IN */ | 944 | * to IN */ | |
945 | write_via_reg(VIA1, vACR, (read_via_reg(VIA1, vACR) | 0x0c) & ~0x10); | 945 | write_via_reg(VIA1, vACR, (read_via_reg(VIA1, vACR) | 0x0c) & ~0x10); | |
946 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | 946 | adbActionState = ADB_ACTION_IDLE; /* used by all types of | |
947 | * hardware */ | 947 | * hardware */ | |
948 | write_via_reg(VIA1, vIER, 0x84);/* make sure VIA interrupts | 948 | write_via_reg(VIA1, vIER, 0x84);/* make sure VIA interrupts | |
949 | * are on */ | 949 | * are on */ | |
950 | ADB_SET_STATE_IDLE_CUDA(); /* set ADB bus state to idle */ | 950 | ADB_SET_STATE_IDLE_CUDA(); /* set ADB bus state to idle */ | |
951 | 951 | |||
952 | /* sort of a device reset */ | 952 | /* sort of a device reset */ | |
953 | i = ADB_SR(); /* clear interrupt */ | 953 | i = ADB_SR(); /* clear interrupt */ | |
954 | ADB_VIA_INTR_DISABLE(); /* no interrupts while clearing */ | 954 | ADB_VIA_INTR_DISABLE(); /* no interrupts while clearing */ | |
955 | ADB_SET_STATE_IDLE_CUDA(); /* reset state to idle */ | 955 | ADB_SET_STATE_IDLE_CUDA(); /* reset state to idle */ | |
956 | delay(ADB_DELAY); | 956 | delay(ADB_DELAY); | |
957 | ADB_SET_STATE_TIP(); /* signal start of frame */ | 957 | ADB_SET_STATE_TIP(); /* signal start of frame */ | |
958 | delay(ADB_DELAY); | 958 | delay(ADB_DELAY); | |
959 | ADB_TOGGLE_STATE_ACK_CUDA(); | 959 | ADB_TOGGLE_STATE_ACK_CUDA(); | |
960 | delay(ADB_DELAY); | 960 | delay(ADB_DELAY); | |
961 | ADB_CLR_STATE_TIP(); | 961 | ADB_CLR_STATE_TIP(); | |
962 | delay(ADB_DELAY); | 962 | delay(ADB_DELAY); | |
963 | ADB_SET_STATE_IDLE_CUDA(); /* back to idle state */ | 963 | ADB_SET_STATE_IDLE_CUDA(); /* back to idle state */ | |
964 | i = ADB_SR(); /* clear interrupt */ | 964 | i = ADB_SR(); /* clear interrupt */ | |
965 | ADB_VIA_INTR_ENABLE(); /* ints ok now */ | 965 | ADB_VIA_INTR_ENABLE(); /* ints ok now */ | |
966 | break; | 966 | break; | |
967 | 967 | |||
968 | case ADB_HW_UNKNOWN: | 968 | case ADB_HW_UNKNOWN: | |
969 | default: | 969 | default: | |
970 | write_via_reg(VIA1, vIER, 0x04);/* turn interrupts off - TO | 970 | write_via_reg(VIA1, vIER, 0x04);/* turn interrupts off - TO | |
971 | * DO: turn PB ints off? */ | 971 | * DO: turn PB ints off? */ | |
972 | return; | 972 | return; | |
973 | break; | 973 | break; | |
974 | } | 974 | } | |
975 | } | 975 | } | |
976 | 976 | |||
977 | /* | 977 | /* | |
978 | * adb_reinit sets up the adb stuff | 978 | * adb_reinit sets up the adb stuff | |
979 | * | 979 | * | |
980 | */ | 980 | */ | |
981 | void | 981 | void | |
982 | adb_reinit(void) | 982 | adb_reinit(void) | |
983 | { | 983 | { | |
984 | u_char send_string[ADB_MAX_MSG_LENGTH]; | 984 | u_char send_string[ADB_MAX_MSG_LENGTH]; | |
985 | ADBDataBlock data; /* temp. holder for getting device info */ | 985 | ADBDataBlock data; /* temp. holder for getting device info */ | |
986 | volatile int i, x; | 986 | volatile int i, x; | |
987 | int s = 0; /* XXX: gcc */ | 987 | int s = 0; /* XXX: gcc */ | |
988 | int command; | 988 | int command; | |
989 | int result; | 989 | int result; | |
990 | int saveptr; /* point to next free relocation address */ | 990 | int saveptr; /* point to next free relocation address */ | |
991 | int device; | 991 | int device; | |
992 | int nonewtimes; /* times thru loop w/o any new devices */ | 992 | int nonewtimes; /* times thru loop w/o any new devices */ | |
993 | static bool callo; | 993 | static bool callo; | |
994 | 994 | |||
995 | if (!callo) { | 995 | if (!callo) { | |
996 | callo = true; | 996 | callo = true; | |
997 | callout_init(&adb_cuda_tickle_ch, 0); | 997 | callout_init(&adb_cuda_tickle_ch, 0); | |
998 | callout_init(&adb_soft_intr_ch, 0); | 998 | callout_init(&adb_soft_intr_ch, 0); | |
999 | } | 999 | } | |
1000 | 1000 | |||
1001 | /* Make sure we are not interrupted while building the table. */ | 1001 | /* Make sure we are not interrupted while building the table. */ | |
1002 | if (adbHardware != ADB_HW_PMU) /* ints must be on for PMU? */ | 1002 | if (adbHardware != ADB_HW_PMU) /* ints must be on for PMU? */ | |
1003 | s = splhigh(); | 1003 | s = splhigh(); | |
1004 | 1004 | |||
1005 | ADBNumDevices = 0; /* no devices yet */ | 1005 | ADBNumDevices = 0; /* no devices yet */ | |
1006 | 1006 | |||
1007 | /* Let intr routines know we are running reinit */ | 1007 | /* Let intr routines know we are running reinit */ | |
1008 | adbStarting = 1; | 1008 | adbStarting = 1; | |
1009 | 1009 | |||
1010 | /* | 1010 | /* | |
1011 | * Initialize the ADB table. For now, we'll always use the same table | 1011 | * Initialize the ADB table. For now, we'll always use the same table | |
1012 | * that is defined at the beginning of this file - no mallocs. | 1012 | * that is defined at the beginning of this file - no mallocs. | |
1013 | */ | 1013 | */ | |
1014 | for (i = 0; i < 16; i++) | 1014 | for (i = 0; i < 16; i++) | |
1015 | ADBDevTable[i].devType = 0; | 1015 | ADBDevTable[i].devType = 0; | |
1016 | 1016 | |||
1017 | adb_setup_hw_type(); /* setup hardware type */ | 1017 | adb_setup_hw_type(); /* setup hardware type */ | |
1018 | 1018 | |||
1019 | adb_hw_setup(); /* init the VIA bits and hard reset ADB */ | 1019 | adb_hw_setup(); /* init the VIA bits and hard reset ADB */ | |
1020 | 1020 | |||
1021 | delay(1000); | 1021 | delay(1000); | |
1022 | 1022 | |||
1023 | /* send an ADB reset first */ | 1023 | /* send an ADB reset first */ | |
1024 | result = adb_op_sync((Ptr)0, NULL, (Ptr)0, (short)0x00); | 1024 | result = adb_op_sync((Ptr)0, NULL, (Ptr)0, (short)0x00); | |
1025 | delay(200000); | 1025 | delay(200000); | |
1026 | 1026 | |||
1027 | #ifdef ADB_DEBUG | 1027 | #ifdef ADB_DEBUG | |
1028 | if (result && adb_debug) { | 1028 | if (result && adb_debug) { | |
1029 | printf_intr("adb_reinit: failed to reset, result = %d\n",result); | 1029 | printf_intr("adb_reinit: failed to reset, result = %d\n",result); | |
1030 | } | 1030 | } | |
1031 | #endif | 1031 | #endif | |
1032 | 1032 | |||
1033 | /* | 1033 | /* | |
1034 | * Probe for ADB devices. Probe devices 1-15 quickly to determine | 1034 | * Probe for ADB devices. Probe devices 1-15 quickly to determine | |
1035 | * which device addresses are in use and which are free. For each | 1035 | * which device addresses are in use and which are free. For each | |
1036 | * address that is in use, move the device at that address to a higher | 1036 | * address that is in use, move the device at that address to a higher | |
1037 | * free address. Continue doing this at that address until no device | 1037 | * free address. Continue doing this at that address until no device | |
1038 | * responds at that address. Then move the last device that was moved | 1038 | * responds at that address. Then move the last device that was moved | |
1039 | * back to the original address. Do this for the remaining addresses | 1039 | * back to the original address. Do this for the remaining addresses | |
1040 | * that we determined were in use. | 1040 | * that we determined were in use. | |
1041 | * | 1041 | * | |
1042 | * When finished, do this entire process over again with the updated | 1042 | * When finished, do this entire process over again with the updated | |
1043 | * list of in use addresses. Do this until no new devices have been | 1043 | * list of in use addresses. Do this until no new devices have been | |
1044 | * found in 20 passes through the in use address list. (This probably | 1044 | * found in 20 passes through the in use address list. (This probably | |
1045 | * seems long and complicated, but it's the best way to detect multiple | 1045 | * seems long and complicated, but it's the best way to detect multiple | |
1046 | * devices at the same address - sometimes it takes a couple of tries | 1046 | * devices at the same address - sometimes it takes a couple of tries | |
1047 | * before the collision is detected.) | 1047 | * before the collision is detected.) | |
1048 | */ | 1048 | */ | |
1049 | 1049 | |||
1050 | /* initial scan through the devices */ | 1050 | /* initial scan through the devices */ | |
1051 | for (i = 1; i < 16; i++) { | 1051 | for (i = 1; i < 16; i++) { | |
1052 | send_string[0] = 0; | 1052 | send_string[0] = 0; | |
1053 | command = ADBTALK(i, 3); | 1053 | command = ADBTALK(i, 3); | |
1054 | result = adb_op_sync((Ptr)send_string, NULL, | 1054 | result = adb_op_sync((Ptr)send_string, NULL, | |
1055 | (Ptr)0, (short)command); | 1055 | (Ptr)0, (short)command); | |
1056 | 1056 | |||
1057 | #ifdef ADB_DEBUG | 1057 | #ifdef ADB_DEBUG | |
1058 | if (result && adb_debug) { | 1058 | if (result && adb_debug) { | |
1059 | printf_intr("adb_reinit: scan of device %d, result = %d, str = 0x%x\n", | 1059 | printf_intr("adb_reinit: scan of device %d, result = %d, str = 0x%x\n", | |
1060 | i,result,send_string[0]); | 1060 | i,result,send_string[0]); | |
1061 | } | 1061 | } | |
1062 | #endif | 1062 | #endif | |
1063 | 1063 | |||
1064 | if (send_string[0] != 0) { | 1064 | if (send_string[0] != 0) { | |
1065 | /* check for valid device handler */ | 1065 | /* check for valid device handler */ | |
1066 | switch (send_string[2]) { | 1066 | switch (send_string[2]) { | |
1067 | case 0: | 1067 | case 0: | |
1068 | case 0xfd: | 1068 | case 0xfd: | |
1069 | case 0xfe: | 1069 | case 0xfe: | |
1070 | case 0xff: | 1070 | case 0xff: | |
1071 | continue; /* invalid, skip */ | 1071 | continue; /* invalid, skip */ | |
1072 | } | 1072 | } | |
1073 | 1073 | |||
1074 | /* found a device */ | 1074 | /* found a device */ | |
1075 | ++ADBNumDevices; | 1075 | ++ADBNumDevices; | |
1076 | KASSERT(ADBNumDevices < 16); | 1076 | KASSERT(ADBNumDevices < 16); | |
1077 | ADBDevTable[ADBNumDevices].devType = | 1077 | ADBDevTable[ADBNumDevices].devType = | |
1078 | (int)send_string[2]; | 1078 | (int)send_string[2]; | |
1079 | ADBDevTable[ADBNumDevices].origAddr = i; | 1079 | ADBDevTable[ADBNumDevices].origAddr = i; | |
1080 | ADBDevTable[ADBNumDevices].currentAddr = i; | 1080 | ADBDevTable[ADBNumDevices].currentAddr = i; | |
1081 | ADBDevTable[ADBNumDevices].DataAreaAddr = | 1081 | ADBDevTable[ADBNumDevices].DataAreaAddr = | |
1082 | (long)0; | 1082 | (long)0; | |
1083 | ADBDevTable[ADBNumDevices].ServiceRtPtr = (void *)0; | 1083 | ADBDevTable[ADBNumDevices].ServiceRtPtr = (void *)0; | |
1084 | pm_check_adb_devices(i); /* tell pm driver device | 1084 | pm_check_adb_devices(i); /* tell pm driver device | |
1085 | * is here */ | 1085 | * is here */ | |
1086 | } | 1086 | } | |
1087 | } | 1087 | } | |
1088 | 1088 | |||
1089 | /* find highest unused address */ | 1089 | /* find highest unused address */ | |
1090 | for (saveptr = 15; saveptr > 0; saveptr--) | 1090 | for (saveptr = 15; saveptr > 0; saveptr--) | |
1091 | if (-1 == get_adb_info(&data, saveptr)) | 1091 | if (-1 == get_adb_info(&data, saveptr)) | |
1092 | break; | 1092 | break; | |
1093 | 1093 | |||
1094 | #ifdef ADB_DEBUG | 1094 | #ifdef ADB_DEBUG | |
1095 | if (adb_debug & 0x80) { | 1095 | if (adb_debug & 0x80) { | |
1096 | printf_intr("first free is: 0x%02x\n", saveptr); | 1096 | printf_intr("first free is: 0x%02x\n", saveptr); | |
1097 | printf_intr("devices: %i\n", ADBNumDevices); | 1097 | printf_intr("devices: %i\n", ADBNumDevices); | |
1098 | } | 1098 | } | |
1099 | #endif | 1099 | #endif | |
1100 | 1100 | |||
1101 | nonewtimes = 0; /* no loops w/o new devices */ | 1101 | nonewtimes = 0; /* no loops w/o new devices */ | |
1102 | while (saveptr > 0 && nonewtimes++ < 11) { | 1102 | while (saveptr > 0 && nonewtimes++ < 11) { | |
1103 | for (i = 1; i <= ADBNumDevices; i++) { | 1103 | for (i = 1; i <= ADBNumDevices; i++) { | |
1104 | device = ADBDevTable[i].currentAddr; | 1104 | device = ADBDevTable[i].currentAddr; | |
1105 | #ifdef ADB_DEBUG | 1105 | #ifdef ADB_DEBUG | |
1106 | if (adb_debug & 0x80) | 1106 | if (adb_debug & 0x80) | |
1107 | printf_intr("moving device 0x%02x to 0x%02x " | 1107 | printf_intr("moving device 0x%02x to 0x%02x " | |
1108 | "(index 0x%02x) ", device, saveptr, i); | 1108 | "(index 0x%02x) ", device, saveptr, i); | |
1109 | #endif | 1109 | #endif | |
1110 | 1110 | |||
1111 | /* send TALK R3 to address */ | 1111 | /* send TALK R3 to address */ | |
1112 | command = ADBTALK(device, 3); | 1112 | command = ADBTALK(device, 3); | |
1113 | adb_op_sync((Ptr)send_string, NULL, | 1113 | adb_op_sync((Ptr)send_string, NULL, | |
1114 | (Ptr)0, (short)command); | 1114 | (Ptr)0, (short)command); | |
1115 | 1115 | |||
1116 | /* move device to higher address */ | 1116 | /* move device to higher address */ | |
1117 | command = ADBLISTEN(device, 3); | 1117 | command = ADBLISTEN(device, 3); | |
1118 | send_string[0] = 2; | 1118 | send_string[0] = 2; | |
1119 | send_string[1] = (u_char)(saveptr | 0x60); | 1119 | send_string[1] = (u_char)(saveptr | 0x60); | |
1120 | send_string[2] = 0xfe; | 1120 | send_string[2] = 0xfe; | |
1121 | adb_op_sync((Ptr)send_string, NULL, | 1121 | adb_op_sync((Ptr)send_string, NULL, | |
1122 | (Ptr)0, (short)command); | 1122 | (Ptr)0, (short)command); | |
1123 | delay(500); | 1123 | delay(500); | |
1124 | 1124 | |||
1125 | /* send TALK R3 - anything at new address? */ | 1125 | /* send TALK R3 - anything at new address? */ | |
1126 | command = ADBTALK(saveptr, 3); | 1126 | command = ADBTALK(saveptr, 3); | |
1127 | adb_op_sync((Ptr)send_string, NULL, | 1127 | adb_op_sync((Ptr)send_string, NULL, | |
1128 | (Ptr)0, (short)command); | 1128 | (Ptr)0, (short)command); | |
1129 | delay(500); | 1129 | delay(500); | |
1130 | 1130 | |||
1131 | if (send_string[0] == 0) { | 1131 | if (send_string[0] == 0) { | |
1132 | #ifdef ADB_DEBUG | 1132 | #ifdef ADB_DEBUG | |
1133 | if (adb_debug & 0x80) | 1133 | if (adb_debug & 0x80) | |
1134 | printf_intr("failed, continuing\n"); | 1134 | printf_intr("failed, continuing\n"); | |
1135 | #endif | 1135 | #endif | |
1136 | continue; | 1136 | continue; | |
1137 | } | 1137 | } | |
1138 | 1138 | |||
1139 | /* send TALK R3 - anything at old address? */ | 1139 | /* send TALK R3 - anything at old address? */ | |
1140 | command = ADBTALK(device, 3); | 1140 | command = ADBTALK(device, 3); | |
1141 | result = adb_op_sync((Ptr)send_string, NULL, | 1141 | result = adb_op_sync((Ptr)send_string, NULL, | |
1142 | (Ptr)0, (short)command); | 1142 | (Ptr)0, (short)command); | |
1143 | if (send_string[0] != 0) { | 1143 | if (send_string[0] != 0) { | |
1144 | /* check for valid device handler */ | 1144 | /* check for valid device handler */ | |
1145 | switch (send_string[2]) { | 1145 | switch (send_string[2]) { | |
1146 | case 0: | 1146 | case 0: | |
1147 | case 0xfd: | 1147 | case 0xfd: | |
1148 | case 0xfe: | 1148 | case 0xfe: | |
1149 | case 0xff: | 1149 | case 0xff: | |
1150 | continue; /* invalid, skip */ | 1150 | continue; /* invalid, skip */ | |
1151 | } | 1151 | } | |
1152 | 1152 | |||
1153 | /* new device found */ | 1153 | /* new device found */ | |
1154 | /* update data for previously moved device */ | 1154 | /* update data for previously moved device */ | |
1155 | ADBDevTable[i].currentAddr = saveptr; | 1155 | ADBDevTable[i].currentAddr = saveptr; | |
1156 | #ifdef ADB_DEBUG | 1156 | #ifdef ADB_DEBUG | |
1157 | if (adb_debug & 0x80) | 1157 | if (adb_debug & 0x80) | |
1158 | printf_intr("old device at index %i\n",i); | 1158 | printf_intr("old device at index %i\n",i); | |
1159 | #endif | 1159 | #endif | |
1160 | /* add new device in table */ | 1160 | /* add new device in table */ | |
1161 | #ifdef ADB_DEBUG | 1161 | #ifdef ADB_DEBUG | |
1162 | if (adb_debug & 0x80) | 1162 | if (adb_debug & 0x80) | |
1163 | printf_intr("new device found\n"); | 1163 | printf_intr("new device found\n"); | |
1164 | #endif | 1164 | #endif | |
1165 | if (saveptr > ADBNumDevices) { | 1165 | if (saveptr > ADBNumDevices) { | |
1166 | ++ADBNumDevices; | 1166 | ++ADBNumDevices; | |
1167 | KASSERT(ADBNumDevices < 16); | 1167 | KASSERT(ADBNumDevices < 16); | |
1168 | } | 1168 | } | |
1169 | ADBDevTable[ADBNumDevices].devType = | 1169 | ADBDevTable[ADBNumDevices].devType = | |
1170 | (int)send_string[2]; | 1170 | (int)send_string[2]; | |
1171 | ADBDevTable[ADBNumDevices].origAddr = device; | 1171 | ADBDevTable[ADBNumDevices].origAddr = device; | |
1172 | ADBDevTable[ADBNumDevices].currentAddr = device; | 1172 | ADBDevTable[ADBNumDevices].currentAddr = device; | |
1173 | /* These will be set correctly in adbsys.c */ | 1173 | /* These will be set correctly in adbsys.c */ | |
1174 | /* Until then, unsol. data will be ignored. */ | 1174 | /* Until then, unsol. data will be ignored. */ | |
1175 | ADBDevTable[ADBNumDevices].DataAreaAddr = | 1175 | ADBDevTable[ADBNumDevices].DataAreaAddr = | |
1176 | (long)0; | 1176 | (long)0; | |
1177 | ADBDevTable[ADBNumDevices].ServiceRtPtr = | 1177 | ADBDevTable[ADBNumDevices].ServiceRtPtr = | |
1178 | (void *)0; | 1178 | (void *)0; | |
1179 | /* find next unused address */ | 1179 | /* find next unused address */ | |
1180 | for (x = saveptr; x > 0; x--) { | 1180 | for (x = saveptr; x > 0; x--) { | |
1181 | if (-1 == get_adb_info(&data, x)) { | 1181 | if (-1 == get_adb_info(&data, x)) { | |
1182 | saveptr = x; | 1182 | saveptr = x; | |
1183 | break; | 1183 | break; | |
1184 | } | 1184 | } | |
1185 | } | 1185 | } | |
1186 | if (x == 0) | 1186 | if (x == 0) | |
1187 | saveptr = 0; | 1187 | saveptr = 0; | |
1188 | #ifdef ADB_DEBUG | 1188 | #ifdef ADB_DEBUG | |
1189 | if (adb_debug & 0x80) | 1189 | if (adb_debug & 0x80) | |
1190 | printf_intr("new free is 0x%02x\n", | 1190 | printf_intr("new free is 0x%02x\n", | |
1191 | saveptr); | 1191 | saveptr); | |
1192 | #endif | 1192 | #endif | |
1193 | nonewtimes = 0; | 1193 | nonewtimes = 0; | |
1194 | /* tell pm driver device is here */ | 1194 | /* tell pm driver device is here */ | |
1195 | pm_check_adb_devices(device); | 1195 | pm_check_adb_devices(device); | |
1196 | } else { | 1196 | } else { | |
1197 | #ifdef ADB_DEBUG | 1197 | #ifdef ADB_DEBUG | |
1198 | if (adb_debug & 0x80) | 1198 | if (adb_debug & 0x80) | |
1199 | printf_intr("moving back...\n"); | 1199 | printf_intr("moving back...\n"); | |
1200 | #endif | 1200 | #endif | |
1201 | /* move old device back */ | 1201 | /* move old device back */ | |
1202 | command = ADBLISTEN(saveptr, 3); | 1202 | command = ADBLISTEN(saveptr, 3); | |
1203 | send_string[0] = 2; | 1203 | send_string[0] = 2; | |
1204 | send_string[1] = (u_char)(device | 0x60); | 1204 | send_string[1] = (u_char)(device | 0x60); | |
1205 | send_string[2] = 0xfe; | 1205 | send_string[2] = 0xfe; | |
1206 | adb_op_sync((Ptr)send_string, NULL, | 1206 | adb_op_sync((Ptr)send_string, NULL, | |
1207 | (Ptr)0, (short)command); | 1207 | (Ptr)0, (short)command); | |
1208 | delay(1000); | 1208 | delay(1000); | |
1209 | } | 1209 | } | |
1210 | } | 1210 | } | |
1211 | } | 1211 | } | |
1212 | 1212 | |||
1213 | #ifdef ADB_DEBUG | 1213 | #ifdef ADB_DEBUG | |
1214 | if (adb_debug) { | 1214 | if (adb_debug) { | |
1215 | for (i = 1; i <= ADBNumDevices; i++) { | 1215 | for (i = 1; i <= ADBNumDevices; i++) { | |
1216 | x = get_ind_adb_info(&data, i); | 1216 | x = get_ind_adb_info(&data, i); | |
1217 | if (x != -1) | 1217 | if (x != -1) | |
1218 | printf_intr("index 0x%x, addr 0x%x, type 0x%x\n", | 1218 | printf_intr("index 0x%x, addr 0x%x, type 0x%x\n", | |
1219 | i, x, data.devType); | 1219 | i, x, data.devType); | |
1220 | } | 1220 | } | |
1221 | } | 1221 | } | |
1222 | #endif | 1222 | #endif | |
1223 | 1223 | |||
1224 | #ifdef ADB_DEBUG | 1224 | #ifdef ADB_DEBUG | |
1225 | if (adb_debug) { | 1225 | if (adb_debug) { | |
1226 | if (0 == ADBNumDevices) /* tell user if no devices found */ | 1226 | if (0 == ADBNumDevices) /* tell user if no devices found */ | |
1227 | printf_intr("adb: no devices found\n"); | 1227 | printf_intr("adb: no devices found\n"); | |
1228 | } | 1228 | } | |
1229 | #endif | 1229 | #endif | |
1230 | 1230 | |||
1231 | adbStarting = 0; /* not starting anymore */ | 1231 | adbStarting = 0; /* not starting anymore */ | |
1232 | #ifdef ADB_DEBUG | 1232 | #ifdef ADB_DEBUG | |
1233 | if (adb_debug) | 1233 | if (adb_debug) | |
1234 | printf_intr("adb: ADBReInit complete\n"); | 1234 | printf_intr("adb: ADBReInit complete\n"); | |
1235 | #endif | 1235 | #endif | |
1236 | 1236 | |||
1237 | if (adbHardware == ADB_HW_CUDA) | 1237 | if (adbHardware == ADB_HW_CUDA) | |
1238 | callout_reset(&adb_cuda_tickle_ch, ADB_TICKLE_TICKS, | 1238 | callout_reset(&adb_cuda_tickle_ch, ADB_TICKLE_TICKS, | |
1239 | (void *)adb_cuda_tickle, NULL); | 1239 | (void *)adb_cuda_tickle, NULL); | |
1240 | 1240 | |||
1241 | if (adbHardware != ADB_HW_PMU) /* ints must be on for PMU? */ | 1241 | if (adbHardware != ADB_HW_PMU) /* ints must be on for PMU? */ | |
1242 | splx(s); | 1242 | splx(s); | |
1243 | } | 1243 | } | |
1244 | 1244 | |||
1245 | /* | 1245 | /* | |
1246 | * adb_cmd_result | 1246 | * adb_cmd_result | |
1247 | * | 1247 | * | |
1248 | * This routine lets the caller know whether the specified adb command string | 1248 | * This routine lets the caller know whether the specified adb command string | |
1249 | * should expect a returned result, such as a TALK command. | 1249 | * should expect a returned result, such as a TALK command. | |
1250 | * | 1250 | * | |
1251 | * returns: 0 if a result should be expected | 1251 | * returns: 0 if a result should be expected | |
1252 | * 1 if a result should NOT be expected | 1252 | * 1 if a result should NOT be expected | |
1253 | */ | 1253 | */ | |
1254 | int | 1254 | int | |
1255 | adb_cmd_result(u_char *in) | 1255 | adb_cmd_result(u_char *in) | |
1256 | { | 1256 | { | |
1257 | switch (adbHardware) { | 1257 | switch (adbHardware) { | |
1258 | case ADB_HW_CUDA: | 1258 | case ADB_HW_CUDA: | |
1259 | /* was it an ADB talk command? */ | 1259 | /* was it an ADB talk command? */ | |
1260 | if ((in[1] == 0x00) && ((in[2] & 0x0c) == 0x0c)) | 1260 | if ((in[1] == 0x00) && ((in[2] & 0x0c) == 0x0c)) | |
1261 | return 0; | 1261 | return 0; | |
1262 | /* was it an RTC/PRAM read date/time? */ | 1262 | /* was it an RTC/PRAM read date/time? */ | |
1263 | if ((in[1] == 0x01) && (in[2] == 0x03)) | 1263 | if ((in[1] == 0x01) && (in[2] == 0x03)) | |
1264 | return 0; | 1264 | return 0; | |
1265 | return 1; | 1265 | return 1; | |
1266 | 1266 | |||
1267 | case ADB_HW_PMU: | 1267 | case ADB_HW_PMU: | |
1268 | return 1; | 1268 | return 1; | |
1269 | 1269 | |||
1270 | case ADB_HW_UNKNOWN: | 1270 | case ADB_HW_UNKNOWN: | |
1271 | default: | 1271 | default: | |
1272 | return 1; | 1272 | return 1; | |
1273 | } | 1273 | } | |
1274 | } | 1274 | } | |
1275 | 1275 | |||
1276 | 1276 | |||
1277 | /* | 1277 | /* | |
1278 | * adb_cmd_extra | 1278 | * adb_cmd_extra | |
1279 | * | 1279 | * | |
1280 | * This routine lets the caller know whether the specified adb command string | 1280 | * This routine lets the caller know whether the specified adb command string | |
1281 | * may have extra data appended to the end of it, such as a LISTEN command. | 1281 | * may have extra data appended to the end of it, such as a LISTEN command. | |
1282 | * | 1282 | * | |
1283 | * returns: 0 if extra data is allowed | 1283 | * returns: 0 if extra data is allowed | |
1284 | * 1 if extra data is NOT allowed | 1284 | * 1 if extra data is NOT allowed | |
1285 | */ | 1285 | */ | |
1286 | int | 1286 | int | |
1287 | adb_cmd_extra(u_char *in) | 1287 | adb_cmd_extra(u_char *in) | |
1288 | { | 1288 | { | |
1289 | switch (adbHardware) { | 1289 | switch (adbHardware) { | |
1290 | case ADB_HW_CUDA: | 1290 | case ADB_HW_CUDA: | |
1291 | /* | 1291 | /* | |
1292 | * TO DO: support needs to be added to recognize RTC and PRAM | 1292 | * TO DO: support needs to be added to recognize RTC and PRAM | |
1293 | * commands | 1293 | * commands | |
1294 | */ | 1294 | */ | |
1295 | if ((in[2] & 0x0c) == 0x08) /* was it a listen command? */ | 1295 | if ((in[2] & 0x0c) == 0x08) /* was it a listen command? */ | |
1296 | return 0; | 1296 | return 0; | |
1297 | /* add others later */ | 1297 | /* add others later */ | |
1298 | return 1; | 1298 | return 1; | |
1299 | 1299 | |||
1300 | case ADB_HW_PMU: | 1300 | case ADB_HW_PMU: | |
1301 | return 1; | 1301 | return 1; | |
1302 | 1302 | |||
1303 | case ADB_HW_UNKNOWN: | 1303 | case ADB_HW_UNKNOWN: | |
1304 | default: | 1304 | default: | |
1305 | return 1; | 1305 | return 1; | |
1306 | } | 1306 | } | |
1307 | } | 1307 | } | |
1308 | 1308 | |||
1309 | /* | 1309 | /* | |
1310 | * adb_op_sync | 1310 | * adb_op_sync | |
1311 | * | 1311 | * | |
1312 | * This routine does exactly what the adb_op routine does, except that after | 1312 | * This routine does exactly what the adb_op routine does, except that after | |
1313 | * the adb_op is called, it waits until the return value is present before | 1313 | * the adb_op is called, it waits until the return value is present before | |
1314 | * returning. | 1314 | * returning. | |
1315 | * | 1315 | * | |
1316 | * NOTE: The user specified compRout is ignored, since this routine specifies | 1316 | * NOTE: The user specified compRout is ignored, since this routine specifies | |
1317 | * its own to adb_op, which is why you really called this in the first place | 1317 | * its own to adb_op, which is why you really called this in the first place | |
1318 | * anyway. | 1318 | * anyway. | |
1319 | */ | 1319 | */ | |
1320 | int | 1320 | int | |
1321 | adb_op_sync(Ptr buffer, adbComp *compRout, Ptr data, short command) | 1321 | adb_op_sync(Ptr buffer, adbComp *compRout, Ptr data, short command) |
--- src/sys/arch/x86/x86/intr.c 2023/11/29 11:40:37 1.166
+++ src/sys/arch/x86/x86/intr.c 2024/03/05 20:58:05 1.167
@@ -1,2058 +1,2058 @@ | @@ -1,2058 +1,2058 @@ | |||
1 | /* $NetBSD: intr.c,v 1.166 2023/11/29 11:40:37 mlelstv Exp $ */ | 1 | /* $NetBSD: intr.c,v 1.167 2024/03/05 20:58:05 andvar Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Andrew Doran, and by Jason R. Thorpe. | 8 | * by Andrew Doran, and by Jason R. Thorpe. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | /* | 32 | /* | |
33 | * Copyright 2002 (c) Wasabi Systems, Inc. | 33 | * Copyright 2002 (c) Wasabi Systems, Inc. | |
34 | * All rights reserved. | 34 | * All rights reserved. | |
35 | * | 35 | * | |
36 | * Written by Frank van der Linden for Wasabi Systems, Inc. | 36 | * Written by Frank van der Linden for Wasabi Systems, Inc. | |
37 | * | 37 | * | |
38 | * Redistribution and use in source and binary forms, with or without | 38 | * Redistribution and use in source and binary forms, with or without | |
39 | * modification, are permitted provided that the following conditions | 39 | * modification, are permitted provided that the following conditions | |
40 | * are met: | 40 | * are met: | |
41 | * 1. Redistributions of source code must retain the above copyright | 41 | * 1. Redistributions of source code must retain the above copyright | |
42 | * notice, this list of conditions and the following disclaimer. | 42 | * notice, this list of conditions and the following disclaimer. | |
43 | * 2. Redistributions in binary form must reproduce the above copyright | 43 | * 2. Redistributions in binary form must reproduce the above copyright | |
44 | * notice, this list of conditions and the following disclaimer in the | 44 | * notice, this list of conditions and the following disclaimer in the | |
45 | * documentation and/or other materials provided with the distribution. | 45 | * documentation and/or other materials provided with the distribution. | |
46 | * 3. All advertising materials mentioning features or use of this software | 46 | * 3. All advertising materials mentioning features or use of this software | |
47 | * must display the following acknowledgement: | 47 | * must display the following acknowledgement: | |
48 | * This product includes software developed for the NetBSD Project by | 48 | * This product includes software developed for the NetBSD Project by | |
49 | * Wasabi Systems, Inc. | 49 | * Wasabi Systems, Inc. | |
50 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | 50 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | |
51 | * or promote products derived from this software without specific prior | 51 | * or promote products derived from this software without specific prior | |
52 | * written permission. | 52 | * written permission. | |
53 | * | 53 | * | |
54 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | 54 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
56 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 56 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
57 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | 57 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | |
58 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 58 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
59 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 59 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
60 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 60 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
61 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 61 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
62 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 62 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
63 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 63 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
64 | * POSSIBILITY OF SUCH DAMAGE. | 64 | * POSSIBILITY OF SUCH DAMAGE. | |
65 | */ | 65 | */ | |
66 | 66 | |||
67 | /*- | 67 | /*- | |
68 | * Copyright (c) 1991 The Regents of the University of California. | 68 | * Copyright (c) 1991 The Regents of the University of California. | |
69 | * All rights reserved. | 69 | * All rights reserved. | |
70 | * | 70 | * | |
71 | * This code is derived from software contributed to Berkeley by | 71 | * This code is derived from software contributed to Berkeley by | |
72 | * William Jolitz. | 72 | * William Jolitz. | |
73 | * | 73 | * | |
74 | * Redistribution and use in source and binary forms, with or without | 74 | * Redistribution and use in source and binary forms, with or without | |
75 | * modification, are permitted provided that the following conditions | 75 | * modification, are permitted provided that the following conditions | |
76 | * are met: | 76 | * are met: | |
77 | * 1. Redistributions of source code must retain the above copyright | 77 | * 1. Redistributions of source code must retain the above copyright | |
78 | * notice, this list of conditions and the following disclaimer. | 78 | * notice, this list of conditions and the following disclaimer. | |
79 | * 2. Redistributions in binary form must reproduce the above copyright | 79 | * 2. Redistributions in binary form must reproduce the above copyright | |
80 | * notice, this list of conditions and the following disclaimer in the | 80 | * notice, this list of conditions and the following disclaimer in the | |
81 | * documentation and/or other materials provided with the distribution. | 81 | * documentation and/or other materials provided with the distribution. | |
82 | * 3. Neither the name of the University nor the names of its contributors | 82 | * 3. Neither the name of the University nor the names of its contributors | |
83 | * may be used to endorse or promote products derived from this software | 83 | * may be used to endorse or promote products derived from this software | |
84 | * without specific prior written permission. | 84 | * without specific prior written permission. | |
85 | * | 85 | * | |
86 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | 86 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
87 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 87 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
88 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 88 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
89 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | 89 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
90 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 90 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
91 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | 91 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
92 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | 92 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
93 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | 93 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
94 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 94 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
95 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 95 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
96 | * SUCH DAMAGE. | 96 | * SUCH DAMAGE. | |
97 | * | 97 | * | |
98 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 | 98 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 | |
99 | */ | 99 | */ | |
100 | 100 | |||
101 | /*- | 101 | /*- | |
102 | * Copyright (c) 1993, 1994 Charles Hannum. | 102 | * Copyright (c) 1993, 1994 Charles Hannum. | |
103 | * | 103 | * | |
104 | * Redistribution and use in source and binary forms, with or without | 104 | * Redistribution and use in source and binary forms, with or without | |
105 | * modification, are permitted provided that the following conditions | 105 | * modification, are permitted provided that the following conditions | |
106 | * are met: | 106 | * are met: | |
107 | * 1. Redistributions of source code must retain the above copyright | 107 | * 1. Redistributions of source code must retain the above copyright | |
108 | * notice, this list of conditions and the following disclaimer. | 108 | * notice, this list of conditions and the following disclaimer. | |
109 | * 2. Redistributions in binary form must reproduce the above copyright | 109 | * 2. Redistributions in binary form must reproduce the above copyright | |
110 | * notice, this list of conditions and the following disclaimer in the | 110 | * notice, this list of conditions and the following disclaimer in the | |
111 | * documentation and/or other materials provided with the distribution. | 111 | * documentation and/or other materials provided with the distribution. | |
112 | * 3. All advertising materials mentioning features or use of this software | 112 | * 3. All advertising materials mentioning features or use of this software | |
113 | * must display the following acknowledgement: | 113 | * must display the following acknowledgement: | |
114 | * This product includes software developed by the University of | 114 | * This product includes software developed by the University of | |
115 | * California, Berkeley and its contributors. | 115 | * California, Berkeley and its contributors. | |
116 | * 4. Neither the name of the University nor the names of its contributors | 116 | * 4. Neither the name of the University nor the names of its contributors | |
117 | * may be used to endorse or promote products derived from this software | 117 | * may be used to endorse or promote products derived from this software | |
118 | * without specific prior written permission. | 118 | * without specific prior written permission. | |
119 | * | 119 | * | |
120 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | 120 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
121 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 121 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
122 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 122 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
123 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | 123 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
124 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 124 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
125 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | 125 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
126 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | 126 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
127 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | 127 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
128 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 128 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
129 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 129 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
130 | * SUCH DAMAGE. | 130 | * SUCH DAMAGE. | |
131 | * | 131 | * | |
132 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 | 132 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 | |
133 | */ | 133 | */ | |
134 | 134 | |||
135 | #include <sys/cdefs.h> | 135 | #include <sys/cdefs.h> | |
136 | __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.166 2023/11/29 11:40:37 mlelstv Exp $"); | 136 | __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.167 2024/03/05 20:58:05 andvar Exp $"); | |
137 | 137 | |||
138 | #include "opt_intrdebug.h" | 138 | #include "opt_intrdebug.h" | |
139 | #include "opt_multiprocessor.h" | 139 | #include "opt_multiprocessor.h" | |
140 | #include "opt_acpi.h" | 140 | #include "opt_acpi.h" | |
141 | 141 | |||
142 | #include <sys/param.h> | 142 | #include <sys/param.h> | |
143 | #include <sys/systm.h> | 143 | #include <sys/systm.h> | |
144 | #include <sys/kernel.h> | 144 | #include <sys/kernel.h> | |
145 | #include <sys/syslog.h> | 145 | #include <sys/syslog.h> | |
146 | #include <sys/device.h> | 146 | #include <sys/device.h> | |
147 | #include <sys/kmem.h> | 147 | #include <sys/kmem.h> | |
148 | #include <sys/proc.h> | 148 | #include <sys/proc.h> | |
149 | #include <sys/errno.h> | 149 | #include <sys/errno.h> | |
150 | #include <sys/intr.h> | 150 | #include <sys/intr.h> | |
151 | #include <sys/cpu.h> | 151 | #include <sys/cpu.h> | |
152 | #include <sys/xcall.h> | 152 | #include <sys/xcall.h> | |
153 | #include <sys/interrupt.h> | 153 | #include <sys/interrupt.h> | |
154 | #include <sys/reboot.h> /* for AB_VERBOSE */ | 154 | #include <sys/reboot.h> /* for AB_VERBOSE */ | |
155 | #include <sys/sdt.h> | 155 | #include <sys/sdt.h> | |
156 | 156 | |||
157 | #include <sys/kauth.h> | 157 | #include <sys/kauth.h> | |
158 | #include <sys/conf.h> | 158 | #include <sys/conf.h> | |
159 | 159 | |||
160 | #include <uvm/uvm_extern.h> | 160 | #include <uvm/uvm_extern.h> | |
161 | 161 | |||
162 | #include <machine/i8259.h> | 162 | #include <machine/i8259.h> | |
163 | #include <machine/pio.h> | 163 | #include <machine/pio.h> | |
164 | 164 | |||
165 | #include <x86/intr_private.h> | 165 | #include <x86/intr_private.h> | |
166 | 166 | |||
167 | #include "ioapic.h" | 167 | #include "ioapic.h" | |
168 | #include "lapic.h" | 168 | #include "lapic.h" | |
169 | #include "pci.h" | 169 | #include "pci.h" | |
170 | #include "acpica.h" | 170 | #include "acpica.h" | |
171 | #ifndef XENPV | 171 | #ifndef XENPV | |
172 | #include "hyperv.h" | 172 | #include "hyperv.h" | |
173 | #if NHYPERV > 0 | 173 | #if NHYPERV > 0 | |
174 | #include <dev/hyperv/hypervvar.h> | 174 | #include <dev/hyperv/hypervvar.h> | |
175 | 175 | |||
176 | extern void Xresume_hyperv_hypercall(void); | 176 | extern void Xresume_hyperv_hypercall(void); | |
177 | extern void Xrecurse_hyperv_hypercall(void); | 177 | extern void Xrecurse_hyperv_hypercall(void); | |
178 | #endif | 178 | #endif | |
179 | #endif | 179 | #endif | |
180 | 180 | |||
181 | #if NIOAPIC > 0 || NACPICA > 0 | 181 | #if NIOAPIC > 0 || NACPICA > 0 | |
182 | #include <machine/i82093var.h> | 182 | #include <machine/i82093var.h> | |
183 | #include <machine/mpbiosvar.h> | 183 | #include <machine/mpbiosvar.h> | |
184 | #include <machine/mpacpi.h> | 184 | #include <machine/mpacpi.h> | |
185 | #endif | 185 | #endif | |
186 | 186 | |||
187 | #if NLAPIC > 0 | 187 | #if NLAPIC > 0 | |
188 | #include <machine/i82489var.h> | 188 | #include <machine/i82489var.h> | |
189 | #endif | 189 | #endif | |
190 | 190 | |||
191 | #if NPCI > 0 | 191 | #if NPCI > 0 | |
192 | #include <dev/pci/ppbreg.h> | 192 | #include <dev/pci/ppbreg.h> | |
193 | #endif | 193 | #endif | |
194 | 194 | |||
195 | #include <x86/pci/msipic.h> | 195 | #include <x86/pci/msipic.h> | |
196 | #include <x86/pci/pci_msi_machdep.h> | 196 | #include <x86/pci/pci_msi_machdep.h> | |
197 | 197 | |||
198 | #if NPCI == 0 || !defined(__HAVE_PCI_MSI_MSIX) | 198 | #if NPCI == 0 || !defined(__HAVE_PCI_MSI_MSIX) | |
199 | #define msipic_is_msi_pic(PIC) (false) | 199 | #define msipic_is_msi_pic(PIC) (false) | |
200 | #endif | 200 | #endif | |
201 | 201 | |||
202 | #include <ddb/db_active.h> | 202 | #include <ddb/db_active.h> | |
203 | 203 | |||
204 | #ifdef DDB | 204 | #ifdef DDB | |
205 | #include <ddb/db_output.h> | 205 | #include <ddb/db_output.h> | |
206 | #endif | 206 | #endif | |
207 | 207 | |||
208 | #ifdef INTRDEBUG | 208 | #ifdef INTRDEBUG | |
209 | #define DPRINTF(msg) printf msg | 209 | #define DPRINTF(msg) printf msg | |
210 | #else | 210 | #else | |
211 | #define DPRINTF(msg) | 211 | #define DPRINTF(msg) | |
212 | #endif | 212 | #endif | |
213 | 213 | |||
214 | static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources = | 214 | static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources = | |
215 | SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources); | 215 | SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources); | |
216 | 216 | |||
217 | static kmutex_t intr_distribute_lock; | 217 | static kmutex_t intr_distribute_lock; | |
218 | 218 | |||
219 | static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *, | 219 | static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *, | |
220 | struct intrsource *); | 220 | struct intrsource *); | |
221 | static int __noinline intr_allocate_slot(struct pic *, int, int, | 221 | static int __noinline intr_allocate_slot(struct pic *, int, int, | |
222 | struct cpu_info **, int *, int *, | 222 | struct cpu_info **, int *, int *, | |
223 | struct intrsource *); | 223 | struct intrsource *); | |
224 | 224 | |||
225 | static void intr_source_free(struct cpu_info *, int, struct pic *, int); | 225 | static void intr_source_free(struct cpu_info *, int, struct pic *, int); | |
226 | 226 | |||
227 | static void intr_establish_xcall(void *, void *); | 227 | static void intr_establish_xcall(void *, void *); | |
228 | static void intr_disestablish_xcall(void *, void *); | 228 | static void intr_disestablish_xcall(void *, void *); | |
229 | 229 | |||
230 | static const char *legacy_intr_string(int, char *, size_t, struct pic *); | 230 | static const char *legacy_intr_string(int, char *, size_t, struct pic *); | |
231 | 231 | |||
232 | static const char *xen_intr_string(int, char *, size_t, struct pic *); | 232 | static const char *xen_intr_string(int, char *, size_t, struct pic *); | |
233 | 233 | |||
234 | #if defined(INTRSTACKSIZE) | 234 | #if defined(INTRSTACKSIZE) | |
235 | static inline bool redzone_const_or_false(bool); | 235 | static inline bool redzone_const_or_false(bool); | |
236 | static inline int redzone_const_or_zero(int); | 236 | static inline int redzone_const_or_zero(int); | |
237 | #endif | 237 | #endif | |
238 | 238 | |||
239 | static void intr_redistribute_xc_t(void *, void *); | 239 | static void intr_redistribute_xc_t(void *, void *); | |
240 | static void intr_redistribute_xc_s1(void *, void *); | 240 | static void intr_redistribute_xc_s1(void *, void *); | |
241 | static void intr_redistribute_xc_s2(void *, void *); | 241 | static void intr_redistribute_xc_s2(void *, void *); | |
242 | static bool intr_redistribute(struct cpu_info *); | 242 | static bool intr_redistribute(struct cpu_info *); | |
243 | static struct intrsource *intr_get_io_intrsource(const char *); | 243 | static struct intrsource *intr_get_io_intrsource(const char *); | |
244 | static void intr_free_io_intrsource_direct(struct intrsource *); | 244 | static void intr_free_io_intrsource_direct(struct intrsource *); | |
245 | static int intr_num_handlers(struct intrsource *); | 245 | static int intr_num_handlers(struct intrsource *); | |
246 | static int intr_find_unused_slot(struct cpu_info *, int *); | 246 | static int intr_find_unused_slot(struct cpu_info *, int *); | |
247 | static void intr_activate_xcall(void *, void *); | 247 | static void intr_activate_xcall(void *, void *); | |
248 | static void intr_deactivate_xcall(void *, void *); | 248 | static void intr_deactivate_xcall(void *, void *); | |
249 | static void intr_get_affinity(struct intrsource *, kcpuset_t *); | 249 | static void intr_get_affinity(struct intrsource *, kcpuset_t *); | |
250 | static int intr_set_affinity(struct intrsource *, const kcpuset_t *); | 250 | static int intr_set_affinity(struct intrsource *, const kcpuset_t *); | |
251 | 251 | |||
252 | SDT_PROBE_DEFINE3(sdt, kernel, intr, entry, | 252 | SDT_PROBE_DEFINE3(sdt, kernel, intr, entry, | |
253 | "int (*)(void *)"/*func*/, | 253 | "int (*)(void *)"/*func*/, | |
254 | "void *"/*arg*/, | 254 | "void *"/*arg*/, | |
255 | "struct intrhand *"/*ih*/); | 255 | "struct intrhand *"/*ih*/); | |
256 | SDT_PROBE_DEFINE4(sdt, kernel, intr, return, | 256 | SDT_PROBE_DEFINE4(sdt, kernel, intr, return, | |
257 | "int (*)(void *)"/*func*/, | 257 | "int (*)(void *)"/*func*/, | |
258 | "void *"/*arg*/, | 258 | "void *"/*arg*/, | |
259 | "struct intrhand *"/*ih*/, | 259 | "struct intrhand *"/*ih*/, | |
260 | "int"/*handled*/); | 260 | "int"/*handled*/); | |
261 | 261 | |||
262 | /* | 262 | /* | |
263 | * Fill in default interrupt table (in case of spurious interrupt | 263 | * Fill in default interrupt table (in case of spurious interrupt | |
264 | * during configuration of kernel), setup interrupt control unit | 264 | * during configuration of kernel), setup interrupt control unit | |
265 | */ | 265 | */ | |
266 | void | 266 | void | |
267 | intr_default_setup(void) | 267 | intr_default_setup(void) | |
268 | { | 268 | { | |
269 | struct idt_vec *iv = &(cpu_info_primary.ci_idtvec); | 269 | struct idt_vec *iv = &(cpu_info_primary.ci_idtvec); | |
270 | int i; | 270 | int i; | |
271 | 271 | |||
272 | /* icu vectors */ | 272 | /* icu vectors */ | |
273 | for (i = 0; i < NUM_LEGACY_IRQS; i++) { | 273 | for (i = 0; i < NUM_LEGACY_IRQS; i++) { | |
274 | idt_vec_reserve(iv, ICU_OFFSET + i); | 274 | idt_vec_reserve(iv, ICU_OFFSET + i); | |
275 | idt_vec_set(iv, ICU_OFFSET + i, legacy_stubs[i].ist_entry); | 275 | idt_vec_set(iv, ICU_OFFSET + i, legacy_stubs[i].ist_entry); | |
276 | } | 276 | } | |
277 | 277 | |||
278 | /* | 278 | /* | |
279 | * Eventually might want to check if it's actually there. | 279 | * Eventually might want to check if it's actually there. | |
280 | */ | 280 | */ | |
281 | i8259_default_setup(); | 281 | i8259_default_setup(); | |
282 | 282 | |||
283 | mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE); | 283 | mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE); | |
284 | } | 284 | } | |
285 | 285 | |||
286 | /* | 286 | /* | |
287 | * Handle a NMI, possibly a machine check. | 287 | * Handle a NMI, possibly a machine check. | |
288 | * return true to panic system, false to ignore. | 288 | * return true to panic system, false to ignore. | |
289 | */ | 289 | */ | |
290 | void | 290 | void | |
291 | x86_nmi(void) | 291 | x86_nmi(void) | |
292 | { | 292 | { | |
293 | 293 | |||
294 | log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70)); | 294 | log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70)); | |
295 | } | 295 | } | |
296 | 296 | |||
297 | /* | 297 | /* | |
298 | * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used | 298 | * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used | |
299 | * by MI code and intrctl(8). | 299 | * by MI code and intrctl(8). | |
300 | */ | 300 | */ | |
301 | const char * | 301 | const char * | |
302 | intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, | 302 | intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, | |
303 | size_t len) | 303 | size_t len) | |
304 | { | 304 | { | |
305 | int ih = 0; | 305 | int ih = 0; | |
306 | 306 | |||
307 | #if NPCI > 0 | 307 | #if NPCI > 0 | |
308 | #if defined(__HAVE_PCI_MSI_MSIX) | 308 | #if defined(__HAVE_PCI_MSI_MSIX) | |
309 | if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) { | 309 | if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) { | |
310 | uint64_t pih; | 310 | uint64_t pih; | |
311 | int dev, vec; | 311 | int dev, vec; | |
312 | 312 | |||
313 | dev = msipic_get_devid(pic); | 313 | dev = msipic_get_devid(pic); | |
314 | vec = pin; | 314 | vec = pin; | |
315 | pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK) | 315 | pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK) | |
316 | | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK) | 316 | | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK) | |
317 | | APIC_INT_VIA_MSI; | 317 | | APIC_INT_VIA_MSI; | |
318 | if (pic->pic_type == PIC_MSI) | 318 | if (pic->pic_type == PIC_MSI) | |
319 | MSI_INT_MAKE_MSI(pih); | 319 | MSI_INT_MAKE_MSI(pih); | |
320 | else if (pic->pic_type == PIC_MSIX) | 320 | else if (pic->pic_type == PIC_MSIX) | |
321 | MSI_INT_MAKE_MSIX(pih); | 321 | MSI_INT_MAKE_MSIX(pih); | |
322 | 322 | |||
323 | return x86_pci_msi_string(NULL, pih, buf, len); | 323 | return x86_pci_msi_string(NULL, pih, buf, len); | |
324 | } | 324 | } | |
325 | #endif /* __HAVE_PCI_MSI_MSIX */ | 325 | #endif /* __HAVE_PCI_MSI_MSIX */ | |
326 | #endif | 326 | #endif | |
327 | 327 | |||
328 | if (pic->pic_type == PIC_XEN) { | 328 | if (pic->pic_type == PIC_XEN) { | |
329 | ih = pin; /* Port == pin */ | 329 | ih = pin; /* Port == pin */ | |
330 | return xen_intr_string(pin, buf, len, pic); | 330 | return xen_intr_string(pin, buf, len, pic); | |
331 | } | 331 | } | |
332 | 332 | |||
333 | /* | 333 | /* | |
334 | * If the device is pci, "legacy_irq" is always -1. Least 8 bit of "ih" | 334 | * If the device is pci, "legacy_irq" is always -1. Least 8 bit of "ih" | |
335 | * is only used in intr_string() to show the irq number. | 335 | * is only used in intr_string() to show the irq number. | |
336 | * If the device is "legacy"(such as floppy), it should not use | 336 | * If the device is "legacy"(such as floppy), it should not use | |
337 | * intr_string(). | 337 | * intr_string(). | |
338 | */ | 338 | */ | |
339 | if (pic->pic_type == PIC_I8259) { | 339 | if (pic->pic_type == PIC_I8259) { | |
340 | ih = legacy_irq; | 340 | ih = legacy_irq; | |
341 | return legacy_intr_string(ih, buf, len, pic); | 341 | return legacy_intr_string(ih, buf, len, pic); | |
342 | } | 342 | } | |
343 | 343 | |||
344 | #if NIOAPIC > 0 || NACPICA > 0 | 344 | #if NIOAPIC > 0 || NACPICA > 0 | |
345 | ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK) | 345 | ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK) | |
346 | | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK); | 346 | | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK); | |
347 | if (pic->pic_type == PIC_IOAPIC) { | 347 | if (pic->pic_type == PIC_IOAPIC) { | |
348 | ih |= APIC_INT_VIA_APIC; | 348 | ih |= APIC_INT_VIA_APIC; | |
349 | } | 349 | } | |
350 | ih |= pin; | 350 | ih |= pin; | |
351 | return intr_string(ih, buf, len); | 351 | return intr_string(ih, buf, len); | |
352 | #endif | 352 | #endif | |
353 | 353 | |||
354 | return NULL; /* No pic found! */ | 354 | return NULL; /* No pic found! */ | |
355 | } | 355 | } | |
356 | 356 | |||
357 | /* | 357 | /* | |
358 | * Find intrsource from io_interrupt_sources list. | 358 | * Find intrsource from io_interrupt_sources list. | |
359 | */ | 359 | */ | |
360 | static struct intrsource * | 360 | static struct intrsource * | |
361 | intr_get_io_intrsource(const char *intrid) | 361 | intr_get_io_intrsource(const char *intrid) | |
362 | { | 362 | { | |
363 | struct intrsource *isp; | 363 | struct intrsource *isp; | |
364 | 364 | |||
365 | KASSERT(mutex_owned(&cpu_lock)); | 365 | KASSERT(mutex_owned(&cpu_lock)); | |
366 | 366 | |||
367 | SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) { | 367 | SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) { | |
368 | KASSERT(isp->is_intrid != NULL); | 368 | KASSERT(isp->is_intrid != NULL); | |
369 | if (strncmp(intrid, isp->is_intrid, INTRIDBUF - 1) == 0) | 369 | if (strncmp(intrid, isp->is_intrid, INTRIDBUF - 1) == 0) | |
370 | return isp; | 370 | return isp; | |
371 | } | 371 | } | |
372 | return NULL; | 372 | return NULL; | |
373 | } | 373 | } | |
374 | 374 | |||
375 | /* | 375 | /* | |
376 | * Allocate intrsource and add to io_interrupt_sources list. | 376 | * Allocate intrsource and add to io_interrupt_sources list. | |
377 | */ | 377 | */ | |
378 | struct intrsource * | 378 | struct intrsource * | |
379 | intr_allocate_io_intrsource(const char *intrid) | 379 | intr_allocate_io_intrsource(const char *intrid) | |
380 | { | 380 | { | |
381 | CPU_INFO_ITERATOR cii; | 381 | CPU_INFO_ITERATOR cii; | |
382 | struct cpu_info *ci; | 382 | struct cpu_info *ci; | |
383 | struct intrsource *isp; | 383 | struct intrsource *isp; | |
384 | struct percpu_evcnt *pep; | 384 | struct percpu_evcnt *pep; | |
385 | 385 | |||
386 | KASSERT(mutex_owned(&cpu_lock)); | 386 | KASSERT(mutex_owned(&cpu_lock)); | |
387 | 387 | |||
388 | if (intrid == NULL) | 388 | if (intrid == NULL) | |
389 | return NULL; | 389 | return NULL; | |
390 | 390 | |||
391 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); | 391 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); | |
392 | pep = kmem_zalloc(sizeof(*pep) * ncpu, KM_SLEEP); | 392 | pep = kmem_zalloc(sizeof(*pep) * ncpu, KM_SLEEP); | |
393 | isp->is_saved_evcnt = pep; | 393 | isp->is_saved_evcnt = pep; | |
394 | for (CPU_INFO_FOREACH(cii, ci)) { | 394 | for (CPU_INFO_FOREACH(cii, ci)) { | |
395 | pep->cpuid = ci->ci_cpuid; | 395 | pep->cpuid = ci->ci_cpuid; | |
396 | pep++; | 396 | pep++; | |
397 | } | 397 | } | |
398 | strlcpy(isp->is_intrid, intrid, sizeof(isp->is_intrid)); | 398 | strlcpy(isp->is_intrid, intrid, sizeof(isp->is_intrid)); | |
399 | 399 | |||
400 | SIMPLEQ_INSERT_TAIL(&io_interrupt_sources, isp, is_list); | 400 | SIMPLEQ_INSERT_TAIL(&io_interrupt_sources, isp, is_list); | |
401 | 401 | |||
402 | return isp; | 402 | return isp; | |
403 | } | 403 | } | |
404 | 404 | |||
405 | /* | 405 | /* | |
406 | * Remove from io_interrupt_sources list and free by the intrsource pointer. | 406 | * Remove from io_interrupt_sources list and free by the intrsource pointer. | |
407 | */ | 407 | */ | |
408 | static void | 408 | static void | |
409 | intr_free_io_intrsource_direct(struct intrsource *isp) | 409 | intr_free_io_intrsource_direct(struct intrsource *isp) | |
410 | { | 410 | { | |
411 | KASSERT(mutex_owned(&cpu_lock)); | 411 | KASSERT(mutex_owned(&cpu_lock)); | |
412 | 412 | |||
413 | SIMPLEQ_REMOVE(&io_interrupt_sources, isp, intrsource, is_list); | 413 | SIMPLEQ_REMOVE(&io_interrupt_sources, isp, intrsource, is_list); | |
414 | 414 | |||
415 | /* Is this interrupt established? */ | 415 | /* Is this interrupt established? */ | |
416 | if (isp->is_evname[0] != '\0') { | 416 | if (isp->is_evname[0] != '\0') { | |
417 | evcnt_detach(&isp->is_evcnt); | 417 | evcnt_detach(&isp->is_evcnt); | |
418 | isp->is_evname[0] = '\0'; | 418 | isp->is_evname[0] = '\0'; | |
419 | } | 419 | } | |
420 | 420 | |||
421 | kmem_free(isp->is_saved_evcnt, | 421 | kmem_free(isp->is_saved_evcnt, | |
422 | sizeof(*(isp->is_saved_evcnt)) * ncpu); | 422 | sizeof(*(isp->is_saved_evcnt)) * ncpu); | |
423 | 423 | |||
424 | kmem_free(isp, sizeof(*isp)); | 424 | kmem_free(isp, sizeof(*isp)); | |
425 | } | 425 | } | |
426 | 426 | |||
427 | /* | 427 | /* | |
428 | * Remove from io_interrupt_sources list and free by the interrupt id. | 428 | * Remove from io_interrupt_sources list and free by the interrupt id. | |
429 | * This function can be used by MI code. | 429 | * This function can be used by MI code. | |
430 | */ | 430 | */ | |
431 | void | 431 | void | |
432 | intr_free_io_intrsource(const char *intrid) | 432 | intr_free_io_intrsource(const char *intrid) | |
433 | { | 433 | { | |
434 | struct intrsource *isp; | 434 | struct intrsource *isp; | |
435 | 435 | |||
436 | KASSERT(mutex_owned(&cpu_lock)); | 436 | KASSERT(mutex_owned(&cpu_lock)); | |
437 | 437 | |||
438 | if (intrid == NULL) | 438 | if (intrid == NULL) | |
439 | return; | 439 | return; | |
440 | 440 | |||
441 | if ((isp = intr_get_io_intrsource(intrid)) == NULL) { | 441 | if ((isp = intr_get_io_intrsource(intrid)) == NULL) { | |
442 | return; | 442 | return; | |
443 | } | 443 | } | |
444 | 444 | |||
445 | /* If the interrupt uses shared IRQ, don't free yet. */ | 445 | /* If the interrupt uses shared IRQ, don't free yet. */ | |
446 | if (isp->is_handlers != NULL) { | 446 | if (isp->is_handlers != NULL) { | |
447 | return; | 447 | return; | |
448 | } | 448 | } | |
449 | 449 | |||
450 | intr_free_io_intrsource_direct(isp); | 450 | intr_free_io_intrsource_direct(isp); | |
451 | } | 451 | } | |
452 | 452 | |||
453 | static int | 453 | static int | |
454 | intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin, | 454 | intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin, | |
455 | int *index, struct intrsource *chained) | 455 | int *index, struct intrsource *chained) | |
456 | { | 456 | { | |
457 | int slot, i; | 457 | int slot, i; | |
458 | struct intrsource *isp; | 458 | struct intrsource *isp; | |
459 | 459 | |||
460 | KASSERT(mutex_owned(&cpu_lock)); | 460 | KASSERT(mutex_owned(&cpu_lock)); | |
461 | 461 | |||
462 | if (pic == &i8259_pic) { | 462 | if (pic == &i8259_pic) { | |
463 | KASSERT(CPU_IS_PRIMARY(ci)); | 463 | KASSERT(CPU_IS_PRIMARY(ci)); | |
464 | slot = pin; | 464 | slot = pin; | |
465 | } else { | 465 | } else { | |
466 | int start = 0; | 466 | int start = 0; | |
467 | int max = MAX_INTR_SOURCES; | 467 | int max = MAX_INTR_SOURCES; | |
468 | slot = -1; | 468 | slot = -1; | |
469 | 469 | |||
470 | /* avoid reserved slots for legacy interrupts. */ | 470 | /* avoid reserved slots for legacy interrupts. */ | |
471 | if (CPU_IS_PRIMARY(ci) && msipic_is_msi_pic(pic)) | 471 | if (CPU_IS_PRIMARY(ci) && msipic_is_msi_pic(pic)) | |
472 | start = NUM_LEGACY_IRQS; | 472 | start = NUM_LEGACY_IRQS; | |
473 | /* don't step over Xen's slots */ | 473 | /* don't step over Xen's slots */ | |
474 | if (vm_guest == VM_GUEST_XENPVH) | 474 | if (vm_guest == VM_GUEST_XENPVH) | |
475 | max = SIR_XENIPL_VM; | 475 | max = SIR_XENIPL_VM; | |
476 | /* | 476 | /* | |
477 | * intr_allocate_slot has checked for an existing mapping. | 477 | * intr_allocate_slot has checked for an existing mapping. | |
478 | * Now look for a free slot. | 478 | * Now look for a free slot. | |
479 | */ | 479 | */ | |
480 | for (i = start; i < max ; i++) { | 480 | for (i = start; i < max ; i++) { | |
481 | if (ci->ci_isources[i] == NULL) { | 481 | if (ci->ci_isources[i] == NULL) { | |
482 | slot = i; | 482 | slot = i; | |
483 | break; | 483 | break; | |
484 | } | 484 | } | |
485 | } | 485 | } | |
486 | if (slot == -1) { | 486 | if (slot == -1) { | |
487 | return EBUSY; | 487 | return EBUSY; | |
488 | } | 488 | } | |
489 | } | 489 | } | |
490 | 490 | |||
491 | isp = ci->ci_isources[slot]; | 491 | isp = ci->ci_isources[slot]; | |
492 | if (isp == NULL) { | 492 | if (isp == NULL) { | |
493 | const char *via; | 493 | const char *via; | |
494 | 494 | |||
495 | isp = chained; | 495 | isp = chained; | |
496 | KASSERT(isp != NULL); | 496 | KASSERT(isp != NULL); | |
497 | if (pic->pic_type == PIC_MSI || pic->pic_type == PIC_MSIX) | 497 | if (pic->pic_type == PIC_MSI || pic->pic_type == PIC_MSIX) | |
498 | via = "vec"; | 498 | via = "vec"; | |
499 | else | 499 | else | |
500 | via = "pin"; | 500 | via = "pin"; | |
501 | snprintf(isp->is_evname, sizeof (isp->is_evname), | 501 | snprintf(isp->is_evname, sizeof (isp->is_evname), | |
502 | "%s %d", via, pin); | 502 | "%s %d", via, pin); | |
503 | evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL, | 503 | evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL, | |
504 | pic->pic_name, isp->is_evname); | 504 | pic->pic_name, isp->is_evname); | |
505 | isp->is_active_cpu = ci->ci_cpuid; | 505 | isp->is_active_cpu = ci->ci_cpuid; | |
506 | ci->ci_isources[slot] = isp; | 506 | ci->ci_isources[slot] = isp; | |
507 | } | 507 | } | |
508 | 508 | |||
509 | *index = slot; | 509 | *index = slot; | |
510 | return 0; | 510 | return 0; | |
511 | } | 511 | } | |
512 | 512 | |||
513 | /* | 513 | /* | |
514 | * A simple round-robin allocator to assign interrupts to CPUs. | 514 | * A simple round-robin allocator to assign interrupts to CPUs. | |
515 | */ | 515 | */ | |
516 | static int __noinline | 516 | static int __noinline | |
517 | intr_allocate_slot(struct pic *pic, int pin, int level, | 517 | intr_allocate_slot(struct pic *pic, int pin, int level, | |
518 | struct cpu_info **cip, int *index, int *idt_slot, | 518 | struct cpu_info **cip, int *index, int *idt_slot, | |
519 | struct intrsource *chained) | 519 | struct intrsource *chained) | |
520 | { | 520 | { | |
521 | CPU_INFO_ITERATOR cii; | 521 | CPU_INFO_ITERATOR cii; | |
522 | struct cpu_info *ci, *lci; | 522 | struct cpu_info *ci, *lci; | |
523 | struct intrsource *isp; | 523 | struct intrsource *isp; | |
524 | int slot = 0, idtvec, error; | 524 | int slot = 0, idtvec, error; | |
525 | 525 | |||
526 | KASSERT(mutex_owned(&cpu_lock)); | 526 | KASSERT(mutex_owned(&cpu_lock)); | |
527 | 527 | |||
528 | /* First check if this pin is already used by an interrupt vector. */ | 528 | /* First check if this pin is already used by an interrupt vector. */ | |
529 | for (CPU_INFO_FOREACH(cii, ci)) { | 529 | for (CPU_INFO_FOREACH(cii, ci)) { | |
530 | for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) { | 530 | for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) { | |
531 | if ((isp = ci->ci_isources[slot]) == NULL) { | 531 | if ((isp = ci->ci_isources[slot]) == NULL) { | |
532 | continue; | 532 | continue; | |
533 | } | 533 | } | |
534 | if (isp->is_pic == pic && | 534 | if (isp->is_pic == pic && | |
535 | pin != -1 && isp->is_pin == pin) { | 535 | pin != -1 && isp->is_pin == pin) { | |
536 | *idt_slot = isp->is_idtvec; | 536 | *idt_slot = isp->is_idtvec; | |
537 | *index = slot; | 537 | *index = slot; | |
538 | *cip = ci; | 538 | *cip = ci; | |
539 | return 0; | 539 | return 0; | |
540 | } | 540 | } | |
541 | } | 541 | } | |
542 | } | 542 | } | |
543 | 543 | |||
544 | /* | 544 | /* | |
545 | * The pic/pin combination doesn't have an existing mapping. | 545 | * The pic/pin combination doesn't have an existing mapping. | |
546 | * Find a slot for a new interrupt source. For the i8259 case, | 546 | * Find a slot for a new interrupt source. For the i8259 case, | |
547 | * we always use reserved slots of the primary CPU. Otherwise, | 547 | * we always use reserved slots of the primary CPU. Otherwise, | |
548 | * we make an attempt to balance the interrupt load. | 548 | * we make an attempt to balance the interrupt load. | |
549 | * | 549 | * | |
550 | * PIC and APIC usage are essentially exclusive, so the reservation | 550 | * PIC and APIC usage are essentially exclusive, so the reservation | |
551 | * of the ISA slots is ignored when assigning IOAPIC slots. | 551 | * of the ISA slots is ignored when assigning IOAPIC slots. | |
552 | */ | 552 | */ | |
553 | if (pic == &i8259_pic) { | 553 | if (pic == &i8259_pic) { | |
554 | /* | 554 | /* | |
555 | * Must be directed to BP. | 555 | * Must be directed to BP. | |
556 | */ | 556 | */ | |
557 | ci = &cpu_info_primary; | 557 | ci = &cpu_info_primary; | |
558 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); | 558 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); | |
559 | } else { | 559 | } else { | |
560 | /* | 560 | /* | |
561 | * Find least loaded AP/BP and try to allocate there. | 561 | * Find least loaded AP/BP and try to allocate there. | |
562 | */ | 562 | */ | |
563 | ci = NULL; | 563 | ci = NULL; | |
564 | for (CPU_INFO_FOREACH(cii, lci)) { | 564 | for (CPU_INFO_FOREACH(cii, lci)) { | |
565 | if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { | 565 | if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { | |
566 | continue; | 566 | continue; | |
567 | } | 567 | } | |
568 | #if 0 | 568 | #if 0 | |
569 | if (ci == NULL || | 569 | if (ci == NULL || | |
570 | ci->ci_nintrhand > lci->ci_nintrhand) { | 570 | ci->ci_nintrhand > lci->ci_nintrhand) { | |
571 | ci = lci; | 571 | ci = lci; | |
572 | } | 572 | } | |
573 | #else | 573 | #else | |
574 | ci = &cpu_info_primary; | 574 | ci = &cpu_info_primary; | |
575 | #endif | 575 | #endif | |
576 | } | 576 | } | |
577 | KASSERT(ci != NULL); | 577 | KASSERT(ci != NULL); | |
578 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); | 578 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); | |
579 | 579 | |||
580 | /* | 580 | /* | |
581 | * If that did not work, allocate anywhere. | 581 | * If that did not work, allocate anywhere. | |
582 | */ | 582 | */ | |
583 | if (error != 0) { | 583 | if (error != 0) { | |
584 | for (CPU_INFO_FOREACH(cii, ci)) { | 584 | for (CPU_INFO_FOREACH(cii, ci)) { | |
585 | if ((ci->ci_schedstate.spc_flags & | 585 | if ((ci->ci_schedstate.spc_flags & | |
586 | SPCF_NOINTR) != 0) { | 586 | SPCF_NOINTR) != 0) { | |
587 | continue; | 587 | continue; | |
588 | } | 588 | } | |
589 | error = intr_allocate_slot_cpu(ci, pic, | 589 | error = intr_allocate_slot_cpu(ci, pic, | |
590 | pin, &slot, chained); | 590 | pin, &slot, chained); | |
591 | if (error == 0) { | 591 | if (error == 0) { | |
592 | break; | 592 | break; | |
593 | } | 593 | } | |
594 | } | 594 | } | |
595 | } | 595 | } | |
596 | } | 596 | } | |
597 | if (error != 0) { | 597 | if (error != 0) { | |
598 | return error; | 598 | return error; | |
599 | } | 599 | } | |
600 | KASSERT(ci != NULL); | 600 | KASSERT(ci != NULL); | |
601 | 601 | |||
602 | /* | 602 | /* | |
603 | * Now allocate an IDT vector. | 603 | * Now allocate an IDT vector. | |
604 | * For the 8259 these are reserved up front. | 604 | * For the 8259 these are reserved up front. | |
605 | */ | 605 | */ | |
606 | if (pic == &i8259_pic) { | 606 | if (pic == &i8259_pic) { | |
607 | idtvec = ICU_OFFSET + pin; | 607 | idtvec = ICU_OFFSET + pin; | |
608 | } else { | 608 | } else { | |
609 | /* | 609 | /* | |
610 | * TODO to support MSI (not MSI-X) multiple vectors | 610 | * TODO to support MSI (not MSI-X) multiple vectors | |
611 | * | 611 | * | |
612 | * PCI Local Bus Specification Revision 3.0 says the devices | 612 | * PCI Local Bus Specification Revision 3.0 says the devices | |
613 | * which use MSI multiple vectors increment the low order bits | 613 | * which use MSI multiple vectors increment the low order bits | |
614 | * of MSI message data. | 614 | * of MSI message data. | |
615 | * On the other hand, Intel SDM "10.11.2 Message Data Register | 615 | * On the other hand, Intel SDM "10.11.2 Message Data Register | |
616 | * Format" says the 7:0 bits of MSI message data mean Interrupt | 616 | * Format" says the 7:0 bits of MSI message data mean Interrupt | |
617 | * Descriptor Table(IDT) vector. | 617 | * Descriptor Table(IDT) vector. | |
618 | * As the result of these two documents, the IDT vectors which | 618 | * As the result of these two documents, the IDT vectors which | |
619 | * are used by a device using MSI multiple vectors must be | 619 | * are used by a device using MSI multiple vectors must be | |
620 | * continuous. | 620 | * continuous. | |
621 | */ | 621 | */ | |
622 | struct idt_vec *iv; | 622 | struct idt_vec *iv; | |
623 | 623 | |||
624 | iv = idt_vec_ref(&ci->ci_idtvec); | 624 | iv = idt_vec_ref(&ci->ci_idtvec); | |
625 | idtvec = idt_vec_alloc(iv, APIC_LEVEL(level), IDT_INTR_HIGH); | 625 | idtvec = idt_vec_alloc(iv, APIC_LEVEL(level), IDT_INTR_HIGH); | |
626 | } | 626 | } | |
627 | if (idtvec < 0) { | 627 | if (idtvec < 0) { | |
628 | evcnt_detach(&ci->ci_isources[slot]->is_evcnt); | 628 | evcnt_detach(&ci->ci_isources[slot]->is_evcnt); | |
629 | ci->ci_isources[slot]->is_evname[0] = '\0'; | 629 | ci->ci_isources[slot]->is_evname[0] = '\0'; | |
630 | ci->ci_isources[slot] = NULL; | 630 | ci->ci_isources[slot] = NULL; | |
631 | return EBUSY; | 631 | return EBUSY; | |
632 | } | 632 | } | |
633 | ci->ci_isources[slot]->is_idtvec = idtvec; | 633 | ci->ci_isources[slot]->is_idtvec = idtvec; | |
634 | *idt_slot = idtvec; | 634 | *idt_slot = idtvec; | |
635 | *index = slot; | 635 | *index = slot; | |
636 | *cip = ci; | 636 | *cip = ci; | |
637 | return 0; | 637 | return 0; | |
638 | } | 638 | } | |
639 | 639 | |||
640 | static void | 640 | static void | |
641 | intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec) | 641 | intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec) | |
642 | { | 642 | { | |
643 | struct intrsource *isp; | 643 | struct intrsource *isp; | |
644 | struct idt_vec *iv; | 644 | struct idt_vec *iv; | |
645 | 645 | |||
646 | isp = ci->ci_isources[slot]; | 646 | isp = ci->ci_isources[slot]; | |
647 | iv = idt_vec_ref(&ci->ci_idtvec); | 647 | iv = idt_vec_ref(&ci->ci_idtvec); | |
648 | 648 | |||
649 | if (isp->is_handlers != NULL) | 649 | if (isp->is_handlers != NULL) | |
650 | return; | 650 | return; | |
651 | ci->ci_isources[slot] = NULL; | 651 | ci->ci_isources[slot] = NULL; | |
652 | if (pic != &i8259_pic) | 652 | if (pic != &i8259_pic) | |
653 | idt_vec_free(iv, idtvec); | 653 | idt_vec_free(iv, idtvec); | |
654 | 654 | |||
655 | isp->is_recurse = NULL; | 655 | isp->is_recurse = NULL; | |
656 | isp->is_resume = NULL; | 656 | isp->is_resume = NULL; | |
657 | } | 657 | } | |
658 | 658 | |||
659 | #ifdef MULTIPROCESSOR | 659 | #ifdef MULTIPROCESSOR | |
660 | static int intr_biglock_wrapper(void *); | 660 | static int intr_biglock_wrapper(void *); | |
661 | 661 | |||
662 | /* | 662 | /* | |
663 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. | 663 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. | |
664 | */ | 664 | */ | |
665 | 665 | |||
666 | static int | 666 | static int | |
667 | intr_biglock_wrapper(void *vp) | 667 | intr_biglock_wrapper(void *vp) | |
668 | { | 668 | { | |
669 | struct intrhand *ih = vp; | 669 | struct intrhand *ih = vp; | |
670 | int locks; | 670 | int locks; | |
671 | int ret; | 671 | int ret; | |
672 | 672 | |||
673 | KERNEL_LOCK(1, NULL); | 673 | KERNEL_LOCK(1, NULL); | |
674 | 674 | |||
675 | locks = curcpu()->ci_biglock_count; | 675 | locks = curcpu()->ci_biglock_count; | |
676 | SDT_PROBE3(sdt, kernel, intr, entry, | 676 | SDT_PROBE3(sdt, kernel, intr, entry, | |
677 | ih->ih_realfun, ih->ih_realarg, ih); | 677 | ih->ih_realfun, ih->ih_realarg, ih); | |
678 | ret = (*ih->ih_realfun)(ih->ih_realarg); | 678 | ret = (*ih->ih_realfun)(ih->ih_realarg); | |
679 | SDT_PROBE4(sdt, kernel, intr, return, | 679 | SDT_PROBE4(sdt, kernel, intr, return, | |
680 | ih->ih_realfun, ih->ih_realarg, ih, ret); | 680 | ih->ih_realfun, ih->ih_realarg, ih, ret); | |
681 | KASSERTMSG(locks == curcpu()->ci_biglock_count, | 681 | KASSERTMSG(locks == curcpu()->ci_biglock_count, | |
682 | "%s @ %p slipped locks %d -> %d", | 682 | "%s @ %p slipped locks %d -> %d", | |
683 | ih->ih_xname, ih->ih_realfun, locks, curcpu()->ci_biglock_count); | 683 | ih->ih_xname, ih->ih_realfun, locks, curcpu()->ci_biglock_count); | |
684 | 684 | |||
685 | KERNEL_UNLOCK_ONE(NULL); | 685 | KERNEL_UNLOCK_ONE(NULL); | |
686 | 686 | |||
687 | return ret; | 687 | return ret; | |
688 | } | 688 | } | |
689 | #endif /* MULTIPROCESSOR */ | 689 | #endif /* MULTIPROCESSOR */ | |
690 | 690 | |||
691 | #ifdef KDTRACE_HOOKS | 691 | #ifdef KDTRACE_HOOKS | |
692 | static int | 692 | static int | |
693 | intr_kdtrace_wrapper(void *vp) | 693 | intr_kdtrace_wrapper(void *vp) | |
694 | { | 694 | { | |
695 | struct intrhand *ih = vp; | 695 | struct intrhand *ih = vp; | |
696 | int ret; | 696 | int ret; | |
697 | 697 | |||
698 | SDT_PROBE3(sdt, kernel, intr, entry, | 698 | SDT_PROBE3(sdt, kernel, intr, entry, | |
699 | ih->ih_realfun, ih->ih_realarg, ih); | 699 | ih->ih_realfun, ih->ih_realarg, ih); | |
700 | ret = (*ih->ih_realfun)(ih->ih_realarg); | 700 | ret = (*ih->ih_realfun)(ih->ih_realarg); | |
701 | SDT_PROBE4(sdt, kernel, intr, return, | 701 | SDT_PROBE4(sdt, kernel, intr, return, | |
702 | ih->ih_realfun, ih->ih_realarg, ih, ret); | 702 | ih->ih_realfun, ih->ih_realarg, ih, ret); | |
703 | 703 | |||
704 | return ret; | 704 | return ret; | |
705 | } | 705 | } | |
706 | #endif | 706 | #endif | |
707 | 707 | |||
708 | /* | 708 | /* | |
709 | * Append device name to intrsource. If device A and device B share IRQ number, | 709 | * Append device name to intrsource. If device A and device B share IRQ number, | |
710 | * the device name of the interrupt id is "device A, device B". | 710 | * the device name of the interrupt id is "device A, device B". | |
711 | */ | 711 | */ | |
712 | static void | 712 | static void | |
713 | intr_append_intrsource_xname(struct intrsource *isp, const char *xname) | 713 | intr_append_intrsource_xname(struct intrsource *isp, const char *xname) | |
714 | { | 714 | { | |
715 | 715 | |||
716 | if (isp->is_xname[0] != '\0') | 716 | if (isp->is_xname[0] != '\0') | |
717 | strlcat(isp->is_xname, ", ", sizeof(isp->is_xname)); | 717 | strlcat(isp->is_xname, ", ", sizeof(isp->is_xname)); | |
718 | strlcat(isp->is_xname, xname, sizeof(isp->is_xname)); | 718 | strlcat(isp->is_xname, xname, sizeof(isp->is_xname)); | |
719 | } | 719 | } | |
720 | 720 | |||
721 | /* | 721 | /* | |
722 | * Called on bound CPU to handle calling pic_hwunmask from contexts | 722 | * Called on bound CPU to handle calling pic_hwunmask from contexts | |
723 | * that are not already running on the bound CPU. | 723 | * that are not already running on the bound CPU. | |
724 | * | 724 | * | |
725 | * => caller (on initiating CPU) holds cpu_lock on our behalf | 725 | * => caller (on initiating CPU) holds cpu_lock on our behalf | |
726 | * => arg1: struct intrhand *ih | 726 | * => arg1: struct intrhand *ih | |
727 | */ | 727 | */ | |
728 | static void | 728 | static void | |
729 | intr_hwunmask_xcall(void *arg1, void *arg2) | 729 | intr_hwunmask_xcall(void *arg1, void *arg2) | |
730 | { | 730 | { | |
731 | struct intrhand * const ih = arg1; | 731 | struct intrhand * const ih = arg1; | |
732 | struct cpu_info * const ci = ih->ih_cpu; | 732 | struct cpu_info * const ci = ih->ih_cpu; | |
733 | 733 | |||
734 | KASSERT(ci == curcpu() || !mp_online); | 734 | KASSERT(ci == curcpu() || !mp_online); | |
735 | 735 | |||
736 | const u_long psl = x86_read_psl(); | 736 | const u_long psl = x86_read_psl(); | |
737 | x86_disable_intr(); | 737 | x86_disable_intr(); | |
738 | 738 | |||
739 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; | 739 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; | |
740 | struct pic * const pic = source->is_pic; | 740 | struct pic * const pic = source->is_pic; | |
741 | 741 | |||
742 | if (source->is_mask_count == 0) { | 742 | if (source->is_mask_count == 0) { | |
743 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | 743 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | |
744 | } | 744 | } | |
745 | 745 | |||
746 | x86_write_psl(psl); | 746 | x86_write_psl(psl); | |
747 | } | 747 | } | |
748 | 748 | |||
749 | /* | 749 | /* | |
750 | * Handle per-CPU component of interrupt establish. | 750 | * Handle per-CPU component of interrupt establish. | |
751 | * | 751 | * | |
752 | * => caller (on initiating CPU) holds cpu_lock on our behalf | 752 | * => caller (on initiating CPU) holds cpu_lock on our behalf | |
753 | * => arg1: struct intrhand *ih | 753 | * => arg1: struct intrhand *ih | |
754 | * => arg2: int idt_vec | 754 | * => arg2: int idt_vec | |
755 | */ | 755 | */ | |
756 | static void | 756 | static void | |
757 | intr_establish_xcall(void *arg1, void *arg2) | 757 | intr_establish_xcall(void *arg1, void *arg2) | |
758 | { | 758 | { | |
759 | struct idt_vec *iv; | 759 | struct idt_vec *iv; | |
760 | struct intrsource *source; | 760 | struct intrsource *source; | |
761 | struct intrstub *stubp; | 761 | struct intrstub *stubp; | |
762 | struct intrhand *ih; | 762 | struct intrhand *ih; | |
763 | struct cpu_info *ci; | 763 | struct cpu_info *ci; | |
764 | int idt_vec; | 764 | int idt_vec; | |
765 | u_long psl; | 765 | u_long psl; | |
766 | 766 | |||
767 | ih = arg1; | 767 | ih = arg1; | |
768 | 768 | |||
769 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | 769 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | |
770 | 770 | |||
771 | ci = ih->ih_cpu; | 771 | ci = ih->ih_cpu; | |
772 | source = ci->ci_isources[ih->ih_slot]; | 772 | source = ci->ci_isources[ih->ih_slot]; | |
773 | idt_vec = (int)(intptr_t)arg2; | 773 | idt_vec = (int)(intptr_t)arg2; | |
774 | iv = idt_vec_ref(&ci->ci_idtvec); | 774 | iv = idt_vec_ref(&ci->ci_idtvec); | |
775 | 775 | |||
776 | /* Disable interrupts locally. */ | 776 | /* Disable interrupts locally. */ | |
777 | psl = x86_read_psl(); | 777 | psl = x86_read_psl(); | |
778 | x86_disable_intr(); | 778 | x86_disable_intr(); | |
779 | 779 | |||
780 | /* Link in the handler and re-calculate masks. */ | 780 | /* Link in the handler and re-calculate masks. */ | |
781 | *(ih->ih_prevp) = ih; | 781 | *(ih->ih_prevp) = ih; | |
782 | x86_intr_calculatemasks(ci); | 782 | x86_intr_calculatemasks(ci); | |
783 | 783 | |||
784 | /* Hook in new IDT vector and SPL state. */ | 784 | /* Hook in new IDT vector and SPL state. */ | |
785 | if (source->is_resume == NULL || source->is_idtvec != idt_vec) { | 785 | if (source->is_resume == NULL || source->is_idtvec != idt_vec) { | |
786 | if (source->is_idtvec != 0 && source->is_idtvec != idt_vec) | 786 | if (source->is_idtvec != 0 && source->is_idtvec != idt_vec) | |
787 | idt_vec_free(iv, source->is_idtvec); | 787 | idt_vec_free(iv, source->is_idtvec); | |
788 | source->is_idtvec = idt_vec; | 788 | source->is_idtvec = idt_vec; | |
789 | if (source->is_type == IST_LEVEL) { | 789 | if (source->is_type == IST_LEVEL) { | |
790 | stubp = &source->is_pic->pic_level_stubs[ih->ih_slot]; | 790 | stubp = &source->is_pic->pic_level_stubs[ih->ih_slot]; | |
791 | } else { | 791 | } else { | |
792 | stubp = &source->is_pic->pic_edge_stubs[ih->ih_slot]; | 792 | stubp = &source->is_pic->pic_edge_stubs[ih->ih_slot]; | |
793 | } | 793 | } | |
794 | source->is_resume = stubp->ist_resume; | 794 | source->is_resume = stubp->ist_resume; | |
795 | source->is_recurse = stubp->ist_recurse; | 795 | source->is_recurse = stubp->ist_recurse; | |
796 | idt_vec_set(iv, idt_vec, stubp->ist_entry); | 796 | idt_vec_set(iv, idt_vec, stubp->ist_entry); | |
797 | } | 797 | } | |
798 | 798 | |||
799 | /* Re-enable interrupts locally. */ | 799 | /* Re-enable interrupts locally. */ | |
800 | x86_write_psl(psl); | 800 | x86_write_psl(psl); | |
801 | } | 801 | } | |
802 | 802 | |||
803 | void * | 803 | void * | |
804 | intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type, | 804 | intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type, | |
805 | int level, int (*handler)(void *), void *arg, | 805 | int level, int (*handler)(void *), void *arg, | |
806 | bool known_mpsafe, const char *xname) | 806 | bool known_mpsafe, const char *xname) | |
807 | { | 807 | { | |
808 | struct intrhand **p, *q, *ih; | 808 | struct intrhand **p, *q, *ih; | |
809 | struct cpu_info *ci; | 809 | struct cpu_info *ci; | |
810 | int slot, error, idt_vec; | 810 | int slot, error, idt_vec; | |
811 | struct intrsource *chained, *source; | 811 | struct intrsource *chained, *source; | |
812 | #ifdef MULTIPROCESSOR | 812 | #ifdef MULTIPROCESSOR | |
813 | bool mpsafe = (known_mpsafe || level != IPL_VM); | 813 | bool mpsafe = (known_mpsafe || level != IPL_VM); | |
814 | #endif /* MULTIPROCESSOR */ | 814 | #endif /* MULTIPROCESSOR */ | |
815 | uint64_t where; | 815 | uint64_t where; | |
816 | const char *intrstr; | 816 | const char *intrstr; | |
817 | char intrstr_buf[INTRIDBUF]; | 817 | char intrstr_buf[INTRIDBUF]; | |
818 | 818 | |||
819 | KASSERTMSG((legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < 16)), | 819 | KASSERTMSG((legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < 16)), | |
820 | "bad legacy IRQ value: %d", legacy_irq); | 820 | "bad legacy IRQ value: %d", legacy_irq); | |
821 | KASSERTMSG((legacy_irq != -1 || pic != &i8259_pic), | 821 | KASSERTMSG((legacy_irq != -1 || pic != &i8259_pic), | |
822 | "non-legacy IRQ on i8259"); | 822 | "non-legacy IRQ on i8259"); | |
823 | 823 | |||
824 | ih = kmem_alloc(sizeof(*ih), KM_SLEEP); | 824 | ih = kmem_alloc(sizeof(*ih), KM_SLEEP); | |
825 | intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf, | 825 | intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf, | |
826 | sizeof(intrstr_buf)); | 826 | sizeof(intrstr_buf)); | |
827 | KASSERT(intrstr != NULL); | 827 | KASSERT(intrstr != NULL); | |
828 | 828 | |||
829 | mutex_enter(&cpu_lock); | 829 | mutex_enter(&cpu_lock); | |
830 | 830 | |||
831 | /* allocate intrsource pool, if not yet. */ | 831 | /* allocate intrsource pool, if not yet. */ | |
832 | chained = intr_get_io_intrsource(intrstr); | 832 | chained = intr_get_io_intrsource(intrstr); | |
833 | if (chained == NULL) { | 833 | if (chained == NULL) { | |
834 | if (msipic_is_msi_pic(pic)) { | 834 | if (msipic_is_msi_pic(pic)) { | |
835 | mutex_exit(&cpu_lock); | 835 | mutex_exit(&cpu_lock); | |
836 | kmem_free(ih, sizeof(*ih)); | 836 | kmem_free(ih, sizeof(*ih)); | |
837 | printf("%s: %s has no intrsource\n", __func__, intrstr); | 837 | printf("%s: %s has no intrsource\n", __func__, intrstr); | |
838 | return NULL; | 838 | return NULL; | |
839 | } | 839 | } | |
840 | chained = intr_allocate_io_intrsource(intrstr); | 840 | chained = intr_allocate_io_intrsource(intrstr); | |
841 | if (chained == NULL) { | 841 | if (chained == NULL) { | |
842 | mutex_exit(&cpu_lock); | 842 | mutex_exit(&cpu_lock); | |
843 | kmem_free(ih, sizeof(*ih)); | 843 | kmem_free(ih, sizeof(*ih)); | |
844 | printf("%s: can't allocate io_intersource\n", __func__); | 844 | printf("%s: can't allocate io_intersource\n", __func__); | |
845 | return NULL; | 845 | return NULL; | |
846 | } | 846 | } | |
847 | } | 847 | } | |
848 | 848 | |||
849 | error = intr_allocate_slot(pic, pin, level, &ci, &slot, &idt_vec, | 849 | error = intr_allocate_slot(pic, pin, level, &ci, &slot, &idt_vec, | |
850 | chained); | 850 | chained); | |
851 | if (error != 0) { | 851 | if (error != 0) { | |
852 | intr_free_io_intrsource_direct(chained); | 852 | intr_free_io_intrsource_direct(chained); | |
853 | mutex_exit(&cpu_lock); | 853 | mutex_exit(&cpu_lock); | |
854 | kmem_free(ih, sizeof(*ih)); | 854 | kmem_free(ih, sizeof(*ih)); | |
855 | printf("failed to allocate interrupt slot for PIC %s pin %d\n", | 855 | printf("failed to allocate interrupt slot for PIC %s pin %d\n", | |
856 | pic->pic_name, pin); | 856 | pic->pic_name, pin); | |
857 | return NULL; | 857 | return NULL; | |
858 | } | 858 | } | |
859 | 859 | |||
860 | source = ci->ci_isources[slot]; | 860 | source = ci->ci_isources[slot]; | |
861 | 861 | |||
862 | if (source->is_handlers != NULL && | 862 | if (source->is_handlers != NULL && | |
863 | source->is_pic->pic_type != pic->pic_type) { | 863 | source->is_pic->pic_type != pic->pic_type) { | |
864 | intr_free_io_intrsource_direct(chained); | 864 | intr_free_io_intrsource_direct(chained); | |
865 | mutex_exit(&cpu_lock); | 865 | mutex_exit(&cpu_lock); | |
866 | kmem_free(ih, sizeof(*ih)); | 866 | kmem_free(ih, sizeof(*ih)); | |
867 | printf("%s: can't share intr source between " | 867 | printf("%s: can't share intr source between " | |
868 | "different PIC types (legacy_irq %d pin %d slot %d)\n", | 868 | "different PIC types (legacy_irq %d pin %d slot %d)\n", | |
869 | __func__, legacy_irq, pin, slot); | 869 | __func__, legacy_irq, pin, slot); | |
870 | return NULL; | 870 | return NULL; | |
871 | } | 871 | } | |
872 | 872 | |||
873 | source->is_pin = pin; | 873 | source->is_pin = pin; | |
874 | source->is_pic = pic; | 874 | source->is_pic = pic; | |
875 | intr_append_intrsource_xname(source, xname); | 875 | intr_append_intrsource_xname(source, xname); | |
876 | switch (source->is_type) { | 876 | switch (source->is_type) { | |
877 | case IST_NONE: | 877 | case IST_NONE: | |
878 | source->is_type = type; | 878 | source->is_type = type; | |
879 | break; | 879 | break; | |
880 | case IST_EDGE: | 880 | case IST_EDGE: | |
881 | case IST_LEVEL: | 881 | case IST_LEVEL: | |
882 | if (source->is_type == type) | 882 | if (source->is_type == type) | |
883 | break; | 883 | break; | |
884 | /* FALLTHROUGH */ | 884 | /* FALLTHROUGH */ | |
885 | case IST_PULSE: | 885 | case IST_PULSE: | |
886 | if (type != IST_NONE) { | 886 | if (type != IST_NONE) { | |
887 | int otype = source->is_type; | 887 | int otype = source->is_type; | |
888 | 888 | |||
889 | intr_source_free(ci, slot, pic, idt_vec); | 889 | intr_source_free(ci, slot, pic, idt_vec); | |
890 | intr_free_io_intrsource_direct(chained); | 890 | intr_free_io_intrsource_direct(chained); | |
891 | mutex_exit(&cpu_lock); | 891 | mutex_exit(&cpu_lock); | |
892 | kmem_free(ih, sizeof(*ih)); | 892 | kmem_free(ih, sizeof(*ih)); | |
893 | printf("%s: pic %s pin %d: can't share " | 893 | printf("%s: pic %s pin %d: can't share " | |
894 | "type %d with %d\n", | 894 | "type %d with %d\n", | |
895 | __func__, pic->pic_name, pin, | 895 | __func__, pic->pic_name, pin, | |
896 | otype, type); | 896 | otype, type); | |
897 | return NULL; | 897 | return NULL; | |
898 | } | 898 | } | |
899 | break; | 899 | break; | |
900 | default: | 900 | default: | |
901 | panic("%s: bad intr type %d for pic %s pin %d\n", | 901 | panic("%s: bad intr type %d for pic %s pin %d\n", | |
902 | __func__, source->is_type, pic->pic_name, pin); | 902 | __func__, source->is_type, pic->pic_name, pin); | |
903 | /* NOTREACHED */ | 903 | /* NOTREACHED */ | |
904 | } | 904 | } | |
905 | 905 | |||
906 | /* | 906 | /* | |
907 | * If the establishing interrupt uses shared IRQ, the interrupt uses | 907 | * If the establishing interrupt uses shared IRQ, the interrupt uses | |
908 | * "ci->ci_isources[slot]" instead of allocated by the establishing | 908 | * "ci->ci_isources[slot]" instead of allocated by the establishing | |
909 | * device's pci_intr_alloc() or this function. | 909 | * device's pci_intr_alloc() or this function. | |
910 | */ | 910 | */ | |
911 | if (source->is_handlers != NULL) { | 911 | if (source->is_handlers != NULL) { | |
912 | struct intrsource *isp, *nisp; | 912 | struct intrsource *isp, *nisp; | |
913 | 913 | |||
914 | SIMPLEQ_FOREACH_SAFE(isp, &io_interrupt_sources, | 914 | SIMPLEQ_FOREACH_SAFE(isp, &io_interrupt_sources, | |
915 | is_list, nisp) { | 915 | is_list, nisp) { | |
916 | if (strncmp(intrstr, isp->is_intrid, INTRIDBUF - 1) == 0 | 916 | if (strncmp(intrstr, isp->is_intrid, INTRIDBUF - 1) == 0 | |
917 | && isp->is_handlers == NULL) | 917 | && isp->is_handlers == NULL) | |
918 | intr_free_io_intrsource_direct(isp); | 918 | intr_free_io_intrsource_direct(isp); | |
919 | } | 919 | } | |
920 | } | 920 | } | |
921 | 921 | |||
922 | /* | 922 | /* | |
923 | * We're now committed. Mask the interrupt in hardware and | 923 | * We're now committed. Mask the interrupt in hardware and | |
924 | * count it for load distribution. | 924 | * count it for load distribution. | |
925 | */ | 925 | */ | |
926 | (*pic->pic_hwmask)(pic, pin); | 926 | (*pic->pic_hwmask)(pic, pin); | |
927 | (ci->ci_nintrhand)++; | 927 | (ci->ci_nintrhand)++; | |
928 | 928 | |||
929 | /* | 929 | /* | |
930 | * Figure out where to put the handler. | 930 | * Figure out where to put the handler. | |
931 | * This is O(N^2), but we want to preserve the order, and N is | 931 | * This is O(N^2), but we want to preserve the order, and N is | |
932 | * generally small. | 932 | * generally small. | |
933 | */ | 933 | */ | |
934 | for (p = &ci->ci_isources[slot]->is_handlers; | 934 | for (p = &ci->ci_isources[slot]->is_handlers; | |
935 | (q = *p) != NULL && q->ih_level > level; | 935 | (q = *p) != NULL && q->ih_level > level; | |
936 | p = &q->ih_next) { | 936 | p = &q->ih_next) { | |
937 | /* nothing */; | 937 | /* nothing */; | |
938 | } | 938 | } | |
939 | 939 | |||
940 | ih->ih_pic = pic; | 940 | ih->ih_pic = pic; | |
941 | ih->ih_fun = ih->ih_realfun = handler; | 941 | ih->ih_fun = ih->ih_realfun = handler; | |
942 | ih->ih_arg = ih->ih_realarg = arg; | 942 | ih->ih_arg = ih->ih_realarg = arg; | |
943 | ih->ih_prevp = p; | 943 | ih->ih_prevp = p; | |
944 | ih->ih_next = *p; | 944 | ih->ih_next = *p; | |
945 | ih->ih_level = level; | 945 | ih->ih_level = level; | |
946 | ih->ih_pin = pin; | 946 | ih->ih_pin = pin; | |
947 | ih->ih_cpu = ci; | 947 | ih->ih_cpu = ci; | |
948 | ih->ih_slot = slot; | 948 | ih->ih_slot = slot; | |
949 | strlcpy(ih->ih_xname, xname, sizeof(ih->ih_xname)); | 949 | strlcpy(ih->ih_xname, xname, sizeof(ih->ih_xname)); | |
950 | #ifdef KDTRACE_HOOKS | 950 | #ifdef KDTRACE_HOOKS | |
951 | /* | 951 | /* | |
952 | * XXX i8254_clockintr is special -- takes a magic extra | 952 | * XXX i8254_clockintr is special -- takes a magic extra | |
953 | * argument. This should be fixed properly in some way that | 953 | * argument. This should be fixed properly in some way that | |
954 | * doesn't involve sketchy function pointer casts. See also | 954 | * doesn't involve sketchy function pointer casts. See also | |
955 | * the comments in x86/isa/clock.c. | 955 | * the comments in x86/isa/clock.c. | |
956 | */ | 956 | */ | |
957 | if (handler != __FPTRCAST(int (*)(void *), i8254_clockintr)) { | 957 | if (handler != __FPTRCAST(int (*)(void *), i8254_clockintr)) { | |
958 | ih->ih_fun = intr_kdtrace_wrapper; | 958 | ih->ih_fun = intr_kdtrace_wrapper; | |
959 | ih->ih_arg = ih; | 959 | ih->ih_arg = ih; | |
960 | } | 960 | } | |
961 | #endif | 961 | #endif | |
962 | #ifdef MULTIPROCESSOR | 962 | #ifdef MULTIPROCESSOR | |
963 | if (!mpsafe) { | 963 | if (!mpsafe) { | |
964 | KASSERT(handler != /* XXX */ | 964 | KASSERT(handler != /* XXX */ | |
965 | __FPTRCAST(int (*)(void *), i8254_clockintr)); | 965 | __FPTRCAST(int (*)(void *), i8254_clockintr)); | |
966 | ih->ih_fun = intr_biglock_wrapper; | 966 | ih->ih_fun = intr_biglock_wrapper; | |
967 | ih->ih_arg = ih; | 967 | ih->ih_arg = ih; | |
968 | } | 968 | } | |
969 | #endif /* MULTIPROCESSOR */ | 969 | #endif /* MULTIPROCESSOR */ | |
970 | 970 | |||
971 | /* | 971 | /* | |
972 | * Call out to the remote CPU to update its interrupt state. | 972 | * Call out to the remote CPU to update its interrupt state. | |
973 | * Only make RPCs if the APs are up and running. | 973 | * Only make RPCs if the APs are up and running. | |
974 | */ | 974 | */ | |
975 | if (ci == curcpu() || !mp_online) { | 975 | if (ci == curcpu() || !mp_online) { | |
976 | intr_establish_xcall(ih, (void *)(intptr_t)idt_vec); | 976 | intr_establish_xcall(ih, (void *)(intptr_t)idt_vec); | |
977 | } else { | 977 | } else { | |
978 | where = xc_unicast(0, intr_establish_xcall, ih, | 978 | where = xc_unicast(0, intr_establish_xcall, ih, | |
979 | (void *)(intptr_t)idt_vec, ci); | 979 | (void *)(intptr_t)idt_vec, ci); | |
980 | xc_wait(where); | 980 | xc_wait(where); | |
981 | } | 981 | } | |
982 | 982 | |||
983 | /* All set up, so add a route for the interrupt and unmask it. */ | 983 | /* All set up, so add a route for the interrupt and unmask it. */ | |
984 | (*pic->pic_addroute)(pic, ci, pin, idt_vec, type); | 984 | (*pic->pic_addroute)(pic, ci, pin, idt_vec, type); | |
985 | if (ci == curcpu() || !mp_online) { | 985 | if (ci == curcpu() || !mp_online) { | |
986 | intr_hwunmask_xcall(ih, NULL); | 986 | intr_hwunmask_xcall(ih, NULL); | |
987 | } else { | 987 | } else { | |
988 | where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci); | 988 | where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci); | |
989 | xc_wait(where); | 989 | xc_wait(where); | |
990 | } | 990 | } | |
991 | mutex_exit(&cpu_lock); | 991 | mutex_exit(&cpu_lock); | |
992 | 992 | |||
993 | if (bootverbose || cpu_index(ci) != 0) | 993 | if (bootverbose || cpu_index(ci) != 0) | |
994 | aprint_verbose("allocated pic %s type %s pin %d level %d to " | 994 | aprint_verbose("allocated pic %s type %s pin %d level %d to " | |
995 | "%s slot %d idt entry %d\n", | 995 | "%s slot %d idt entry %d\n", | |
996 | pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, | 996 | pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, | |
997 | level, device_xname(ci->ci_dev), slot, idt_vec); | 997 | level, device_xname(ci->ci_dev), slot, idt_vec); | |
998 | 998 | |||
999 | return ih; | 999 | return ih; | |
1000 | } | 1000 | } | |
1001 | 1001 | |||
1002 | void * | 1002 | void * | |
1003 | intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level, | 1003 | intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level, | |
1004 | int (*handler)(void *), void *arg, bool known_mpsafe) | 1004 | int (*handler)(void *), void *arg, bool known_mpsafe) | |
1005 | { | 1005 | { | |
1006 | 1006 | |||
1007 | return intr_establish_xname(legacy_irq, pic, pin, type, | 1007 | return intr_establish_xname(legacy_irq, pic, pin, type, | |
1008 | level, handler, arg, known_mpsafe, "unknown"); | 1008 | level, handler, arg, known_mpsafe, "unknown"); | |
1009 | } | 1009 | } | |
1010 | 1010 | |||
1011 | /* | 1011 | /* | |
1012 | * Called on bound CPU to handle intr_mask() / intr_unmask(). | 1012 | * Called on bound CPU to handle intr_mask() / intr_unmask(). | |
1013 | * | 1013 | * | |
1014 | * => caller (on initiating CPU) holds cpu_lock on our behalf | 1014 | * => caller (on initiating CPU) holds cpu_lock on our behalf | |
1015 | * => arg1: struct intrhand *ih | 1015 | * => arg1: struct intrhand *ih | |
1016 | * => arg2: true -> mask, false -> unmask. | 1016 | * => arg2: true -> mask, false -> unmask. | |
1017 | */ | 1017 | */ | |
1018 | static void | 1018 | static void | |
1019 | intr_mask_xcall(void *arg1, void *arg2) | 1019 | intr_mask_xcall(void *arg1, void *arg2) | |
1020 | { | 1020 | { | |
1021 | struct intrhand * const ih = arg1; | 1021 | struct intrhand * const ih = arg1; | |
1022 | const uintptr_t mask = (uintptr_t)arg2; | 1022 | const uintptr_t mask = (uintptr_t)arg2; | |
1023 | struct cpu_info * const ci = ih->ih_cpu; | 1023 | struct cpu_info * const ci = ih->ih_cpu; | |
1024 | bool force_pending = false; | 1024 | bool force_pending = false; | |
1025 | 1025 | |||
1026 | KASSERT(ci == curcpu() || !mp_online); | 1026 | KASSERT(ci == curcpu() || !mp_online); | |
1027 | 1027 | |||
1028 | /* | 1028 | /* | |
1029 | * We need to disable interrupts to hold off the interrupt | 1029 | * We need to disable interrupts to hold off the interrupt | |
1030 | * vectors. | 1030 | * vectors. | |
1031 | */ | 1031 | */ | |
1032 | const u_long psl = x86_read_psl(); | 1032 | const u_long psl = x86_read_psl(); | |
1033 | x86_disable_intr(); | 1033 | x86_disable_intr(); | |
1034 | 1034 | |||
1035 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; | 1035 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; | |
1036 | struct pic * const pic = source->is_pic; | 1036 | struct pic * const pic = source->is_pic; | |
1037 | 1037 | |||
1038 | if (mask) { | 1038 | if (mask) { | |
1039 | source->is_mask_count++; | 1039 | source->is_mask_count++; | |
1040 | KASSERT(source->is_mask_count != 0); | 1040 | KASSERT(source->is_mask_count != 0); | |
1041 | if (source->is_mask_count == 1) { | 1041 | if (source->is_mask_count == 1) { | |
1042 | (*pic->pic_hwmask)(pic, ih->ih_pin); | 1042 | (*pic->pic_hwmask)(pic, ih->ih_pin); | |
1043 | } | 1043 | } | |
1044 | } else { | 1044 | } else { | |
1045 | KASSERT(source->is_mask_count != 0); | 1045 | KASSERT(source->is_mask_count != 0); | |
1046 | if (--source->is_mask_count == 0) { | 1046 | if (--source->is_mask_count == 0) { | |
1047 | /* | 1047 | /* | |
1048 | * If this interrupt source is being moved, don't | 1048 | * If this interrupt source is being moved, don't | |
1049 | * unmask it at the hw. | 1049 | * unmask it at the hw. | |
1050 | */ | 1050 | */ | |
1051 | if (! source->is_distribute_pending) { | 1051 | if (! source->is_distribute_pending) { | |
1052 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | 1052 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | |
1053 | } | 1053 | } | |
1054 | 1054 | |||
1055 | /* | 1055 | /* | |
1056 | * For level-sensitive interrupts, the hardware | 1056 | * For level-sensitive interrupts, the hardware | |
1057 | * will let us know. For everything else, we | 1057 | * will let us know. For everything else, we | |
1058 | * need to explicitly handle interrupts that | 1058 | * need to explicitly handle interrupts that | |
1059 | * happened when when the source was masked. | 1059 | * happened when the source was masked. | |
1060 | */ | 1060 | */ | |
1061 | const uint64_t bit = (1U << ih->ih_slot); | 1061 | const uint64_t bit = (1U << ih->ih_slot); | |
1062 | if (ci->ci_imasked & bit) { | 1062 | if (ci->ci_imasked & bit) { | |
1063 | ci->ci_imasked &= ~bit; | 1063 | ci->ci_imasked &= ~bit; | |
1064 | if (source->is_type != IST_LEVEL) { | 1064 | if (source->is_type != IST_LEVEL) { | |
1065 | ci->ci_ipending |= bit; | 1065 | ci->ci_ipending |= bit; | |
1066 | force_pending = true; | 1066 | force_pending = true; | |
1067 | } | 1067 | } | |
1068 | } | 1068 | } | |
1069 | } | 1069 | } | |
1070 | } | 1070 | } | |
1071 | 1071 | |||
1072 | /* Re-enable interrupts. */ | 1072 | /* Re-enable interrupts. */ | |
1073 | x86_write_psl(psl); | 1073 | x86_write_psl(psl); | |
1074 | 1074 | |||
1075 | if (force_pending) { | 1075 | if (force_pending) { | |
1076 | /* Force processing of any pending interrupts. */ | 1076 | /* Force processing of any pending interrupts. */ | |
1077 | splx(splhigh()); | 1077 | splx(splhigh()); | |
1078 | } | 1078 | } | |
1079 | } | 1079 | } | |
1080 | 1080 | |||
1081 | static void | 1081 | static void | |
1082 | intr_mask_internal(struct intrhand * const ih, const bool mask) | 1082 | intr_mask_internal(struct intrhand * const ih, const bool mask) | |
1083 | { | 1083 | { | |
1084 | 1084 | |||
1085 | /* | 1085 | /* | |
1086 | * Call out to the remote CPU to update its interrupt state. | 1086 | * Call out to the remote CPU to update its interrupt state. | |
1087 | * Only make RPCs if the APs are up and running. | 1087 | * Only make RPCs if the APs are up and running. | |
1088 | */ | 1088 | */ | |
1089 | mutex_enter(&cpu_lock); | 1089 | mutex_enter(&cpu_lock); | |
1090 | struct cpu_info * const ci = ih->ih_cpu; | 1090 | struct cpu_info * const ci = ih->ih_cpu; | |
1091 | void * const mask_arg = (void *)(uintptr_t)mask; | 1091 | void * const mask_arg = (void *)(uintptr_t)mask; | |
1092 | if (ci == curcpu() || !mp_online) { | 1092 | if (ci == curcpu() || !mp_online) { | |
1093 | intr_mask_xcall(ih, mask_arg); | 1093 | intr_mask_xcall(ih, mask_arg); | |
1094 | } else { | 1094 | } else { | |
1095 | const uint64_t where = | 1095 | const uint64_t where = | |
1096 | xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci); | 1096 | xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci); | |
1097 | xc_wait(where); | 1097 | xc_wait(where); | |
1098 | } | 1098 | } | |
1099 | mutex_exit(&cpu_lock); | 1099 | mutex_exit(&cpu_lock); | |
1100 | } | 1100 | } | |
1101 | 1101 | |||
1102 | void | 1102 | void | |
1103 | intr_mask(struct intrhand *ih) | 1103 | intr_mask(struct intrhand *ih) | |
1104 | { | 1104 | { | |
1105 | 1105 | |||
1106 | if (cpu_intr_p()) { | 1106 | if (cpu_intr_p()) { | |
1107 | /* | 1107 | /* | |
1108 | * Special case of calling intr_mask() from an interrupt | 1108 | * Special case of calling intr_mask() from an interrupt | |
1109 | * handler: we MUST be called from the bound CPU for this | 1109 | * handler: we MUST be called from the bound CPU for this | |
1110 | * interrupt (presumably from a handler we're about to | 1110 | * interrupt (presumably from a handler we're about to | |
1111 | * mask). | 1111 | * mask). | |
1112 | * | 1112 | * | |
1113 | * We can't take the cpu_lock in this case, and we must | 1113 | * We can't take the cpu_lock in this case, and we must | |
1114 | * therefore be extra careful. | 1114 | * therefore be extra careful. | |
1115 | */ | 1115 | */ | |
1116 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | 1116 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | |
1117 | intr_mask_xcall(ih, (void *)(uintptr_t)true); | 1117 | intr_mask_xcall(ih, (void *)(uintptr_t)true); | |
1118 | return; | 1118 | return; | |
1119 | } | 1119 | } | |
1120 | 1120 | |||
1121 | intr_mask_internal(ih, true); | 1121 | intr_mask_internal(ih, true); | |
1122 | } | 1122 | } | |
1123 | 1123 | |||
1124 | void | 1124 | void | |
1125 | intr_unmask(struct intrhand *ih) | 1125 | intr_unmask(struct intrhand *ih) | |
1126 | { | 1126 | { | |
1127 | 1127 | |||
1128 | /* | 1128 | /* | |
1129 | * This is not safe to call from an interrupt context because | 1129 | * This is not safe to call from an interrupt context because | |
1130 | * we don't want to accidentally unmask an interrupt source | 1130 | * we don't want to accidentally unmask an interrupt source | |
1131 | * that's masked because it's being serviced. | 1131 | * that's masked because it's being serviced. | |
1132 | */ | 1132 | */ | |
1133 | KASSERT(!cpu_intr_p()); | 1133 | KASSERT(!cpu_intr_p()); | |
1134 | intr_mask_internal(ih, false); | 1134 | intr_mask_internal(ih, false); | |
1135 | } | 1135 | } | |
1136 | 1136 | |||
1137 | /* | 1137 | /* | |
1138 | * Called on bound CPU to handle intr_disestablish(). | 1138 | * Called on bound CPU to handle intr_disestablish(). | |
1139 | * | 1139 | * | |
1140 | * => caller (on initiating CPU) holds cpu_lock on our behalf | 1140 | * => caller (on initiating CPU) holds cpu_lock on our behalf | |
1141 | * => arg1: struct intrhand *ih | 1141 | * => arg1: struct intrhand *ih | |
1142 | * => arg2: unused | 1142 | * => arg2: unused | |
1143 | */ | 1143 | */ | |
1144 | static void | 1144 | static void | |
1145 | intr_disestablish_xcall(void *arg1, void *arg2) | 1145 | intr_disestablish_xcall(void *arg1, void *arg2) | |
1146 | { | 1146 | { | |
1147 | struct intrhand **p, *q; | 1147 | struct intrhand **p, *q; | |
1148 | struct cpu_info *ci; | 1148 | struct cpu_info *ci; | |
1149 | struct pic *pic; | 1149 | struct pic *pic; | |
1150 | struct intrsource *source; | 1150 | struct intrsource *source; | |
1151 | struct intrhand *ih; | 1151 | struct intrhand *ih; | |
1152 | u_long psl; | 1152 | u_long psl; | |
1153 | int idtvec; | 1153 | int idtvec; | |
1154 | 1154 | |||
1155 | ih = arg1; | 1155 | ih = arg1; | |
1156 | ci = ih->ih_cpu; | 1156 | ci = ih->ih_cpu; | |
1157 | 1157 | |||
1158 | KASSERT(ci == curcpu() || !mp_online); | 1158 | KASSERT(ci == curcpu() || !mp_online); | |
1159 | 1159 | |||
1160 | /* Disable interrupts locally. */ | 1160 | /* Disable interrupts locally. */ | |
1161 | psl = x86_read_psl(); | 1161 | psl = x86_read_psl(); | |
1162 | x86_disable_intr(); | 1162 | x86_disable_intr(); | |
1163 | 1163 | |||
1164 | pic = ci->ci_isources[ih->ih_slot]->is_pic; | 1164 | pic = ci->ci_isources[ih->ih_slot]->is_pic; | |
1165 | source = ci->ci_isources[ih->ih_slot]; | 1165 | source = ci->ci_isources[ih->ih_slot]; | |
1166 | idtvec = source->is_idtvec; | 1166 | idtvec = source->is_idtvec; | |
1167 | 1167 | |||
1168 | (*pic->pic_hwmask)(pic, ih->ih_pin); | 1168 | (*pic->pic_hwmask)(pic, ih->ih_pin); | |
1169 | 1169 | |||
1170 | /* | 1170 | /* | |
1171 | * ci_pending is stable on the current CPU while interrupts are | 1171 | * ci_pending is stable on the current CPU while interrupts are | |
1172 | * blocked, and we only need to synchronize with interrupt | 1172 | * blocked, and we only need to synchronize with interrupt | |
1173 | * vectors on the same CPU, so no need for atomics or membars. | 1173 | * vectors on the same CPU, so no need for atomics or membars. | |
1174 | */ | 1174 | */ | |
1175 | ci->ci_ipending &= ~(1ULL << ih->ih_slot); | 1175 | ci->ci_ipending &= ~(1ULL << ih->ih_slot); | |
1176 | 1176 | |||
1177 | /* | 1177 | /* | |
1178 | * Remove the handler from the chain. | 1178 | * Remove the handler from the chain. | |
1179 | */ | 1179 | */ | |
1180 | for (p = &source->is_handlers; (q = *p) != NULL && q != ih; | 1180 | for (p = &source->is_handlers; (q = *p) != NULL && q != ih; | |
1181 | p = &q->ih_next) | 1181 | p = &q->ih_next) | |
1182 | ; | 1182 | ; | |
1183 | if (q == NULL) { | 1183 | if (q == NULL) { | |
1184 | x86_write_psl(psl); | 1184 | x86_write_psl(psl); | |
1185 | panic("%s: handler not registered", __func__); | 1185 | panic("%s: handler not registered", __func__); | |
1186 | /* NOTREACHED */ | 1186 | /* NOTREACHED */ | |
1187 | } | 1187 | } | |
1188 | 1188 | |||
1189 | *p = q->ih_next; | 1189 | *p = q->ih_next; | |
1190 | 1190 | |||
1191 | x86_intr_calculatemasks(ci); | 1191 | x86_intr_calculatemasks(ci); | |
1192 | /* | 1192 | /* | |
1193 | * If there is no any handler, 1) do delroute because it has no | 1193 | * If there is no any handler, 1) do delroute because it has no | |
1194 | * any source and 2) dont' hwunmask to prevent spurious interrupt. | 1194 | * any source and 2) dont' hwunmask to prevent spurious interrupt. | |
1195 | * | 1195 | * | |
1196 | * If there is any handler, 1) don't delroute because it has source | 1196 | * If there is any handler, 1) don't delroute because it has source | |
1197 | * and 2) do hwunmask to be able to get interrupt again. | 1197 | * and 2) do hwunmask to be able to get interrupt again. | |
1198 | * | 1198 | * | |
1199 | */ | 1199 | */ | |
1200 | if (source->is_handlers == NULL) | 1200 | if (source->is_handlers == NULL) | |
1201 | (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec, | 1201 | (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec, | |
1202 | source->is_type); | 1202 | source->is_type); | |
1203 | else if (source->is_mask_count == 0) | 1203 | else if (source->is_mask_count == 0) | |
1204 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | 1204 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | |
1205 | 1205 | |||
1206 | /* If the source is free we can drop it now. */ | 1206 | /* If the source is free we can drop it now. */ | |
1207 | intr_source_free(ci, ih->ih_slot, pic, idtvec); | 1207 | intr_source_free(ci, ih->ih_slot, pic, idtvec); | |
1208 | 1208 | |||
1209 | /* Re-enable interrupts. */ | 1209 | /* Re-enable interrupts. */ | |
1210 | x86_write_psl(psl); | 1210 | x86_write_psl(psl); | |
1211 | 1211 | |||
1212 | DPRINTF(("%s: remove slot %d (pic %s pin %d vec %d)\n", | 1212 | DPRINTF(("%s: remove slot %d (pic %s pin %d vec %d)\n", | |
1213 | device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name, | 1213 | device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name, | |
1214 | ih->ih_pin, idtvec)); | 1214 | ih->ih_pin, idtvec)); | |
1215 | } | 1215 | } | |
1216 | 1216 | |||
1217 | static int | 1217 | static int | |
1218 | intr_num_handlers(struct intrsource *isp) | 1218 | intr_num_handlers(struct intrsource *isp) | |
1219 | { | 1219 | { | |
1220 | struct intrhand *ih; | 1220 | struct intrhand *ih; | |
1221 | int num; | 1221 | int num; | |
1222 | 1222 | |||
1223 | num = 0; | 1223 | num = 0; | |
1224 | for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) | 1224 | for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) | |
1225 | num++; | 1225 | num++; | |
1226 | 1226 | |||
1227 | return num; | 1227 | return num; | |
1228 | } | 1228 | } | |
1229 | 1229 | |||
1230 | /* | 1230 | /* | |
1231 | * Deregister an interrupt handler. | 1231 | * Deregister an interrupt handler. | |
1232 | */ | 1232 | */ | |
1233 | void | 1233 | void | |
1234 | intr_disestablish(struct intrhand *ih) | 1234 | intr_disestablish(struct intrhand *ih) | |
1235 | { | 1235 | { | |
1236 | struct cpu_info *ci; | 1236 | struct cpu_info *ci; | |
1237 | struct intrsource *isp; | 1237 | struct intrsource *isp; | |
1238 | uint64_t where; | 1238 | uint64_t where; | |
1239 | 1239 | |||
1240 | /* | 1240 | /* | |
1241 | * Count the removal for load balancing. | 1241 | * Count the removal for load balancing. | |
1242 | * Call out to the remote CPU to update its interrupt state. | 1242 | * Call out to the remote CPU to update its interrupt state. | |
1243 | * Only make RPCs if the APs are up and running. | 1243 | * Only make RPCs if the APs are up and running. | |
1244 | */ | 1244 | */ | |
1245 | mutex_enter(&cpu_lock); | 1245 | mutex_enter(&cpu_lock); | |
1246 | ci = ih->ih_cpu; | 1246 | ci = ih->ih_cpu; | |
1247 | (ci->ci_nintrhand)--; | 1247 | (ci->ci_nintrhand)--; | |
1248 | KASSERT(ci->ci_nintrhand >= 0); | 1248 | KASSERT(ci->ci_nintrhand >= 0); | |
1249 | isp = ci->ci_isources[ih->ih_slot]; | 1249 | isp = ci->ci_isources[ih->ih_slot]; | |
1250 | if (ci == curcpu() || !mp_online) { | 1250 | if (ci == curcpu() || !mp_online) { | |
1251 | intr_disestablish_xcall(ih, NULL); | 1251 | intr_disestablish_xcall(ih, NULL); | |
1252 | } else { | 1252 | } else { | |
1253 | where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci); | 1253 | where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci); | |
1254 | xc_wait(where); | 1254 | xc_wait(where); | |
1255 | } | 1255 | } | |
1256 | if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) { | 1256 | if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) { | |
1257 | intr_free_io_intrsource_direct(isp); | 1257 | intr_free_io_intrsource_direct(isp); | |
1258 | } | 1258 | } | |
1259 | mutex_exit(&cpu_lock); | 1259 | mutex_exit(&cpu_lock); | |
1260 | kmem_free(ih, sizeof(*ih)); | 1260 | kmem_free(ih, sizeof(*ih)); | |
1261 | } | 1261 | } | |
1262 | 1262 | |||
1263 | static const char * | 1263 | static const char * | |
1264 | xen_intr_string(int port, char *buf, size_t len, struct pic *pic) | 1264 | xen_intr_string(int port, char *buf, size_t len, struct pic *pic) | |
1265 | { | 1265 | { | |
1266 | KASSERT(pic->pic_type == PIC_XEN); | 1266 | KASSERT(pic->pic_type == PIC_XEN); | |
1267 | 1267 | |||
1268 | KASSERT(port >= 0); | 1268 | KASSERT(port >= 0); | |
1269 | 1269 | |||
1270 | snprintf(buf, len, "%s chan %d", pic->pic_name, port); | 1270 | snprintf(buf, len, "%s chan %d", pic->pic_name, port); | |
1271 | 1271 | |||
1272 | return buf; | 1272 | return buf; | |
1273 | } | 1273 | } | |
1274 | 1274 | |||
1275 | static const char * | 1275 | static const char * | |
1276 | legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic) | 1276 | legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic) | |
1277 | { | 1277 | { | |
1278 | int legacy_irq; | 1278 | int legacy_irq; | |
1279 | 1279 | |||
1280 | KASSERT(pic->pic_type == PIC_I8259); | 1280 | KASSERT(pic->pic_type == PIC_I8259); | |
1281 | #if NLAPIC > 0 | 1281 | #if NLAPIC > 0 | |
1282 | KASSERT(APIC_IRQ_ISLEGACY(ih)); | 1282 | KASSERT(APIC_IRQ_ISLEGACY(ih)); | |
1283 | 1283 | |||
1284 | legacy_irq = APIC_IRQ_LEGACY_IRQ(ih); | 1284 | legacy_irq = APIC_IRQ_LEGACY_IRQ(ih); | |
1285 | #else | 1285 | #else | |
1286 | legacy_irq = ih; | 1286 | legacy_irq = ih; | |
1287 | #endif | 1287 | #endif | |
1288 | KASSERT(legacy_irq >= 0 && legacy_irq < 16); | 1288 | KASSERT(legacy_irq >= 0 && legacy_irq < 16); | |
1289 | 1289 | |||
1290 | snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq); | 1290 | snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq); | |
1291 | 1291 | |||
1292 | return buf; | 1292 | return buf; | |
1293 | } | 1293 | } | |
1294 | 1294 | |||
1295 | const char * | 1295 | const char * | |
1296 | intr_string(intr_handle_t ih, char *buf, size_t len) | 1296 | intr_string(intr_handle_t ih, char *buf, size_t len) | |
1297 | { | 1297 | { | |
1298 | #if NIOAPIC > 0 | 1298 | #if NIOAPIC > 0 | |
1299 | struct ioapic_softc *pic; | 1299 | struct ioapic_softc *pic; | |
1300 | #endif | 1300 | #endif | |
1301 | 1301 | |||
1302 | if (ih == 0) | 1302 | if (ih == 0) | |
1303 | panic("%s: bogus handle 0x%" PRIx64, __func__, ih); | 1303 | panic("%s: bogus handle 0x%" PRIx64, __func__, ih); | |
1304 | 1304 | |||
1305 | #if NIOAPIC > 0 | 1305 | #if NIOAPIC > 0 | |
1306 | if (ih & APIC_INT_VIA_APIC) { | 1306 | if (ih & APIC_INT_VIA_APIC) { | |
1307 | pic = ioapic_find(APIC_IRQ_APIC(ih)); | 1307 | pic = ioapic_find(APIC_IRQ_APIC(ih)); | |
1308 | if (pic != NULL) { | 1308 | if (pic != NULL) { | |
1309 | snprintf(buf, len, "%s pin %d", | 1309 | snprintf(buf, len, "%s pin %d", | |
1310 | device_xname(pic->sc_dev), APIC_IRQ_PIN(ih)); | 1310 | device_xname(pic->sc_dev), APIC_IRQ_PIN(ih)); | |
1311 | } else { | 1311 | } else { | |
1312 | snprintf(buf, len, | 1312 | snprintf(buf, len, | |
1313 | "apic %d int %d (irq %d)", | 1313 | "apic %d int %d (irq %d)", | |
1314 | APIC_IRQ_APIC(ih), | 1314 | APIC_IRQ_APIC(ih), | |
1315 | APIC_IRQ_PIN(ih), | 1315 | APIC_IRQ_PIN(ih), | |
1316 | APIC_IRQ_LEGACY_IRQ(ih)); | 1316 | APIC_IRQ_LEGACY_IRQ(ih)); | |
1317 | } | 1317 | } | |
1318 | } else | 1318 | } else | |
1319 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); | 1319 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); | |
1320 | 1320 | |||
1321 | #elif NLAPIC > 0 | 1321 | #elif NLAPIC > 0 | |
1322 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); | 1322 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); | |
1323 | #else | 1323 | #else | |
1324 | snprintf(buf, len, "irq %d", (int) ih); | 1324 | snprintf(buf, len, "irq %d", (int) ih); | |
1325 | #endif | 1325 | #endif | |
1326 | return buf; | 1326 | return buf; | |
1327 | 1327 | |||
1328 | } | 1328 | } | |
1329 | 1329 | |||
1330 | /* | 1330 | /* | |
1331 | * Fake interrupt handler structures for the benefit of symmetry with | 1331 | * Fake interrupt handler structures for the benefit of symmetry with | |
1332 | * other interrupt sources, and the benefit of x86_intr_calculatemasks() | 1332 | * other interrupt sources, and the benefit of x86_intr_calculatemasks() | |
1333 | */ | 1333 | */ | |
1334 | struct intrhand fake_timer_intrhand; | 1334 | struct intrhand fake_timer_intrhand; | |
1335 | struct intrhand fake_ipi_intrhand; | 1335 | struct intrhand fake_ipi_intrhand; | |
1336 | #if NHYPERV > 0 | 1336 | #if NHYPERV > 0 | |
1337 | struct intrhand fake_hyperv_intrhand; | 1337 | struct intrhand fake_hyperv_intrhand; | |
1338 | #endif | 1338 | #endif | |
1339 | 1339 | |||
1340 | #if NLAPIC > 0 && defined(MULTIPROCESSOR) | 1340 | #if NLAPIC > 0 && defined(MULTIPROCESSOR) | |
1341 | static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES; | 1341 | static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES; | |
1342 | #endif | 1342 | #endif | |
1343 | 1343 | |||
1344 | #if defined(INTRSTACKSIZE) | 1344 | #if defined(INTRSTACKSIZE) | |
1345 | static inline bool | 1345 | static inline bool | |
1346 | redzone_const_or_false(bool x) | 1346 | redzone_const_or_false(bool x) | |
1347 | { | 1347 | { | |
1348 | #ifdef DIAGNOSTIC | 1348 | #ifdef DIAGNOSTIC | |
1349 | return x; | 1349 | return x; | |
1350 | #else | 1350 | #else | |
1351 | return false; | 1351 | return false; | |
1352 | #endif /* !DIAGNOSTIC */ | 1352 | #endif /* !DIAGNOSTIC */ | |
1353 | } | 1353 | } | |
1354 | 1354 | |||
1355 | static inline int | 1355 | static inline int | |
1356 | redzone_const_or_zero(int x) | 1356 | redzone_const_or_zero(int x) | |
1357 | { | 1357 | { | |
1358 | return redzone_const_or_false(true) ? x : 0; | 1358 | return redzone_const_or_false(true) ? x : 0; | |
1359 | } | 1359 | } | |
1360 | #endif | 1360 | #endif | |
1361 | 1361 | |||
/*
 * Initialize all handlers that aren't dynamically allocated, and exist
 * for each CPU.
 */
void
cpu_intr_init(struct cpu_info *ci)
{
#if (NLAPIC > 0) || defined(MULTIPROCESSOR) || \
    (NHYPERV > 0)
	struct intrsource *isp;
#endif
#if NLAPIC > 0
	/* Only the first CPU's timer gets a real EVCNT_TYPE_INTR counter. */
	static int first = 1;
#if defined(MULTIPROCESSOR)
	int i;
#endif
#endif

#if NLAPIC > 0
	/* Static interrupt source for the local APIC timer. */
	isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
	isp->is_recurse = Xrecurse_lapic_ltimer;
	isp->is_resume = Xresume_lapic_ltimer;
	fake_timer_intrhand.ih_pic = &local_pic;
	fake_timer_intrhand.ih_level = IPL_CLOCK;
	isp->is_handlers = &fake_timer_intrhand;
	isp->is_pic = &local_pic;
	ci->ci_isources[LIR_TIMER] = isp;
	evcnt_attach_dynamic(&isp->is_evcnt,
	    first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL,
	    device_xname(ci->ci_dev), "timer");
	first = 0;

#ifdef MULTIPROCESSOR
	/* Static interrupt source for inter-processor interrupts. */
	isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
	isp->is_recurse = Xrecurse_lapic_ipi;
	isp->is_resume = Xresume_lapic_ipi;
	fake_ipi_intrhand.ih_pic = &local_pic;
	fake_ipi_intrhand.ih_level = IPL_HIGH;
	isp->is_handlers = &fake_ipi_intrhand;
	isp->is_pic = &local_pic;
	ci->ci_isources[LIR_IPI] = isp;

	/* One event counter per IPI type. */
	for (i = 0; i < X86_NIPI; i++)
		evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(ci->ci_dev), x86_ipi_names[i]);
#endif

#if NHYPERV > 0
	/* Static interrupt source for Hyper-V hypercall notifications. */
	if (hyperv_hypercall_enabled()) {
		isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
		isp->is_recurse = Xrecurse_hyperv_hypercall;
		isp->is_resume = Xresume_hyperv_hypercall;
		fake_hyperv_intrhand.ih_level = IPL_NET;
		isp->is_handlers = &fake_hyperv_intrhand;
		isp->is_pic = &local_pic;
		ci->ci_isources[LIR_HV] = isp;
		evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(ci->ci_dev), "Hyper-V hypercall");
	}
#endif
#endif

#if defined(__HAVE_PREEMPTION)
	x86_init_preempt(ci);

#endif
	x86_intr_calculatemasks(ci);

#if defined(INTRSTACKSIZE)
	vaddr_t istack;

	/*
	 * If the red zone is activated, protect both the top and
	 * the bottom of the stack with an unmapped page.
	 */
	istack = uvm_km_alloc(kernel_map,
	    INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (redzone_const_or_false(true)) {
		pmap_kremove(istack, PAGE_SIZE);
		pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
		pmap_update(pmap_kernel());
	}

	/*
	 * 33 used to be 1.  Arbitrarily reserve 32 more register_t's
	 * of space for ddb(4) to examine some subroutine arguments
	 * and to hunt for the next stack frame.
	 */
	ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
	    INTRSTACKSIZE - 33 * sizeof(register_t);
#endif

	/* -1 == not currently processing any interrupt. */
	ci->ci_idepth = -1;
}
1457 | 1457 | |||
#if defined(INTRDEBUG) || defined(DDB)

/*
 * Print the interrupt configuration of every CPU: the per-IPL mask and
 * unmask words, and for each established interrupt source its pin, PIC,
 * type, max level and the chain of attached handlers.
 */
void
intr_printconfig(void)
{
	int i;
	struct intrhand *ih;
	struct intrsource *isp;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	void (*pr)(const char *, ...);

	pr = printf;
#ifdef DDB
	/* When the debugger is active, route output through db_printf. */
	if (db_active) {
		pr = db_printf;
	}
#endif

	for (CPU_INFO_FOREACH(cii, ci)) {
		(*pr)("%s: interrupt masks:\n", device_xname(ci->ci_dev));
		for (i = 0; i < NIPL; i++)
			(*pr)("IPL %d mask %016"PRIx64" unmask %016"PRIx64"\n",
			    i, ci->ci_imask[i], ci->ci_iunmask[i]);
		for (i = 0; i < MAX_INTR_SOURCES; i++) {
			isp = ci->ci_isources[i];
			if (isp == NULL)
				continue;
			(*pr)("%s source %d is pin %d from pic %s type %d "
			    "maxlevel %d\n", device_xname(ci->ci_dev), i,
			    isp->is_pin, isp->is_pic->pic_name, isp->is_type,
			    isp->is_maxlevel);
			for (ih = isp->is_handlers; ih != NULL;
			    ih = ih->ih_next)
				(*pr)("\thandler %p level %d\n",
				    ih->ih_fun, ih->ih_level);
#if NIOAPIC > 0
			/* For I/O APIC sources, also dump the redirection entry. */
			if (isp->is_pic->pic_type == PIC_IOAPIC) {
				struct ioapic_softc *sc;
				sc = isp->is_pic->pic_ioapic;
				(*pr)("\tioapic redir 0x%x\n",
				    sc->sc_pins[isp->is_pin].ip_map->redir);
			}
#endif

		}
	}
}

#endif
1508 | 1508 | |||
1509 | /* | 1509 | /* | |
1510 | * Save current affinitied cpu's interrupt count. | 1510 | * Save current affinitied cpu's interrupt count. | |
1511 | */ | 1511 | */ | |
1512 | static void | 1512 | static void | |
1513 | intr_save_evcnt(struct intrsource *source, cpuid_t cpuid) | 1513 | intr_save_evcnt(struct intrsource *source, cpuid_t cpuid) | |
1514 | { | 1514 | { | |
1515 | struct percpu_evcnt *pep; | 1515 | struct percpu_evcnt *pep; | |
1516 | uint64_t curcnt; | 1516 | uint64_t curcnt; | |
1517 | int i; | 1517 | int i; | |
1518 | 1518 | |||
1519 | curcnt = source->is_evcnt.ev_count; | 1519 | curcnt = source->is_evcnt.ev_count; | |
1520 | pep = source->is_saved_evcnt; | 1520 | pep = source->is_saved_evcnt; | |
1521 | 1521 | |||
1522 | for (i = 0; i < ncpu; i++) { | 1522 | for (i = 0; i < ncpu; i++) { | |
1523 | if (pep[i].cpuid == cpuid) { | 1523 | if (pep[i].cpuid == cpuid) { | |
1524 | pep[i].count = curcnt; | 1524 | pep[i].count = curcnt; | |
1525 | break; | 1525 | break; | |
1526 | } | 1526 | } | |
1527 | } | 1527 | } | |
1528 | } | 1528 | } | |
1529 | 1529 | |||
1530 | /* | 1530 | /* | |
1531 | * Restore current affinitied cpu's interrupt count. | 1531 | * Restore current affinitied cpu's interrupt count. | |
1532 | */ | 1532 | */ | |
1533 | static void | 1533 | static void | |
1534 | intr_restore_evcnt(struct intrsource *source, cpuid_t cpuid) | 1534 | intr_restore_evcnt(struct intrsource *source, cpuid_t cpuid) | |
1535 | { | 1535 | { | |
1536 | struct percpu_evcnt *pep; | 1536 | struct percpu_evcnt *pep; | |
1537 | int i; | 1537 | int i; | |
1538 | 1538 | |||
1539 | pep = source->is_saved_evcnt; | 1539 | pep = source->is_saved_evcnt; | |
1540 | 1540 | |||
1541 | for (i = 0; i < ncpu; i++) { | 1541 | for (i = 0; i < ncpu; i++) { | |
1542 | if (pep[i].cpuid == cpuid) { | 1542 | if (pep[i].cpuid == cpuid) { | |
1543 | source->is_evcnt.ev_count = pep[i].count; | 1543 | source->is_evcnt.ev_count = pep[i].count; | |
1544 | break; | 1544 | break; | |
1545 | } | 1545 | } | |
1546 | } | 1546 | } | |
1547 | } | 1547 | } | |
1548 | 1548 | |||
1549 | static void | 1549 | static void | |
1550 | intr_redistribute_xc_t(void *arg1, void *arg2) | 1550 | intr_redistribute_xc_t(void *arg1, void *arg2) | |
1551 | { | 1551 | { | |
1552 | struct cpu_info *ci; | 1552 | struct cpu_info *ci; | |
1553 | struct intrsource *isp; | 1553 | struct intrsource *isp; | |
1554 | int slot; | 1554 | int slot; | |
1555 | u_long psl; | 1555 | u_long psl; | |
1556 | 1556 | |||
1557 | ci = curcpu(); | 1557 | ci = curcpu(); | |
1558 | isp = arg1; | 1558 | isp = arg1; | |
1559 | slot = (int)(intptr_t)arg2; | 1559 | slot = (int)(intptr_t)arg2; | |
1560 | 1560 | |||
1561 | /* Disable interrupts locally. */ | 1561 | /* Disable interrupts locally. */ | |
1562 | psl = x86_read_psl(); | 1562 | psl = x86_read_psl(); | |
1563 | x86_disable_intr(); | 1563 | x86_disable_intr(); | |
1564 | 1564 | |||
1565 | /* Hook it in and re-calculate masks. */ | 1565 | /* Hook it in and re-calculate masks. */ | |
1566 | ci->ci_isources[slot] = isp; | 1566 | ci->ci_isources[slot] = isp; | |
1567 | x86_intr_calculatemasks(curcpu()); | 1567 | x86_intr_calculatemasks(curcpu()); | |
1568 | 1568 | |||
1569 | /* Re-enable interrupts locally. */ | 1569 | /* Re-enable interrupts locally. */ | |
1570 | x86_write_psl(psl); | 1570 | x86_write_psl(psl); | |
1571 | } | 1571 | } | |
1572 | 1572 | |||
/*
 * Cross-call, first stage of moving a source off a CPU: mask the pin at
 * its PIC, then re-route the interrupt to the new CPU (arg2).  Runs on
 * the source CPU.
 */
static void
intr_redistribute_xc_s1(void *arg1, void *arg2)
{
	struct pic *pic;
	struct intrsource *isp;
	struct cpu_info *nci;
	u_long psl;

	isp = arg1;
	nci = arg2;

	/*
	 * Disable interrupts on-chip and mask the pin.  Back out
	 * and let the interrupt be processed if one is pending.
	 */
	pic = isp->is_pic;
	for (;;) {
		psl = x86_read_psl();
		x86_disable_intr();
		if ((*pic->pic_trymask)(pic, isp->is_pin)) {
			break;
		}
		/* Masking failed: re-enable, wait 1ms, and retry. */
		x86_write_psl(psl);
		DELAY(1000);
	}

	/* pic_addroute will unmask the interrupt. */
	(*pic->pic_addroute)(pic, nci, isp->is_pin, isp->is_idtvec,
	    isp->is_type);
	x86_write_psl(psl);
}
1604 | 1604 | |||
1605 | static void | 1605 | static void | |
1606 | intr_redistribute_xc_s2(void *arg1, void *arg2) | 1606 | intr_redistribute_xc_s2(void *arg1, void *arg2) | |
1607 | { | 1607 | { | |
1608 | struct cpu_info *ci; | 1608 | struct cpu_info *ci; | |
1609 | u_long psl; | 1609 | u_long psl; | |
1610 | int slot; | 1610 | int slot; | |
1611 | 1611 | |||
1612 | ci = curcpu(); | 1612 | ci = curcpu(); | |
1613 | slot = (int)(uintptr_t)arg1; | 1613 | slot = (int)(uintptr_t)arg1; | |
1614 | 1614 | |||
1615 | /* Disable interrupts locally. */ | 1615 | /* Disable interrupts locally. */ | |
1616 | psl = x86_read_psl(); | 1616 | psl = x86_read_psl(); | |
1617 | x86_disable_intr(); | 1617 | x86_disable_intr(); | |
1618 | 1618 | |||
1619 | /* Patch out the source and re-calculate masks. */ | 1619 | /* Patch out the source and re-calculate masks. */ | |
1620 | ci->ci_isources[slot] = NULL; | 1620 | ci->ci_isources[slot] = NULL; | |
1621 | x86_intr_calculatemasks(ci); | 1621 | x86_intr_calculatemasks(ci); | |
1622 | 1622 | |||
1623 | /* Re-enable interrupts locally. */ | 1623 | /* Re-enable interrupts locally. */ | |
1624 | x86_write_psl(psl); | 1624 | x86_write_psl(psl); | |
1625 | } | 1625 | } | |
1626 | 1626 | |||
/*
 * Try to migrate one I/O APIC interrupt source away from CPU "oci" to
 * a less loaded CPU that still accepts interrupts.  Returns true if a
 * source was moved, false if nothing (more) could be migrated.
 * Must be called with cpu_lock held.
 */
static bool
intr_redistribute(struct cpu_info *oci)
{
	struct intrsource *isp;
	struct intrhand *ih;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *nci, *ici;
	int oslot, nslot;
	uint64_t where;

	KASSERT(mutex_owned(&cpu_lock));

	/* Look for an interrupt source that we can migrate. */
	for (oslot = 0; oslot < MAX_INTR_SOURCES; oslot++) {
		if ((isp = oci->ci_isources[oslot]) == NULL) {
			continue;
		}
		if (isp->is_pic->pic_type == PIC_IOAPIC) {
			break;
		}
	}
	if (oslot == MAX_INTR_SOURCES) {
		return false;
	}

	/* Find least loaded CPU and try to move there. */
	nci = NULL;
	for (CPU_INFO_FOREACH(cii, ici)) {
		if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
			continue;
		}
		KASSERT(ici != oci);
		if (nci == NULL || nci->ci_nintrhand > ici->ci_nintrhand) {
			nci = ici;
		}
	}
	if (nci == NULL) {
		return false;
	}
	for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
		if (nci->ci_isources[nslot] == NULL) {
			break;
		}
	}

	/* If that did not work, allocate anywhere. */
	if (nslot == MAX_INTR_SOURCES) {
		for (CPU_INFO_FOREACH(cii, nci)) {
			if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
				continue;
			}
			KASSERT(nci != oci);
			for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
				if (nci->ci_isources[nslot] == NULL) {
					break;
				}
			}
			if (nslot != MAX_INTR_SOURCES) {
				break;
			}
		}
	}
	if (nslot == MAX_INTR_SOURCES) {
		return false;
	}

	/*
	 * Now we have new CPU and new slot.  Run a cross-call to set up
	 * the new vector on the target CPU.
	 */
	where = xc_unicast(0, intr_redistribute_xc_t, isp,
	    (void *)(intptr_t)nslot, nci);
	xc_wait(where);

	/*
	 * We're ready to go on the target CPU.  Run a cross call to
	 * reroute the interrupt away from the source CPU.
	 */
	where = xc_unicast(0, intr_redistribute_xc_s1, isp, nci, oci);
	xc_wait(where);

	/* Sleep for (at least) 10ms to allow the change to take hold. */
	(void)kpause("intrdist", false, mstohz(10), NULL);

	/* Complete removal from the source CPU. */
	where = xc_unicast(0, intr_redistribute_xc_s2,
	    (void *)(uintptr_t)oslot, NULL, oci);
	xc_wait(where);

	/* Finally, take care of book-keeping. */
	for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) {
		oci->ci_nintrhand--;
		nci->ci_nintrhand++;
		ih->ih_cpu = nci;
	}
	intr_save_evcnt(isp, oci->ci_cpuid);
	intr_restore_evcnt(isp, nci->ci_cpuid);
	isp->is_active_cpu = nci->ci_cpuid;

	return true;
}
1728 | 1728 | |||
1729 | void | 1729 | void | |
1730 | cpu_intr_redistribute(void) | 1730 | cpu_intr_redistribute(void) | |
1731 | { | 1731 | { | |
1732 | CPU_INFO_ITERATOR cii; | 1732 | CPU_INFO_ITERATOR cii; | |
1733 | struct cpu_info *ci; | 1733 | struct cpu_info *ci; | |
1734 | 1734 | |||
1735 | KASSERT(mutex_owned(&cpu_lock)); | 1735 | KASSERT(mutex_owned(&cpu_lock)); | |
1736 | KASSERT(mp_online); | 1736 | KASSERT(mp_online); | |
1737 | 1737 | |||
1738 | /* Direct interrupts away from shielded CPUs. */ | 1738 | /* Direct interrupts away from shielded CPUs. */ | |
1739 | for (CPU_INFO_FOREACH(cii, ci)) { | 1739 | for (CPU_INFO_FOREACH(cii, ci)) { | |
1740 | if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { | 1740 | if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { | |
1741 | continue; | 1741 | continue; | |
1742 | } | 1742 | } | |
1743 | while (intr_redistribute(ci)) { | 1743 | while (intr_redistribute(ci)) { | |
1744 | /* nothing */ | 1744 | /* nothing */ | |
1745 | } | 1745 | } | |
1746 | } | 1746 | } | |
1747 | 1747 | |||
1748 | /* XXX should now re-balance */ | 1748 | /* XXX should now re-balance */ | |
1749 | } | 1749 | } | |
1750 | 1750 | |||
1751 | u_int | 1751 | u_int | |
1752 | cpu_intr_count(struct cpu_info *ci) | 1752 | cpu_intr_count(struct cpu_info *ci) | |
1753 | { | 1753 | { | |
1754 | 1754 | |||
1755 | KASSERT(ci->ci_nintrhand >= 0); | 1755 | KASSERT(ci->ci_nintrhand >= 0); | |
1756 | 1756 | |||
1757 | return ci->ci_nintrhand; | 1757 | return ci->ci_nintrhand; | |
1758 | } | 1758 | } | |
1759 | 1759 | |||
1760 | static int | 1760 | static int | |
1761 | intr_find_unused_slot(struct cpu_info *ci, int *index) | 1761 | intr_find_unused_slot(struct cpu_info *ci, int *index) | |
1762 | { | 1762 | { | |
1763 | int slot, i; | 1763 | int slot, i; | |
1764 | 1764 | |||
1765 | KASSERT(mutex_owned(&cpu_lock)); | 1765 | KASSERT(mutex_owned(&cpu_lock)); | |
1766 | 1766 | |||
1767 | slot = -1; | 1767 | slot = -1; | |
1768 | for (i = 0; i < MAX_INTR_SOURCES ; i++) { | 1768 | for (i = 0; i < MAX_INTR_SOURCES ; i++) { | |
1769 | if (ci->ci_isources[i] == NULL) { | 1769 | if (ci->ci_isources[i] == NULL) { | |
1770 | slot = i; | 1770 | slot = i; | |
1771 | break; | 1771 | break; | |
1772 | } | 1772 | } | |
1773 | } | 1773 | } | |
1774 | if (slot == -1) { | 1774 | if (slot == -1) { | |
1775 | DPRINTF(("cannot allocate ci_isources\n")); | 1775 | DPRINTF(("cannot allocate ci_isources\n")); | |
1776 | return EBUSY; | 1776 | return EBUSY; | |
1777 | } | 1777 | } | |
1778 | 1778 | |||
1779 | *index = slot; | 1779 | *index = slot; | |
1780 | return 0; | 1780 | return 0; | |
1781 | } | 1781 | } | |
1782 | 1782 | |||
/*
 * Let cpu_info ready to accept the interrupt.
 *
 * Runs on the handler's CPU (via xcall, or directly before the system
 * is MP-online): recomputes the IPL masks and points the IDT vector at
 * the PIC's level or edge interrupt stub for the handler's slot.
 */
static void
intr_activate_xcall(void *arg1, void *arg2)
{
	struct cpu_info *ci;
	struct intrsource *source;
	struct intrstub *stubp;
	struct intrhand *ih;
	struct idt_vec *iv;
	u_long psl;
	int idt_vec;
	int slot;

	ih = arg1;

	kpreempt_disable();

	KASSERT(ih->ih_cpu == curcpu() || !mp_online);

	ci = ih->ih_cpu;
	slot = ih->ih_slot;
	source = ci->ci_isources[slot];
	idt_vec = source->is_idtvec;
	iv = idt_vec_ref(&ci->ci_idtvec);

	/* Block interrupts locally while the IDT entry is updated. */
	psl = x86_read_psl();
	x86_disable_intr();

	x86_intr_calculatemasks(ci);

	/* Select the stub matching the source's trigger type. */
	if (source->is_type == IST_LEVEL) {
		stubp = &source->is_pic->pic_level_stubs[slot];
	} else {
		stubp = &source->is_pic->pic_edge_stubs[slot];
	}

	source->is_resume = stubp->ist_resume;
	source->is_recurse = stubp->ist_recurse;
	idt_vec_set(iv, idt_vec, stubp->ist_entry);

	x86_write_psl(psl);

	kpreempt_enable();
}
1829 | 1829 | |||
/*
 * Let cpu_info not accept the interrupt.
 *
 * Counterpart of intr_activate_xcall(): runs on the handler's CPU,
 * detaches the interrupt source from its slot, recomputes the IPL
 * masks, and releases the IDT vector when vectors are per-CPU.
 */
static void
intr_deactivate_xcall(void *arg1, void *arg2)
{
	struct cpu_info *ci;
	struct intrhand *ih, *lih;
	struct intrsource *isp;
	u_long psl;
	int idt_vec;
	int slot;

	ih = arg1;

	kpreempt_disable();

	KASSERT(ih->ih_cpu == curcpu() || !mp_online);

	ci = ih->ih_cpu;
	slot = ih->ih_slot;
	isp = ci->ci_isources[slot];
	idt_vec = isp->is_idtvec;

	/* Block interrupts locally while the slot is torn down. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Move all devices sharing IRQ number. */
	ci->ci_isources[slot] = NULL;
	for (lih = ih; lih != NULL; lih = lih->ih_next) {
		ci->ci_nintrhand--;
	}

	x86_intr_calculatemasks(ci);

	if (idt_vec_is_pcpu()) {
		idt_vec_free(&ci->ci_idtvec, idt_vec);
	} else {
		/*
		 * Skip unsetgate(), because the same idt[] entry is
		 * overwritten in intr_activate_xcall().
		 */
	}

	x86_write_psl(psl);

	kpreempt_enable();
}
1878 | 1878 | |||
1879 | static void | 1879 | static void | |
1880 | intr_get_affinity(struct intrsource *isp, kcpuset_t *cpuset) | 1880 | intr_get_affinity(struct intrsource *isp, kcpuset_t *cpuset) | |
1881 | { | 1881 | { | |
1882 | struct cpu_info *ci; | 1882 | struct cpu_info *ci; | |
1883 | 1883 | |||
1884 | KASSERT(mutex_owned(&cpu_lock)); | 1884 | KASSERT(mutex_owned(&cpu_lock)); | |
1885 | 1885 | |||
1886 | if (isp == NULL) { | 1886 | if (isp == NULL) { | |
1887 | kcpuset_zero(cpuset); | 1887 | kcpuset_zero(cpuset); | |
1888 | return; | 1888 | return; | |
1889 | } | 1889 | } | |
1890 | 1890 | |||
1891 | KASSERTMSG(isp->is_handlers != NULL, | 1891 | KASSERTMSG(isp->is_handlers != NULL, | |
1892 | "Don't get affinity for the device which is not established."); | 1892 | "Don't get affinity for the device which is not established."); | |
1893 | 1893 | |||
1894 | ci = isp->is_handlers->ih_cpu; | 1894 | ci = isp->is_handlers->ih_cpu; | |
1895 | if (ci == NULL) { | 1895 | if (ci == NULL) { | |
1896 | kcpuset_zero(cpuset); | 1896 | kcpuset_zero(cpuset); | |
1897 | return; | 1897 | return; | |
1898 | } | 1898 | } | |
1899 | 1899 | |||
1900 | kcpuset_set(cpuset, cpu_index(ci)); | 1900 | kcpuset_set(cpuset, cpu_index(ci)); | |
1901 | return; | 1901 | return; | |
1902 | } | 1902 | } | |
1903 | 1903 | |||
/*
 * Move an established interrupt source to the CPU selected by 'cpuset'.
 *
 * Must be called with both intr_distribute_lock and cpu_lock held
 * (asserted below).  Only a single target CPU is honored: logical
 * destination mode is not supported, so the lowest-indexed CPU in
 * 'cpuset' is used.
 *
 * Returns 0 on success, or an errno:
 *   EINVAL  - bad cpu index, target CPU is interrupt-shielded, or isp NULL
 *   ENOTSUP - source is behind the i8259 PIC (primary-CPU only)
 *   EBUSY   - no per-CPU IDT vector available on the target CPU
 *   or the error from intr_find_unused_slot().
 */
static int
intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset)
{
	struct cpu_info *oldci, *newci;
	struct intrhand *ih, *lih;
	struct pic *pic;
	u_int cpu_idx;
	int old_idtvec, new_idtvec;
	int oldslot, newslot;
	int err;
	int pin;

	KASSERT(mutex_owned(&intr_distribute_lock));
	KASSERT(mutex_owned(&cpu_lock));

	/* XXX
	 * logical destination mode is not supported, use lowest index cpu.
	 */
	cpu_idx = kcpuset_ffs(cpuset) - 1;
	newci = cpu_lookup(cpu_idx);
	if (newci == NULL) {
		DPRINTF(("invalid cpu index: %u\n", cpu_idx));
		return EINVAL;
	}
	/* Refuse to direct interrupts at a CPU shielded from them. */
	if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
		DPRINTF(("the cpu is set nointr shield. index:%u\n", cpu_idx));
		return EINVAL;
	}

	if (isp == NULL) {
		DPRINTF(("invalid intrctl handler\n"));
		return EINVAL;
	}

	/* i8259_pic supports only primary cpu, see i8259.c. */
	pic = isp->is_pic;
	if (pic == &i8259_pic) {
		DPRINTF(("i8259 pic does not support set_affinity\n"));
		return ENOTSUP;
	}

	ih = isp->is_handlers;
	KASSERTMSG(ih != NULL,
	    "Don't set affinity for the device which is not established.");

	oldci = ih->ih_cpu;
	if (newci == oldci) /* nothing to do */
		return 0;

	oldslot = ih->ih_slot;

	err = intr_find_unused_slot(newci, &newslot);
	if (err) {
		DPRINTF(("failed to allocate interrupt slot for PIC %s intrid "
		    "%s\n", isp->is_pic->pic_name, isp->is_intrid));
		return err;
	}

	old_idtvec = isp->is_idtvec;

	if (idt_vec_is_pcpu()) {
		/*
		 * Per-CPU IDTs: the vector number is local to each CPU,
		 * so a fresh vector must be allocated on the target.
		 */
		new_idtvec = idt_vec_alloc(&newci->ci_idtvec,
		    APIC_LEVEL(ih->ih_level), IDT_INTR_HIGH);
		if (new_idtvec == 0)
			return EBUSY;
		DPRINTF(("interrupt from cpu%d vec %d to cpu%d vec %d\n",
		    cpu_index(oldci), old_idtvec, cpu_index(newci),
		    new_idtvec));
	} else {
		/* Shared IDT: the same vector is valid on every CPU. */
		new_idtvec = isp->is_idtvec;
	}

	/* Prevent intr_unmask() from reenabling the source at the hw. */
	isp->is_distribute_pending = true;

	/*
	 * Mask the source at the hardware, then wait until any interrupt
	 * already latched in the old CPU's pending mask has been serviced
	 * before tearing down the old routing.
	 */
	pin = isp->is_pin;
	(*pic->pic_hwmask)(pic, pin); /* for ci_ipending check */
	membar_sync();
	while (oldci->ci_ipending & (1ULL << oldslot)) {
		(void)kpause("intrdist", false, 1, &cpu_lock);
		membar_sync();
	}

	kpreempt_disable();

	/* deactivate old interrupt setting */
	if (oldci == curcpu() || !mp_online) {
		/* Run directly when already on the old CPU (or pre-SMP). */
		intr_deactivate_xcall(ih, NULL);
	} else {
		/* Otherwise do the teardown on the old CPU via cross-call. */
		uint64_t where;
		where = xc_unicast(0, intr_deactivate_xcall, ih,
		    NULL, oldci);
		xc_wait(where);
	}
	/* Preserve event counts across the move, then unroute the pin. */
	intr_save_evcnt(isp, oldci->ci_cpuid);
	(*pic->pic_delroute)(pic, oldci, pin, old_idtvec, isp->is_type);

	/* activate new interrupt setting */
	isp->is_idtvec = new_idtvec;
	newci->ci_isources[newslot] = isp;
	/* Repoint every handler on the chain at the new CPU and slot. */
	for (lih = ih; lih != NULL; lih = lih->ih_next) {
		newci->ci_nintrhand++;
		lih->ih_cpu = newci;
		lih->ih_slot = newslot;
	}
	if (newci == curcpu() || !mp_online) {
		intr_activate_xcall(ih, NULL);
	} else {
		uint64_t where;
		where = xc_unicast(0, intr_activate_xcall, ih,
		    NULL, newci);
		xc_wait(where);
	}
	intr_restore_evcnt(isp, newci->ci_cpuid);
	isp->is_active_cpu = newci->ci_cpuid;
	(*pic->pic_addroute)(pic, newci, pin, new_idtvec, isp->is_type);

	/*
	 * Routing is complete: allow intr_unmask() again and re-enable
	 * the source at the hardware, on the new CPU when possible.
	 */
	isp->is_distribute_pending = false;
	if (newci == curcpu() || !mp_online) {
		intr_hwunmask_xcall(ih, NULL);
	} else {
		uint64_t where;
		where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, newci);
		xc_wait(where);
	}

	kpreempt_enable();

	/* err is necessarily 0 here (all failure paths returned above). */
	return err;
}
2034 | 2034 | |||
2035 | static bool | 2035 | static bool | |
2036 | intr_is_affinity_intrsource(struct intrsource *isp, const kcpuset_t *cpuset) | 2036 | intr_is_affinity_intrsource(struct intrsource *isp, const kcpuset_t *cpuset) | |
2037 | { | 2037 | { | |
2038 | struct cpu_info *ci; | 2038 | struct cpu_info *ci; | |
2039 | 2039 | |||
2040 | KASSERT(mutex_owned(&cpu_lock)); | 2040 | KASSERT(mutex_owned(&cpu_lock)); | |
2041 | 2041 | |||
2042 | /* | 2042 | /* | |
2043 | * The device is already pci_intr_alloc'ed, however it is not | 2043 | * The device is already pci_intr_alloc'ed, however it is not | |
2044 | * established yet. | 2044 | * established yet. | |
2045 | */ | 2045 | */ | |
2046 | if (isp->is_handlers == NULL) | 2046 | if (isp->is_handlers == NULL) | |
2047 | return false; | 2047 | return false; | |
2048 | 2048 | |||
2049 | ci = isp->is_handlers->ih_cpu; | 2049 | ci = isp->is_handlers->ih_cpu; | |
2050 | KASSERT(ci != NULL); | 2050 | KASSERT(ci != NULL); | |
2051 | 2051 | |||
2052 | return kcpuset_isset(cpuset, cpu_index(ci)); | 2052 | return kcpuset_isset(cpuset, cpu_index(ci)); | |
2053 | } | 2053 | } | |
2054 | 2054 | |||
2055 | static struct intrhand * | 2055 | static struct intrhand * | |
2056 | intr_get_handler(const char *intrid) | 2056 | intr_get_handler(const char *intrid) | |
2057 | { | 2057 | { | |
2058 | struct intrsource *isp; | 2058 | struct intrsource *isp; |