Wed Jun 12 04:23:46 2013 UTC
fix serial typo & pasto, while there add SX_ADD instructions


(macallan)
diff -r1.7 -r1.8 src/sys/arch/sparc/dev/sxreg.h

cvs diff -r1.7 -r1.8 src/sys/arch/sparc/dev/sxreg.h

--- src/sys/arch/sparc/dev/sxreg.h 2013/06/05 18:15:06 1.7
+++ src/sys/arch/sparc/dev/sxreg.h 2013/06/12 04:23:46 1.8
@@ -1,14 +1,14 @@
-/* $NetBSD: sxreg.h,v 1.7 2013/06/05 18:15:06 macallan Exp $ */
+/* $NetBSD: sxreg.h,v 1.8 2013/06/12 04:23:46 macallan Exp $ */
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Michael Lorenz.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -197,59 +197,72 @@
 #define SX_M16X16SR8 (0x1 << 28) /* 16bit multiply, shift right 8 */
 #define SX_M16X16SR16 (0x2 << 28) /* 16bit multiply, shift right 16 */
 #define SX_M32X16SR0 (0x4 << 28) /* 32x16bit multiply, no shift */
 #define SX_M32X16SR8 (0x5 << 28) /* 32x16bit multiply, shift right 8 */
 #define SX_M32X16SR16 (0x6 << 28) /* 32x16bit multiply, shift right 16 */
 
 #define SX_MULTIPLY (0x0 << 21) /* normal multiplication */
 #define SX_DOT (0x1 << 21) /* dot product of A and B */
 #define SX_SAXP (0x2 << 21) /* A * SCAM + B */
 
 #define SX_ROUND (0x1 << 23) /* round results */
 
 #define SX_MUL16X16(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
-    SX_MULTIPLY | ((sa) << 14) | ((sb) << 7) | (d))
+    SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_MUL16X16R(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
-    SX_MULTIPLY | ((sa) << 14) | ((sb) << 7) | (d) | SX_ROUND)
+    SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
 #define SX_MUL16X16SR8(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
-    SX_MULTIPLY | ((sa) << 14) | ((sb) << 7) | (d))
+    SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_MUL16X16SR8R(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
-    SX_MULTIPLY | ((sa) << 14) | ((sb) << 7) | (d) | SX_ROUND)
+    SX_MULTIPLY | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
 
 #define SX_SAXP16X16(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
-    SX_SAXP | ((sa) << 14) | ((sb) << 7) | (d))
+    SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_SAXP16X16R(sa, sb, d, cnt) (SX_M16X16SR0 | ((cnt) << 24) | \
-    SX_SAXP | ((sa) << 14) | ((sb) << 7) | (d) | SX_ROUND)
+    SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
 #define SX_SAXP16X16SR8(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
-    SX_SAXP | ((sa) << 14) | ((sb) << 7) | (d))
+    SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_SAXP16X16SR8R(sa, sb, d, cnt) (SX_M16X16SR8 | ((cnt) << 24) | \
-    SX_SAXP | ((sa) << 14) | ((sb) << 7) | (d) | SX_ROUND)
+    SX_SAXP | ((sa) << 14) | ((d) << 7) | (sb) | SX_ROUND)
 
 /* logic group */
 #define SX_AND_V (0x0 << 21) /* vector AND vector */
 #define SX_AND_S (0x1 << 21) /* vector AND scalar */
 #define SX_AND_I (0x2 << 21) /* vector AND immediate */
 #define SX_XOR_V (0x3 << 21) /* vector XOR vector */
 #define SX_XOR_S (0x4 << 21) /* vector XOR scalar */
 #define SX_XOR_I (0x5 << 21) /* vector XOR immediate */
 #define SX_OR_V (0x6 << 21) /* vector OR vector */
 #define SX_OR_S (0x7 << 21) /* vector OR scalar */
 /* immediates are 7bit sign extended to 32bit */
 
 #define SX_ANDV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_V | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_ANDS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_S | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_ANDI(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_I | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_XORV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_V | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_XORS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_S | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_XORI(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_XOR_I | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_ORV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_OR_V | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
 #define SX_ORS(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_OR_S | \
-    ((sa) << 14) | ((sb) << 7) | (d))
+    ((sa) << 14) | ((d) << 7) | (sb))
+
+/* arithmetic group */
+#define SX_ADD_V (0x00 << 21)
+#define SX_ADD_S (0x01 << 21)
+#define SX_ADD_I (0x02 << 21)
+#define SX_SUM (0x03 << 21)
+#define SX_SUB_V (0x04 << 21)
+#define SX_SUB_S (0x05 << 21)
+#define SX_SUB_I (0x06 << 21)
+#define SX_ABS (0x07 << 21)
+
+#define SX_ADDV(sa, sb, d, cnt) (0xa0000000 | ((cnt) << 24) | SX_ADD_V | \
+    ((sa) << 14) | ((d) << 7) | (sb))
 
 #endif /* SXREG_H */
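
For reference, here is a minimal standalone sketch (not part of the commit) of what the fix changes: the SX_ANDV and SX_ADDV bodies are copied from sxreg.h rev 1.8, which now packs the first source at bits 14-20, the destination at bits 7-13 and the second source at bits 0-6. The main() harness and the register numbers are hypothetical illustration values, not anything from the NetBSD tree.

/*
 * Minimal sketch, assuming only what the diff above shows.
 * Macro bodies copied from sxreg.h rev 1.8; main() and the
 * operand values are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define SX_AND_V (0x0 << 21)	/* vector AND vector */
#define SX_ADD_V (0x00 << 21)	/* vector add, from the new arithmetic group */

#define SX_ANDV(sa, sb, d, cnt) (0xb0000000 | ((cnt) << 24) | SX_AND_V | \
    ((sa) << 14) | ((d) << 7) | (sb))
#define SX_ADDV(sa, sb, d, cnt) (0xa0000000 | ((cnt) << 24) | SX_ADD_V | \
    ((sa) << 14) | ((d) << 7) | (sb))

int
main(void)
{
	/* hypothetical operands: sources 8 and 9, destination 10, count 3 */
	uint32_t and_insn = SX_ANDV(8, 9, 10, 3);
	uint32_t add_insn = SX_ADDV(8, 9, 10, 3);

	printf("SX_ANDV(8, 9, 10, 3) = 0x%08" PRIx32 "\n", and_insn);
	printf("SX_ADDV(8, 9, 10, 3) = 0x%08" PRIx32 "\n", add_insn);

	/* with the fixed macros, d (10) lands in bits 7..13, sb (9) in bits 0..6 */
	printf("d field = %" PRIu32 ", sb field = %" PRIu32 "\n",
	    (and_insn >> 7) & 0x7f, and_insn & 0x7f);
	return 0;
}

Before the fix these macros emitted ((sb) << 7) | (d), i.e. the second source sat in the destination field and vice versa, which is presumably the repeated paste error the log message refers to; the same (sa)/(d)/(sb) ordering is carried over into the new SX_ADDV macro.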