Sat Jun 6 21:38:47 2015 UTC
Reuse ci_xnext to hold the NMI exception stack.


(matt)
diff -r1.113 -r1.114 src/sys/arch/mips/include/cpu.h

cvs diff -r1.113 -r1.114 src/sys/arch/mips/include/cpu.h

--- src/sys/arch/mips/include/cpu.h 2015/06/02 05:05:28 1.113
+++ src/sys/arch/mips/include/cpu.h 2015/06/06 21:38:47 1.114
@@ -1,14 +1,14 @@
-/* $NetBSD: cpu.h,v 1.113 2015/06/02 05:05:28 matt Exp $ */
+/* $NetBSD: cpu.h,v 1.114 2015/06/06 21:38:47 matt Exp $ */
 
 /*-
  * Copyright (c) 1992, 1993
  *	The Regents of the University of California. All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  *	Ralph Campbell and Rick Macklem.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -72,27 +72,27 @@ typedef struct cpu_watchpoint {
 
 #define CPUWATCH_MAX 8	/* max possible number of watchpoints */
 
 u_int cpuwatch_discover(void);
 void cpuwatch_free(cpu_watchpoint_t *);
 cpu_watchpoint_t *cpuwatch_alloc(void);
 void cpuwatch_set_all(void);
 void cpuwatch_clr_all(void);
 void cpuwatch_set(cpu_watchpoint_t *);
 void cpuwatch_clr(cpu_watchpoint_t *);
 
 struct cpu_info {
 	struct cpu_data ci_data;	/* MI per-cpu data */
-	void *ci_xnext;			/* unused */
+	void *ci_nmi_stack;		/* NMI exception stack */
 	struct cpu_softc *ci_softc;	/* chip-dependent hook */
 	device_t ci_dev;		/* owning device */
 	cpuid_t ci_cpuid;		/* Machine-level identifier */
 	u_long ci_cctr_freq;		/* cycle counter frequency */
 	u_long ci_cpu_freq;		/* CPU frequency */
 	u_long ci_cycles_per_hz;	/* CPU freq / hz */
 	u_long ci_divisor_delay;	/* for delay/DELAY */
 	u_long ci_divisor_recip;	/* unused, for obsolete microtime(9) */
 	struct lwp *ci_curlwp;		/* currently running lwp */
 	volatile int ci_want_resched;	/* user preemption pending */
 	int ci_mtx_count;		/* negative count of held mutexes */
 	int ci_mtx_oldspl;		/* saved SPL value */
 	int ci_idepth;			/* hardware interrupt depth */
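For context, the change repurposes the previously unused ci_xnext member of struct cpu_info as ci_nmi_stack, a per-CPU pointer to an NMI exception stack. The sketch below is not the NetBSD implementation; it uses a trimmed stand-in for struct cpu_info, and NMI_STACK_SIZE and cpu_nmi_stack_init() are invented names, to illustrate the general pattern of allocating such a stack and recording its top in ci_nmi_stack, assuming a downward-growing stack as on MIPS.

/*
 * Hypothetical sketch only: shows how a machine-dependent CPU attach
 * path might populate a field like ci_nmi_stack.  The struct is a
 * stand-in for struct cpu_info; NMI_STACK_SIZE and cpu_nmi_stack_init
 * are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NMI_STACK_SIZE	8192		/* assumed size: a page or two */

struct cpu_info_stub {
	void *ci_nmi_stack;		/* NMI exception stack (top) */
	/* ... remaining cpu_info members elided ... */
};

/*
 * Allocate an NMI stack and record its *top* in ci_nmi_stack, since
 * the stack grows toward lower addresses; an NMI vector would load
 * this pointer into sp before entering the C-level handler.
 */
static int
cpu_nmi_stack_init(struct cpu_info_stub *ci)
{
	void *base = aligned_alloc(16, NMI_STACK_SIZE);

	if (base == NULL)
		return -1;
	ci->ci_nmi_stack = (uint8_t *)base + NMI_STACK_SIZE;
	return 0;
}

int
main(void)
{
	struct cpu_info_stub ci = { 0 };

	if (cpu_nmi_stack_init(&ci) == 0)
		printf("NMI stack top at %p\n", ci.ci_nmi_stack);
	return 0;
}

Keeping the pointer in the second slot of struct cpu_info (right after ci_data) means low-level assembly can reach it with a small, fixed offset from the per-CPU pointer, which is presumably why an existing unused slot was reused rather than appending a new member.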