| @@ -154,52 +154,51 @@ amd64nbsd_trapframe_cache(struct frame_i | | | @@ -154,52 +154,51 @@ amd64nbsd_trapframe_cache(struct frame_i |
154 | addr = sp + 8; /* It's an interrupt frame. */ | | 154 | addr = sp + 8; /* It's an interrupt frame. */ |
155 | else | | 155 | else |
156 | addr = sp; | | 156 | addr = sp; |
157 | | | 157 | |
158 | for (i = 0; i < ARRAY_SIZE (amd64nbsd_tf_reg_offset); i++) | | 158 | for (i = 0; i < ARRAY_SIZE (amd64nbsd_tf_reg_offset); i++) |
159 | if (amd64nbsd_tf_reg_offset[i] != -1) | | 159 | if (amd64nbsd_tf_reg_offset[i] != -1) |
160 | trad_frame_set_reg_addr (cache, i, addr + amd64nbsd_tf_reg_offset[i]); | | 160 | trad_frame_set_reg_addr (cache, i, addr + amd64nbsd_tf_reg_offset[i]); |
161 | | | 161 | |
162 | /* Read %cs and %rip when we have the addresses to hand */ | | 162 | /* Read %cs and %rip when we have the addresses to hand */ |
163 | cs = read_memory_unsigned_integer (addr | | 163 | cs = read_memory_unsigned_integer (addr |
164 | + amd64nbsd_tf_reg_offset[AMD64_CS_REGNUM], 8, byte_order); | | 164 | + amd64nbsd_tf_reg_offset[AMD64_CS_REGNUM], 8, byte_order); |
165 | rip = read_memory_unsigned_integer (addr | | 165 | rip = read_memory_unsigned_integer (addr |
166 | + amd64nbsd_tf_reg_offset[AMD64_RIP_REGNUM], 8, byte_order); | | 166 | + amd64nbsd_tf_reg_offset[AMD64_RIP_REGNUM], 8, byte_order); |
167 | if (cs == 0 || rip == 0) | | | |
168 | abort(); | | | |
169 | | | 167 | |
170 | /* The trap frame layout was changed. If the %rip value is less than 2^16 it | | 168 | /* The trap frame layout was changed. If the %rip value is less than 2^16 it |
171 | * is almost certainly the %ss of the old format. */ | | 169 | * is almost certainly the %ss of the old format. */ |
172 | if (rip < (1 << 16)) | | 170 | if (rip < (1 << 16)) |
173 | { | | 171 | { |
174 | | | 172 | |
175 | for (i = 0; i < ARRAY_SIZE (amd64nbsd_tf_reg_offset); i++) | | 173 | for (i = 0; i < ARRAY_SIZE (amd64nbsd_tf_reg_offset); i++) |
176 | { | | 174 | { |
177 | | | 175 | |
178 | if (amd64nbsd_tf_reg_offset[i] == -1) | | 176 | if (amd64nbsd_tf_reg_offset[i] == -1) |
179 | continue; | | 177 | continue; |
180 | | | 178 | |
181 | trad_frame_set_reg_addr (cache, i, addr + amd64nbsd_r_reg_offset[i]); | | 179 | trad_frame_set_reg_addr (cache, i, addr + amd64nbsd_r_reg_offset[i]); |
182 | | | 180 | |
183 | /* Read %cs when we have the address to hand */ | | 181 | /* Read %cs when we have the address to hand */ |
184 | if (i == AMD64_CS_REGNUM) | | 182 | if (i == AMD64_CS_REGNUM) |
185 | cs = read_memory_unsigned_integer (addr + amd64nbsd_r_reg_offset[i], | | 183 | cs = read_memory_unsigned_integer (addr + amd64nbsd_r_reg_offset[i], |
186 | 8, byte_order); | | 184 | 8, byte_order); |
187 | } | | 185 | } |
188 | } | | 186 | } |
189 | | | 187 | |
190 | if ((cs & I386_SEL_RPL) == I386_SEL_UPL) | | 188 | if ((cs & I386_SEL_RPL) == I386_SEL_UPL || |
| | | 189 | (name && strncmp(name, "Xsoft", 5) == 0)) |
191 | { | | 190 | { |
192 | /* Trap from user space; terminate backtrace. */ | | 191 | /* Trap from user space or soft interrupt; terminate backtrace. */ |
193 | trad_frame_set_id (cache, outer_frame_id); | | 192 | trad_frame_set_id (cache, outer_frame_id); |
194 | } | | 193 | } |
195 | else | | 194 | else |
196 | { | | 195 | { |
197 | /* Construct the frame ID using the function start. */ | | 196 | /* Construct the frame ID using the function start. */ |
198 | trad_frame_set_id (cache, frame_id_build (sp + 16, func)); | | 197 | trad_frame_set_id (cache, frame_id_build (sp + 16, func)); |
199 | } | | 198 | } |
200 | | | 199 | |
201 | return cache; | | 200 | return cache; |
202 | } | | 201 | } |
203 | | | 202 | |
204 | static void | | 203 | static void |
205 | amd64nbsd_trapframe_this_id (struct frame_info *this_frame, | | 204 | amd64nbsd_trapframe_this_id (struct frame_info *this_frame, |