Fix inconsistencies with the GNU-stack note:
- always use __ELF__ && __linux__, not just __ELF__ or just __linux__
- remember to pop back to the previous section where it is missing
XXX: need to file this bug with the GNU folks.

diff -r1.1.1.1 -r1.2 src/external/gpl3/gcc/dist/libgcc/config/alpha/qrnnd.S
(christos)
--- src/external/gpl3/gcc/dist/libgcc/config/alpha/qrnnd.S 2014/03/01 08:41:46 1.1.1.1
+++ src/external/gpl3/gcc/dist/libgcc/config/alpha/qrnnd.S 2015/11/07 16:53:07 1.2
@@ -1,175 +1,176 @@ | @@ -1,175 +1,176 @@ | |||
1 | # Alpha 21064 __udiv_qrnnd | 1 | # Alpha 21064 __udiv_qrnnd | |
2 | # Copyright (C) 1992-2013 Free Software Foundation, Inc. | 2 | # Copyright (C) 1992-2013 Free Software Foundation, Inc. | |
3 | 3 | |||
4 | # This file is part of GCC. | 4 | # This file is part of GCC. | |
5 | 5 | |||
6 | # The GNU MP Library is free software; you can redistribute it and/or modify | 6 | # The GNU MP Library is free software; you can redistribute it and/or modify | |
7 | # it under the terms of the GNU General Public License as published by | 7 | # it under the terms of the GNU General Public License as published by | |
8 | # the Free Software Foundation; either version 3 of the License, or (at your | 8 | # the Free Software Foundation; either version 3 of the License, or (at your | |
9 | # option) any later version. | 9 | # option) any later version. | |
10 | 10 | |||
11 | # This file is distributed in the hope that it will be useful, but | 11 | # This file is distributed in the hope that it will be useful, but | |
12 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | 12 | # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
13 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public | 13 | # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public | |
14 | # License for more details. | 14 | # License for more details. | |
15 | 15 | |||
16 | # Under Section 7 of GPL version 3, you are granted additional | 16 | # Under Section 7 of GPL version 3, you are granted additional | |
17 | # permissions described in the GCC Runtime Library Exception, version | 17 | # permissions described in the GCC Runtime Library Exception, version | |
18 | # 3.1, as published by the Free Software Foundation. | 18 | # 3.1, as published by the Free Software Foundation. | |
19 | 19 | |||
20 | # You should have received a copy of the GNU General Public License and | 20 | # You should have received a copy of the GNU General Public License and | |
21 | # a copy of the GCC Runtime Library Exception along with this program; | 21 | # a copy of the GCC Runtime Library Exception along with this program; | |
22 | # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 22 | # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | # <http://www.gnu.org/licenses/>. | 23 | # <http://www.gnu.org/licenses/>. | |
24 | 24 | |||
25 | #ifdef __ELF__ | 25 | #if defined(__ELF__) && defined(__linux__) | |
26 | .section .note.GNU-stack,"" | 26 | .section .note.GNU-stack,"" | |
27 | .previous | |||
27 | #endif | 28 | #endif | |
28 | 29 | |||
29 | .set noreorder | 30 | .set noreorder | |
30 | .set noat | 31 | .set noat | |
31 | 32 | |||
32 | .text | 33 | .text | |
33 | 34 | |||
34 | .globl __udiv_qrnnd | 35 | .globl __udiv_qrnnd | |
35 | .ent __udiv_qrnnd | 36 | .ent __udiv_qrnnd | |
36 | #ifdef __VMS__ | 37 | #ifdef __VMS__ | |
37 | __udiv_qrnnd..en: | 38 | __udiv_qrnnd..en: | |
38 | .frame $29,0,$26,0 | 39 | .frame $29,0,$26,0 | |
39 | .prologue | 40 | .prologue | |
40 | #else | 41 | #else | |
41 | __udiv_qrnnd: | 42 | __udiv_qrnnd: | |
42 | .frame $30,0,$26,0 | 43 | .frame $30,0,$26,0 | |
43 | .prologue 0 | 44 | .prologue 0 | |
44 | #endif | 45 | #endif | |
45 | 46 | |||
46 | #define cnt $2 | 47 | #define cnt $2 | |
47 | #define tmp $3 | 48 | #define tmp $3 | |
48 | #define rem_ptr $16 | 49 | #define rem_ptr $16 | |
49 | #define n1 $17 | 50 | #define n1 $17 | |
50 | #define n0 $18 | 51 | #define n0 $18 | |
51 | #define d $19 | 52 | #define d $19 | |
52 | #define qb $20 | 53 | #define qb $20 | |
53 | #define AT $at | 54 | #define AT $at | |
54 | 55 | |||
55 | ldiq cnt,16 | 56 | ldiq cnt,16 | |
56 | blt d,$largedivisor | 57 | blt d,$largedivisor | |
57 | 58 | |||
58 | $loop1: cmplt n0,0,tmp | 59 | $loop1: cmplt n0,0,tmp | |
59 | addq n1,n1,n1 | 60 | addq n1,n1,n1 | |
60 | bis n1,tmp,n1 | 61 | bis n1,tmp,n1 | |
61 | addq n0,n0,n0 | 62 | addq n0,n0,n0 | |
62 | cmpule d,n1,qb | 63 | cmpule d,n1,qb | |
63 | subq n1,d,tmp | 64 | subq n1,d,tmp | |
64 | cmovne qb,tmp,n1 | 65 | cmovne qb,tmp,n1 | |
65 | bis n0,qb,n0 | 66 | bis n0,qb,n0 | |
66 | cmplt n0,0,tmp | 67 | cmplt n0,0,tmp | |
67 | addq n1,n1,n1 | 68 | addq n1,n1,n1 | |
68 | bis n1,tmp,n1 | 69 | bis n1,tmp,n1 | |
69 | addq n0,n0,n0 | 70 | addq n0,n0,n0 | |
70 | cmpule d,n1,qb | 71 | cmpule d,n1,qb | |
71 | subq n1,d,tmp | 72 | subq n1,d,tmp | |
72 | cmovne qb,tmp,n1 | 73 | cmovne qb,tmp,n1 | |
73 | bis n0,qb,n0 | 74 | bis n0,qb,n0 | |
74 | cmplt n0,0,tmp | 75 | cmplt n0,0,tmp | |
75 | addq n1,n1,n1 | 76 | addq n1,n1,n1 | |
76 | bis n1,tmp,n1 | 77 | bis n1,tmp,n1 | |
77 | addq n0,n0,n0 | 78 | addq n0,n0,n0 | |
78 | cmpule d,n1,qb | 79 | cmpule d,n1,qb | |
79 | subq n1,d,tmp | 80 | subq n1,d,tmp | |
80 | cmovne qb,tmp,n1 | 81 | cmovne qb,tmp,n1 | |
81 | bis n0,qb,n0 | 82 | bis n0,qb,n0 | |
82 | cmplt n0,0,tmp | 83 | cmplt n0,0,tmp | |
83 | addq n1,n1,n1 | 84 | addq n1,n1,n1 | |
84 | bis n1,tmp,n1 | 85 | bis n1,tmp,n1 | |
85 | addq n0,n0,n0 | 86 | addq n0,n0,n0 | |
86 | cmpule d,n1,qb | 87 | cmpule d,n1,qb | |
87 | subq n1,d,tmp | 88 | subq n1,d,tmp | |
88 | cmovne qb,tmp,n1 | 89 | cmovne qb,tmp,n1 | |
89 | bis n0,qb,n0 | 90 | bis n0,qb,n0 | |
90 | subq cnt,1,cnt | 91 | subq cnt,1,cnt | |
91 | bgt cnt,$loop1 | 92 | bgt cnt,$loop1 | |
92 | stq n1,0(rem_ptr) | 93 | stq n1,0(rem_ptr) | |
93 | bis $31,n0,$0 | 94 | bis $31,n0,$0 | |
94 | ret $31,($26),1 | 95 | ret $31,($26),1 | |
95 | 96 | |||
96 | $largedivisor: | 97 | $largedivisor: | |
97 | and n0,1,$4 | 98 | and n0,1,$4 | |
98 | 99 | |||
99 | srl n0,1,n0 | 100 | srl n0,1,n0 | |
100 | sll n1,63,tmp | 101 | sll n1,63,tmp | |
101 | or tmp,n0,n0 | 102 | or tmp,n0,n0 | |
102 | srl n1,1,n1 | 103 | srl n1,1,n1 | |
103 | 104 | |||
104 | and d,1,$6 | 105 | and d,1,$6 | |
105 | srl d,1,$5 | 106 | srl d,1,$5 | |
106 | addq $5,$6,$5 | 107 | addq $5,$6,$5 | |
107 | 108 | |||
108 | $loop2: cmplt n0,0,tmp | 109 | $loop2: cmplt n0,0,tmp | |
109 | addq n1,n1,n1 | 110 | addq n1,n1,n1 | |
110 | bis n1,tmp,n1 | 111 | bis n1,tmp,n1 | |
111 | addq n0,n0,n0 | 112 | addq n0,n0,n0 | |
112 | cmpule $5,n1,qb | 113 | cmpule $5,n1,qb | |
113 | subq n1,$5,tmp | 114 | subq n1,$5,tmp | |
114 | cmovne qb,tmp,n1 | 115 | cmovne qb,tmp,n1 | |
115 | bis n0,qb,n0 | 116 | bis n0,qb,n0 | |
116 | cmplt n0,0,tmp | 117 | cmplt n0,0,tmp | |
117 | addq n1,n1,n1 | 118 | addq n1,n1,n1 | |
118 | bis n1,tmp,n1 | 119 | bis n1,tmp,n1 | |
119 | addq n0,n0,n0 | 120 | addq n0,n0,n0 | |
120 | cmpule $5,n1,qb | 121 | cmpule $5,n1,qb | |
121 | subq n1,$5,tmp | 122 | subq n1,$5,tmp | |
122 | cmovne qb,tmp,n1 | 123 | cmovne qb,tmp,n1 | |
123 | bis n0,qb,n0 | 124 | bis n0,qb,n0 | |
124 | cmplt n0,0,tmp | 125 | cmplt n0,0,tmp | |
125 | addq n1,n1,n1 | 126 | addq n1,n1,n1 | |
126 | bis n1,tmp,n1 | 127 | bis n1,tmp,n1 | |
127 | addq n0,n0,n0 | 128 | addq n0,n0,n0 | |
128 | cmpule $5,n1,qb | 129 | cmpule $5,n1,qb | |
129 | subq n1,$5,tmp | 130 | subq n1,$5,tmp | |
130 | cmovne qb,tmp,n1 | 131 | cmovne qb,tmp,n1 | |
131 | bis n0,qb,n0 | 132 | bis n0,qb,n0 | |
132 | cmplt n0,0,tmp | 133 | cmplt n0,0,tmp | |
133 | addq n1,n1,n1 | 134 | addq n1,n1,n1 | |
134 | bis n1,tmp,n1 | 135 | bis n1,tmp,n1 | |
135 | addq n0,n0,n0 | 136 | addq n0,n0,n0 | |
136 | cmpule $5,n1,qb | 137 | cmpule $5,n1,qb | |
137 | subq n1,$5,tmp | 138 | subq n1,$5,tmp | |
138 | cmovne qb,tmp,n1 | 139 | cmovne qb,tmp,n1 | |
139 | bis n0,qb,n0 | 140 | bis n0,qb,n0 | |
140 | subq cnt,1,cnt | 141 | subq cnt,1,cnt | |
141 | bgt cnt,$loop2 | 142 | bgt cnt,$loop2 | |
142 | 143 | |||
143 | addq n1,n1,n1 | 144 | addq n1,n1,n1 | |
144 | addq $4,n1,n1 | 145 | addq $4,n1,n1 | |
145 | bne $6,$Odd | 146 | bne $6,$Odd | |
146 | stq n1,0(rem_ptr) | 147 | stq n1,0(rem_ptr) | |
147 | bis $31,n0,$0 | 148 | bis $31,n0,$0 | |
148 | ret $31,($26),1 | 149 | ret $31,($26),1 | |
149 | 150 | |||
150 | $Odd: | 151 | $Odd: | |
151 | /* q' in n0. r' in n1 */ | 152 | /* q' in n0. r' in n1 */ | |
152 | addq n1,n0,n1 | 153 | addq n1,n0,n1 | |
153 | 154 | |||
154 | cmpult n1,n0,tmp # tmp := carry from addq | 155 | cmpult n1,n0,tmp # tmp := carry from addq | |
155 | subq n1,d,AT | 156 | subq n1,d,AT | |
156 | addq n0,tmp,n0 | 157 | addq n0,tmp,n0 | |
157 | cmovne tmp,AT,n1 | 158 | cmovne tmp,AT,n1 | |
158 | 159 | |||
159 | cmpult n1,d,tmp | 160 | cmpult n1,d,tmp | |
160 | addq n0,1,AT | 161 | addq n0,1,AT | |
161 | cmoveq tmp,AT,n0 | 162 | cmoveq tmp,AT,n0 | |
162 | subq n1,d,AT | 163 | subq n1,d,AT | |
163 | cmoveq tmp,AT,n1 | 164 | cmoveq tmp,AT,n1 | |
164 | 165 | |||
165 | stq n1,0(rem_ptr) | 166 | stq n1,0(rem_ptr) | |
166 | bis $31,n0,$0 | 167 | bis $31,n0,$0 | |
167 | ret $31,($26),1 | 168 | ret $31,($26),1 | |
168 | 169 | |||
169 | #ifdef __VMS__ | 170 | #ifdef __VMS__ | |
170 | .link | 171 | .link | |
171 | .align 3 | 172 | .align 3 | |
172 | __udiv_qrnnd: | 173 | __udiv_qrnnd: | |
173 | .pdesc __udiv_qrnnd..en,null | 174 | .pdesc __udiv_qrnnd..en,null | |
174 | #endif | 175 | #endif | |
175 | .end __udiv_qrnnd | 176 | .end __udiv_qrnnd |
--- src/external/gpl3/gcc/dist/libgcc/config/i386/morestack.S 2014/03/01 08:41:47 1.1.1.1
+++ src/external/gpl3/gcc/dist/libgcc/config/i386/morestack.S 2015/11/07 16:53:07 1.2
@@ -1,860 +1,860 @@ | @@ -1,860 +1,860 @@ | |||
1 | # x86/x86_64 support for -fsplit-stack. | 1 | # x86/x86_64 support for -fsplit-stack. | |
2 | # Copyright (C) 2009-2013 Free Software Foundation, Inc. | 2 | # Copyright (C) 2009-2013 Free Software Foundation, Inc. | |
3 | # Contributed by Ian Lance Taylor <iant@google.com>. | 3 | # Contributed by Ian Lance Taylor <iant@google.com>. | |
4 | 4 | |||
5 | # This file is part of GCC. | 5 | # This file is part of GCC. | |
6 | 6 | |||
7 | # GCC is free software; you can redistribute it and/or modify it under | 7 | # GCC is free software; you can redistribute it and/or modify it under | |
8 | # the terms of the GNU General Public License as published by the Free | 8 | # the terms of the GNU General Public License as published by the Free | |
9 | # Software Foundation; either version 3, or (at your option) any later | 9 | # Software Foundation; either version 3, or (at your option) any later | |
10 | # version. | 10 | # version. | |
11 | 11 | |||
12 | # GCC is distributed in the hope that it will be useful, but WITHOUT ANY | 12 | # GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
13 | # WARRANTY; without even the implied warranty of MERCHANTABILITY or | 13 | # WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | 14 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 | # for more details. | 15 | # for more details. | |
16 | 16 | |||
17 | # Under Section 7 of GPL version 3, you are granted additional | 17 | # Under Section 7 of GPL version 3, you are granted additional | |
18 | # permissions described in the GCC Runtime Library Exception, version | 18 | # permissions described in the GCC Runtime Library Exception, version | |
19 | # 3.1, as published by the Free Software Foundation. | 19 | # 3.1, as published by the Free Software Foundation. | |
20 | 20 | |||
21 | # You should have received a copy of the GNU General Public License and | 21 | # You should have received a copy of the GNU General Public License and | |
22 | # a copy of the GCC Runtime Library Exception along with this program; | 22 | # a copy of the GCC Runtime Library Exception along with this program; | |
23 | # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 23 | # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
24 | # <http://www.gnu.org/licenses/>. | 24 | # <http://www.gnu.org/licenses/>. | |
25 | 25 | |||
26 | 26 | |||
27 | # Support for allocating more stack space when using -fsplit-stack. | 27 | # Support for allocating more stack space when using -fsplit-stack. | |
28 | # When a function discovers that it needs more stack space, it will | 28 | # When a function discovers that it needs more stack space, it will | |
29 | # call __morestack with the size of the stack frame and the size of | 29 | # call __morestack with the size of the stack frame and the size of | |
30 | # the parameters to copy from the old stack frame to the new one. | 30 | # the parameters to copy from the old stack frame to the new one. | |
31 | # The __morestack function preserves the parameter registers and | 31 | # The __morestack function preserves the parameter registers and | |
32 | # calls __generic_morestack to actually allocate the stack space. | 32 | # calls __generic_morestack to actually allocate the stack space. | |
33 | 33 | |||
34 | # When this is called stack space is very low, but we ensure that | 34 | # When this is called stack space is very low, but we ensure that | |
35 | # there is enough space to push the parameter registers and to call | 35 | # there is enough space to push the parameter registers and to call | |
36 | # __generic_morestack. | 36 | # __generic_morestack. | |
37 | 37 | |||
38 | # When calling __generic_morestack, FRAME_SIZE points to the size of | 38 | # When calling __generic_morestack, FRAME_SIZE points to the size of | |
39 | # the desired frame when the function is called, and the function | 39 | # the desired frame when the function is called, and the function | |
40 | # sets it to the size of the allocated stack. OLD_STACK points to | 40 | # sets it to the size of the allocated stack. OLD_STACK points to | |
41 | # the parameters on the old stack and PARAM_SIZE is the number of | 41 | # the parameters on the old stack and PARAM_SIZE is the number of | |
42 | # bytes of parameters to copy to the new stack. These are the | 42 | # bytes of parameters to copy to the new stack. These are the | |
43 | # parameters of the function that called __morestack. The | 43 | # parameters of the function that called __morestack. The | |
44 | # __generic_morestack function returns the new stack pointer, | 44 | # __generic_morestack function returns the new stack pointer, | |
45 | # pointing to the address of the first copied parameter. The return | 45 | # pointing to the address of the first copied parameter. The return | |
46 | # value minus the returned *FRAME_SIZE will be the first address on | 46 | # value minus the returned *FRAME_SIZE will be the first address on | |
47 | # the stack which we should not use. | 47 | # the stack which we should not use. | |
48 | 48 | |||
49 | # void *__generic_morestack (size_t *frame_size, void *old_stack, | 49 | # void *__generic_morestack (size_t *frame_size, void *old_stack, | |
50 | # size_t param_size); | 50 | # size_t param_size); | |
51 | 51 | |||
52 | # The __morestack routine has to arrange for the caller to return to a | 52 | # The __morestack routine has to arrange for the caller to return to a | |
53 | # stub on the new stack. The stub is responsible for restoring the | 53 | # stub on the new stack. The stub is responsible for restoring the | |
54 | # old stack pointer and returning to the caller's caller. This calls | 54 | # old stack pointer and returning to the caller's caller. This calls | |
55 | # __generic_releasestack to retrieve the old stack pointer and release | 55 | # __generic_releasestack to retrieve the old stack pointer and release | |
56 | # the newly allocated stack. | 56 | # the newly allocated stack. | |
57 | 57 | |||
58 | # void *__generic_releasestack (size_t *available); | 58 | # void *__generic_releasestack (size_t *available); | |
59 | 59 | |||
60 | # We do a little dance so that the processor's call/return return | 60 | # We do a little dance so that the processor's call/return return | |
61 | # address prediction works out. The compiler arranges for the caller | 61 | # address prediction works out. The compiler arranges for the caller | |
62 | # to look like this: | 62 | # to look like this: | |
63 | # call __generic_morestack | 63 | # call __generic_morestack | |
64 | # ret | 64 | # ret | |
65 | # L: | 65 | # L: | |
66 | # // carry on with function | 66 | # // carry on with function | |
67 | # After we allocate more stack, we call L, which is in our caller. | 67 | # After we allocate more stack, we call L, which is in our caller. | |
68 | # When that returns (to the predicted instruction), we release the | 68 | # When that returns (to the predicted instruction), we release the | |
69 | # stack segment and reset the stack pointer. We then return to the | 69 | # stack segment and reset the stack pointer. We then return to the | |
70 | # predicted instruction, namely the ret instruction immediately after | 70 | # predicted instruction, namely the ret instruction immediately after | |
71 | # the call to __generic_morestack. That then returns to the caller of | 71 | # the call to __generic_morestack. That then returns to the caller of | |
72 | # the original caller. | 72 | # the original caller. | |
73 | 73 | |||
74 | 74 | |||
75 | # The amount of extra space we ask for. In general this has to be | 75 | # The amount of extra space we ask for. In general this has to be | |
76 | # enough for the dynamic loader to find a symbol and for a signal | 76 | # enough for the dynamic loader to find a symbol and for a signal | |
77 | # handler to run. | 77 | # handler to run. | |
78 | 78 | |||
79 | #ifndef __x86_64__ | 79 | #ifndef __x86_64__ | |
80 | #define BACKOFF (1024) | 80 | #define BACKOFF (1024) | |
81 | #else | 81 | #else | |
82 | #define BACKOFF (1536) | 82 | #define BACKOFF (1536) | |
83 | #endif | 83 | #endif | |
84 | 84 | |||
85 | 85 | |||
86 | # The amount of space we ask for when calling non-split-stack code. | 86 | # The amount of space we ask for when calling non-split-stack code. | |
87 | #define NON_SPLIT_STACK 0x100000 | 87 | #define NON_SPLIT_STACK 0x100000 | |
88 | 88 | |||
89 | # This entry point is for split-stack code which calls non-split-stack | 89 | # This entry point is for split-stack code which calls non-split-stack | |
90 | # code. When the linker sees this case, it converts the call to | 90 | # code. When the linker sees this case, it converts the call to | |
91 | # __morestack to call __morestack_non_split instead. We just bump the | 91 | # __morestack to call __morestack_non_split instead. We just bump the | |
92 | # requested stack space by 16K. | 92 | # requested stack space by 16K. | |
93 | 93 | |||
94 | .global __morestack_non_split | 94 | .global __morestack_non_split | |
95 | .hidden __morestack_non_split | 95 | .hidden __morestack_non_split | |
96 | 96 | |||
97 | #ifdef __ELF__ | 97 | #ifdef __ELF__ | |
98 | .type __morestack_non_split,@function | 98 | .type __morestack_non_split,@function | |
99 | #endif | 99 | #endif | |
100 | 100 | |||
101 | __morestack_non_split: | 101 | __morestack_non_split: | |
102 | .cfi_startproc | 102 | .cfi_startproc | |
103 | 103 | |||
104 | #ifndef __x86_64__ | 104 | #ifndef __x86_64__ | |
105 | 105 | |||
106 | # See below for an extended explanation of this. | 106 | # See below for an extended explanation of this. | |
107 | .cfi_def_cfa %esp,16 | 107 | .cfi_def_cfa %esp,16 | |
108 | 108 | |||
109 | pushl %eax # Save %eax in case it is a parameter. | 109 | pushl %eax # Save %eax in case it is a parameter. | |
110 | 110 | |||
111 | .cfi_adjust_cfa_offset 4 # Account for pushed register. | 111 | .cfi_adjust_cfa_offset 4 # Account for pushed register. | |
112 | 112 | |||
113 | movl %esp,%eax # Current stack, | 113 | movl %esp,%eax # Current stack, | |
114 | subl 8(%esp),%eax # less required stack frame size, | 114 | subl 8(%esp),%eax # less required stack frame size, | |
115 | subl $NON_SPLIT_STACK,%eax # less space for non-split code. | 115 | subl $NON_SPLIT_STACK,%eax # less space for non-split code. | |
116 | cmpl %gs:0x30,%eax # See if we have enough space. | 116 | cmpl %gs:0x30,%eax # See if we have enough space. | |
117 | jb 2f # Get more space if we need it. | 117 | jb 2f # Get more space if we need it. | |
118 | 118 | |||
119 | # Here the stack is | 119 | # Here the stack is | |
120 | # %esp + 20: stack pointer after two returns | 120 | # %esp + 20: stack pointer after two returns | |
121 | # %esp + 16: return address of morestack caller's caller | 121 | # %esp + 16: return address of morestack caller's caller | |
122 | # %esp + 12: size of parameters | 122 | # %esp + 12: size of parameters | |
123 | # %esp + 8: new stack frame size | 123 | # %esp + 8: new stack frame size | |
124 | # %esp + 4: return address of this function | 124 | # %esp + 4: return address of this function | |
125 | # %esp: saved %eax | 125 | # %esp: saved %eax | |
126 | # | 126 | # | |
127 | # Since we aren't doing a full split stack, we don't need to | 127 | # Since we aren't doing a full split stack, we don't need to | |
128 | # do anything when our caller returns. So we return to our | 128 | # do anything when our caller returns. So we return to our | |
129 | # caller rather than calling it, and let it return as usual. | 129 | # caller rather than calling it, and let it return as usual. | |
130 | # To make that work we adjust the return address. | 130 | # To make that work we adjust the return address. | |
131 | 131 | |||
132 | # This breaks call/return address prediction for the call to | 132 | # This breaks call/return address prediction for the call to | |
133 | # this function. I can't figure out a way to make it work | 133 | # this function. I can't figure out a way to make it work | |
134 | # short of copying the parameters down the stack, which will | 134 | # short of copying the parameters down the stack, which will | |
135 | # probably take more clock cycles than we will lose breaking | 135 | # probably take more clock cycles than we will lose breaking | |
136 | # call/return address prediction. We will only break | 136 | # call/return address prediction. We will only break | |
137 | # prediction for this call, not for our caller. | 137 | # prediction for this call, not for our caller. | |
138 | 138 | |||
139 | movl 4(%esp),%eax # Increment the return address | 139 | movl 4(%esp),%eax # Increment the return address | |
140 | cmpb $0xc3,(%eax) # to skip the ret instruction; | 140 | cmpb $0xc3,(%eax) # to skip the ret instruction; | |
141 | je 1f # see above. | 141 | je 1f # see above. | |
142 | addl $2,%eax | 142 | addl $2,%eax | |
143 | 1: inc %eax | 143 | 1: inc %eax | |
144 | 144 | |||
145 | # If the instruction that we return to is | 145 | # If the instruction that we return to is | |
146 | # leal 20(%ebp),{%eax,%ecx,%edx} | 146 | # leal 20(%ebp),{%eax,%ecx,%edx} | |
147 | # then we have been called by a varargs function that expects | 147 | # then we have been called by a varargs function that expects | |
148 | # %ebp to hold a real value. That can only work if we do the | 148 | # %ebp to hold a real value. That can only work if we do the | |
149 | # full stack split routine. FIXME: This is fragile. | 149 | # full stack split routine. FIXME: This is fragile. | |
150 | cmpb $0x8d,(%eax) | 150 | cmpb $0x8d,(%eax) | |
151 | jne 3f | 151 | jne 3f | |
152 | cmpb $0x14,2(%eax) | 152 | cmpb $0x14,2(%eax) | |
153 | jne 3f | 153 | jne 3f | |
154 | cmpb $0x45,1(%eax) | 154 | cmpb $0x45,1(%eax) | |
155 | je 2f | 155 | je 2f | |
156 | cmpb $0x4d,1(%eax) | 156 | cmpb $0x4d,1(%eax) | |
157 | je 2f | 157 | je 2f | |
158 | cmpb $0x55,1(%eax) | 158 | cmpb $0x55,1(%eax) | |
159 | je 2f | 159 | je 2f | |
160 | 160 | |||
161 | 3: | 161 | 3: | |
162 | movl %eax,4(%esp) # Update return address. | 162 | movl %eax,4(%esp) # Update return address. | |
163 | 163 | |||
164 | popl %eax # Restore %eax and stack. | 164 | popl %eax # Restore %eax and stack. | |
165 | 165 | |||
166 | .cfi_adjust_cfa_offset -4 # Account for popped register. | 166 | .cfi_adjust_cfa_offset -4 # Account for popped register. | |
167 | 167 | |||
168 | ret $8 # Return to caller, popping args. | 168 | ret $8 # Return to caller, popping args. | |
169 | 169 | |||
170 | 2: | 170 | 2: | |
171 | .cfi_adjust_cfa_offset 4 # Back to where we were. | 171 | .cfi_adjust_cfa_offset 4 # Back to where we were. | |
172 | 172 | |||
173 | popl %eax # Restore %eax and stack. | 173 | popl %eax # Restore %eax and stack. | |
174 | 174 | |||
175 | .cfi_adjust_cfa_offset -4 # Account for popped register. | 175 | .cfi_adjust_cfa_offset -4 # Account for popped register. | |
176 | 176 | |||
177 | # Increment space we request. | 177 | # Increment space we request. | |
178 | addl $NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp) | 178 | addl $NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp) | |
179 | 179 | |||
180 | # Fall through into morestack. | 180 | # Fall through into morestack. | |
181 | 181 | |||
182 | #else | 182 | #else | |
183 | 183 | |||
184 | # See below for an extended explanation of this. | 184 | # See below for an extended explanation of this. | |
185 | .cfi_def_cfa %rsp,16 | 185 | .cfi_def_cfa %rsp,16 | |
186 | 186 | |||
187 | pushq %rax # Save %rax in case caller is using | 187 | pushq %rax # Save %rax in case caller is using | |
188 | # it to preserve original %r10. | 188 | # it to preserve original %r10. | |
189 | .cfi_adjust_cfa_offset 8 # Adjust for pushed register. | 189 | .cfi_adjust_cfa_offset 8 # Adjust for pushed register. | |
190 | 190 | |||
191 | movq %rsp,%rax # Current stack, | 191 | movq %rsp,%rax # Current stack, | |
192 | subq %r10,%rax # less required stack frame size, | 192 | subq %r10,%rax # less required stack frame size, | |
193 | subq $NON_SPLIT_STACK,%rax # less space for non-split code. | 193 | subq $NON_SPLIT_STACK,%rax # less space for non-split code. | |
194 | 194 | |||
195 | #ifdef __LP64__ | 195 | #ifdef __LP64__ | |
196 | cmpq %fs:0x70,%rax # See if we have enough space. | 196 | cmpq %fs:0x70,%rax # See if we have enough space. | |
197 | #else | 197 | #else | |
198 | cmpl %fs:0x40,%eax | 198 | cmpl %fs:0x40,%eax | |
199 | #endif | 199 | #endif | |
200 | 200 | |||
201 | jb 2f # Get more space if we need it. | 201 | jb 2f # Get more space if we need it. | |
202 | 202 | |||
203 | # If the instruction that we return to is | 203 | # If the instruction that we return to is | |
204 | # leaq 24(%rbp), %r11n | 204 | # leaq 24(%rbp), %r11n | |
205 | # then we have been called by a varargs function that expects | 205 | # then we have been called by a varargs function that expects | |
206 | # %ebp to hold a real value. That can only work if we do the | 206 | # %ebp to hold a real value. That can only work if we do the | |
207 | # full stack split routine. FIXME: This is fragile. | 207 | # full stack split routine. FIXME: This is fragile. | |
208 | movq 8(%rsp),%rax | 208 | movq 8(%rsp),%rax | |
209 | incq %rax # Skip ret instruction in caller. | 209 | incq %rax # Skip ret instruction in caller. | |
210 | cmpl $0x185d8d4c,(%rax) | 210 | cmpl $0x185d8d4c,(%rax) | |
211 | je 2f | 211 | je 2f | |
212 | 212 | |||
213 | # This breaks call/return prediction, as described above. | 213 | # This breaks call/return prediction, as described above. | |
214 | incq 8(%rsp) # Increment the return address. | 214 | incq 8(%rsp) # Increment the return address. | |
215 | 215 | |||
216 | popq %rax # Restore register. | 216 | popq %rax # Restore register. | |
217 | 217 | |||
218 | .cfi_adjust_cfa_offset -8 # Adjust for popped register. | 218 | .cfi_adjust_cfa_offset -8 # Adjust for popped register. | |
219 | 219 | |||
220 | ret # Return to caller. | 220 | ret # Return to caller. | |
221 | 221 | |||
222 | 2: | 222 | 2: | |
223 | popq %rax # Restore register. | 223 | popq %rax # Restore register. | |
224 | 224 | |||
225 | .cfi_adjust_cfa_offset -8 # Adjust for popped register. | 225 | .cfi_adjust_cfa_offset -8 # Adjust for popped register. | |
226 | 226 | |||
227 | # Increment space we request. | 227 | # Increment space we request. | |
228 | addq $NON_SPLIT_STACK+0x1000+BACKOFF,%r10 | 228 | addq $NON_SPLIT_STACK+0x1000+BACKOFF,%r10 | |
229 | 229 | |||
230 | # Fall through into morestack. | 230 | # Fall through into morestack. | |
231 | 231 | |||
232 | #endif | 232 | #endif | |
233 | 233 | |||
234 | .cfi_endproc | 234 | .cfi_endproc | |
235 | #ifdef __ELF__ | 235 | #ifdef __ELF__ | |
236 | .size __morestack_non_split, . - __morestack_non_split | 236 | .size __morestack_non_split, . - __morestack_non_split | |
237 | #endif | 237 | #endif | |
238 | 238 | |||
239 | # __morestack_non_split falls through into __morestack. | 239 | # __morestack_non_split falls through into __morestack. | |
240 | 240 | |||
241 | 241 | |||
242 | # The __morestack function. | 242 | # The __morestack function. | |
243 | 243 | |||
244 | .global __morestack | 244 | .global __morestack | |
245 | .hidden __morestack | 245 | .hidden __morestack | |
246 | 246 | |||
247 | #ifdef __ELF__ | 247 | #ifdef __ELF__ | |
248 | .type __morestack,@function | 248 | .type __morestack,@function | |
249 | #endif | 249 | #endif | |
250 | 250 | |||
251 | __morestack: | 251 | __morestack: | |
252 | .LFB1: | 252 | .LFB1: | |
253 | .cfi_startproc | 253 | .cfi_startproc | |
254 | 254 | |||
255 | 255 | |||
256 | #ifndef __x86_64__ | 256 | #ifndef __x86_64__ | |
257 | 257 | |||
258 | 258 | |||
259 | # The 32-bit __morestack function. | 259 | # The 32-bit __morestack function. | |
260 | 260 | |||
261 | # We use a cleanup to restore the stack guard if an exception | 261 | # We use a cleanup to restore the stack guard if an exception | |
262 | # is thrown through this code. | 262 | # is thrown through this code. | |
263 | #ifndef __PIC__ | 263 | #ifndef __PIC__ | |
264 | .cfi_personality 0,__gcc_personality_v0 | 264 | .cfi_personality 0,__gcc_personality_v0 | |
265 | .cfi_lsda 0,.LLSDA1 | 265 | .cfi_lsda 0,.LLSDA1 | |
266 | #else | 266 | #else | |
267 | .cfi_personality 0x9b,DW.ref.__gcc_personality_v0 | 267 | .cfi_personality 0x9b,DW.ref.__gcc_personality_v0 | |
268 | .cfi_lsda 0x1b,.LLSDA1 | 268 | .cfi_lsda 0x1b,.LLSDA1 | |
269 | #endif | 269 | #endif | |
270 | 270 | |||
271 | # We return below with a ret $8. We will return to a single | 271 | # We return below with a ret $8. We will return to a single | |
272 | # return instruction, which will return to the caller of our | 272 | # return instruction, which will return to the caller of our | |
273 | # caller. We let the unwinder skip that single return | 273 | # caller. We let the unwinder skip that single return | |
274 | # instruction, and just return to the real caller. | 274 | # instruction, and just return to the real caller. | |
275 | 275 | |||
276 | # Here CFA points just past the return address on the stack, | 276 | # Here CFA points just past the return address on the stack, | |
277 | # e.g., on function entry it is %esp + 4. The stack looks | 277 | # e.g., on function entry it is %esp + 4. The stack looks | |
278 | # like this: | 278 | # like this: | |
279 | # CFA + 12: stack pointer after two returns | 279 | # CFA + 12: stack pointer after two returns | |
280 | # CFA + 8: return address of morestack caller's caller | 280 | # CFA + 8: return address of morestack caller's caller | |
281 | # CFA + 4: size of parameters | 281 | # CFA + 4: size of parameters | |
282 | # CFA: new stack frame size | 282 | # CFA: new stack frame size | |
283 | # CFA - 4: return address of this function | 283 | # CFA - 4: return address of this function | |
284 | # CFA - 8: previous value of %ebp; %ebp points here | 284 | # CFA - 8: previous value of %ebp; %ebp points here | |
285 | # Setting the new CFA to be the current CFA + 12 (i.e., %esp + | 285 | # Setting the new CFA to be the current CFA + 12 (i.e., %esp + | |
286 | # 16) will make the unwinder pick up the right return address. | 286 | # 16) will make the unwinder pick up the right return address. | |
287 | 287 | |||
288 | .cfi_def_cfa %esp,16 | 288 | .cfi_def_cfa %esp,16 | |
289 | 289 | |||
290 | pushl %ebp | 290 | pushl %ebp | |
291 | .cfi_adjust_cfa_offset 4 | 291 | .cfi_adjust_cfa_offset 4 | |
292 | .cfi_offset %ebp, -20 | 292 | .cfi_offset %ebp, -20 | |
293 | movl %esp,%ebp | 293 | movl %esp,%ebp | |
294 | .cfi_def_cfa_register %ebp | 294 | .cfi_def_cfa_register %ebp | |
295 | 295 | |||
296 | # In 32-bit mode the parameters are pushed on the stack. The | 296 | # In 32-bit mode the parameters are pushed on the stack. The | |
297 | # argument size is pushed then the new stack frame size is | 297 | # argument size is pushed then the new stack frame size is | |
298 | # pushed. | 298 | # pushed. | |
299 | 299 | |||
300 | # In the body of a non-leaf function, the stack pointer will | 300 | # In the body of a non-leaf function, the stack pointer will | |
301 | # be aligned to a 16-byte boundary. That is CFA + 12 in the | 301 | # be aligned to a 16-byte boundary. That is CFA + 12 in the | |
302 | # stack picture above: (CFA + 12) % 16 == 0. At this point we | 302 | # stack picture above: (CFA + 12) % 16 == 0. At this point we | |
303 | # have %esp == CFA - 8, so %esp % 16 == 12. We need some | 303 | # have %esp == CFA - 8, so %esp % 16 == 12. We need some | |
304 | # space for saving registers and passing parameters, and we | 304 | # space for saving registers and passing parameters, and we | |
305 | # need to wind up with %esp % 16 == 0. | 305 | # need to wind up with %esp % 16 == 0. | |
306 | subl $44,%esp | 306 | subl $44,%esp | |
307 | 307 | |||
308 | # Because our cleanup code may need to clobber %ebx, we need | 308 | # Because our cleanup code may need to clobber %ebx, we need | |
309 | # to save it here so the unwinder can restore the value used | 309 | # to save it here so the unwinder can restore the value used | |
310 | # by the caller. Note that we don't have to restore the | 310 | # by the caller. Note that we don't have to restore the | |
311 | # register, since we don't change it, we just have to save it | 311 | # register, since we don't change it, we just have to save it | |
312 | # for the unwinder. | 312 | # for the unwinder. | |
313 | movl %ebx,-4(%ebp) | 313 | movl %ebx,-4(%ebp) | |
314 | .cfi_offset %ebx, -24 | 314 | .cfi_offset %ebx, -24 | |
315 | 315 | |||
316 | # In 32-bit mode the registers %eax, %edx, and %ecx may be | 316 | # In 32-bit mode the registers %eax, %edx, and %ecx may be | |
317 | # used for parameters, depending on the regparm and fastcall | 317 | # used for parameters, depending on the regparm and fastcall | |
318 | # attributes. | 318 | # attributes. | |
319 | 319 | |||
320 | movl %eax,-8(%ebp) | 320 | movl %eax,-8(%ebp) | |
321 | movl %edx,-12(%ebp) | 321 | movl %edx,-12(%ebp) | |
322 | movl %ecx,-16(%ebp) | 322 | movl %ecx,-16(%ebp) | |
323 | 323 | |||
324 | call __morestack_block_signals | 324 | call __morestack_block_signals | |
325 | 325 | |||
326 | movl 12(%ebp),%eax # The size of the parameters. | 326 | movl 12(%ebp),%eax # The size of the parameters. | |
327 | movl %eax,8(%esp) | 327 | movl %eax,8(%esp) | |
328 | leal 20(%ebp),%eax # Address of caller's parameters. | 328 | leal 20(%ebp),%eax # Address of caller's parameters. | |
329 | movl %eax,4(%esp) | 329 | movl %eax,4(%esp) | |
330 | addl $BACKOFF,8(%ebp) # Ask for backoff bytes. | 330 | addl $BACKOFF,8(%ebp) # Ask for backoff bytes. | |
331 | leal 8(%ebp),%eax # The address of the new frame size. | 331 | leal 8(%ebp),%eax # The address of the new frame size. | |
332 | movl %eax,(%esp) | 332 | movl %eax,(%esp) | |
333 | 333 | |||
334 | call __generic_morestack | 334 | call __generic_morestack | |
335 | 335 | |||
336 | movl %eax,%esp # Switch to the new stack. | 336 | movl %eax,%esp # Switch to the new stack. | |
337 | subl 8(%ebp),%eax # The end of the stack space. | 337 | subl 8(%ebp),%eax # The end of the stack space. | |
338 | addl $BACKOFF,%eax # Back off 512 bytes. | 338 | addl $BACKOFF,%eax # Back off 512 bytes. | |
339 | 339 | |||
340 | .LEHB0: | 340 | .LEHB0: | |
341 | # FIXME: The offset must match | 341 | # FIXME: The offset must match | |
342 | # TARGET_THREAD_SPLIT_STACK_OFFSET in | 342 | # TARGET_THREAD_SPLIT_STACK_OFFSET in | |
343 | # gcc/config/i386/linux.h. | 343 | # gcc/config/i386/linux.h. | |
344 | movl %eax,%gs:0x30 # Save the new stack boundary. | 344 | movl %eax,%gs:0x30 # Save the new stack boundary. | |
345 | 345 | |||
346 | call __morestack_unblock_signals | 346 | call __morestack_unblock_signals | |
347 | 347 | |||
348 | movl -12(%ebp),%edx # Restore registers. | 348 | movl -12(%ebp),%edx # Restore registers. | |
349 | movl -16(%ebp),%ecx | 349 | movl -16(%ebp),%ecx | |
350 | 350 | |||
351 | movl 4(%ebp),%eax # Increment the return address | 351 | movl 4(%ebp),%eax # Increment the return address | |
352 | cmpb $0xc3,(%eax) # to skip the ret instruction; | 352 | cmpb $0xc3,(%eax) # to skip the ret instruction; | |
353 | je 1f # see above. | 353 | je 1f # see above. | |
354 | addl $2,%eax | 354 | addl $2,%eax | |
355 | 1: inc %eax | 355 | 1: inc %eax | |
356 | 356 | |||
357 | movl %eax,-12(%ebp) # Store return address in an | 357 | movl %eax,-12(%ebp) # Store return address in an | |
358 | # unused slot. | 358 | # unused slot. | |
359 | 359 | |||
360 | movl -8(%ebp),%eax # Restore the last register. | 360 | movl -8(%ebp),%eax # Restore the last register. | |
361 | 361 | |||
362 | call *-12(%ebp) # Call our caller! | 362 | call *-12(%ebp) # Call our caller! | |
363 | 363 | |||
364 | # The caller will return here, as predicted. | 364 | # The caller will return here, as predicted. | |
365 | 365 | |||
366 | # Save the registers which may hold a return value. We | 366 | # Save the registers which may hold a return value. We | |
367 | # assume that __generic_releasestack does not touch any | 367 | # assume that __generic_releasestack does not touch any | |
368 | # floating point or vector registers. | 368 | # floating point or vector registers. | |
369 | pushl %eax | 369 | pushl %eax | |
370 | pushl %edx | 370 | pushl %edx | |
371 | 371 | |||
372 | # Push the arguments to __generic_releasestack now so that the | 372 | # Push the arguments to __generic_releasestack now so that the | |
373 | # stack is at a 16-byte boundary for | 373 | # stack is at a 16-byte boundary for | |
374 | # __morestack_block_signals. | 374 | # __morestack_block_signals. | |
375 | pushl $0 # Where the available space is returned. | 375 | pushl $0 # Where the available space is returned. | |
376 | leal 0(%esp),%eax # Push its address. | 376 | leal 0(%esp),%eax # Push its address. | |
377 | push %eax | 377 | push %eax | |
378 | 378 | |||
379 | call __morestack_block_signals | 379 | call __morestack_block_signals | |
380 | 380 | |||
381 | call __generic_releasestack | 381 | call __generic_releasestack | |
382 | 382 | |||
383 | subl 4(%esp),%eax # Subtract available space. | 383 | subl 4(%esp),%eax # Subtract available space. | |
384 | addl $BACKOFF,%eax # Back off 512 bytes. | 384 | addl $BACKOFF,%eax # Back off 512 bytes. | |
385 | .LEHE0: | 385 | .LEHE0: | |
386 | movl %eax,%gs:0x30 # Save the new stack boundary. | 386 | movl %eax,%gs:0x30 # Save the new stack boundary. | |
387 | 387 | |||
388 | addl $8,%esp # Remove values from stack. | 388 | addl $8,%esp # Remove values from stack. | |
389 | 389 | |||
390 | # We need to restore the old stack pointer, which is in %rbp, | 390 | # We need to restore the old stack pointer, which is in %rbp, | |
391 | # before we unblock signals. We also need to restore %eax and | 391 | # before we unblock signals. We also need to restore %eax and | |
392 | # %edx after we unblock signals but before we return. Do this | 392 | # %edx after we unblock signals but before we return. Do this | |
393 | # by moving %eax and %edx from the current stack to the old | 393 | # by moving %eax and %edx from the current stack to the old | |
394 | # stack. | 394 | # stack. | |
395 | 395 | |||
396 | popl %edx # Pop return value from current stack. | 396 | popl %edx # Pop return value from current stack. | |
397 | popl %eax | 397 | popl %eax | |
398 | 398 | |||
399 | movl %ebp,%esp # Restore stack pointer. | 399 | movl %ebp,%esp # Restore stack pointer. | |
400 | 400 | |||
401 | # As before, we now have %esp % 16 == 12. | 401 | # As before, we now have %esp % 16 == 12. | |
402 | 402 | |||
403 | pushl %eax # Push return value on old stack. | 403 | pushl %eax # Push return value on old stack. | |
404 | pushl %edx | 404 | pushl %edx | |
405 | subl $4,%esp # Align stack to 16-byte boundary. | 405 | subl $4,%esp # Align stack to 16-byte boundary. | |
406 | 406 | |||
407 | call __morestack_unblock_signals | 407 | call __morestack_unblock_signals | |
408 | 408 | |||
409 | addl $4,%esp | 409 | addl $4,%esp | |
410 | popl %edx # Restore return value. | 410 | popl %edx # Restore return value. | |
411 | popl %eax | 411 | popl %eax | |
412 | 412 | |||
413 | .cfi_remember_state | 413 | .cfi_remember_state | |
414 | 414 | |||
415 | # We never changed %ebx, so we don't have to actually restore it. | 415 | # We never changed %ebx, so we don't have to actually restore it. | |
416 | .cfi_restore %ebx | 416 | .cfi_restore %ebx | |
417 | 417 | |||
418 | popl %ebp | 418 | popl %ebp | |
419 | .cfi_restore %ebp | 419 | .cfi_restore %ebp | |
420 | .cfi_def_cfa %esp, 16 | 420 | .cfi_def_cfa %esp, 16 | |
421 | ret $8 # Return to caller, which will | 421 | ret $8 # Return to caller, which will | |
422 | # immediately return. Pop | 422 | # immediately return. Pop | |
423 | # arguments as we go. | 423 | # arguments as we go. | |
424 | 424 | |||
425 | # This is the cleanup code called by the stack unwinder when unwinding | 425 | # This is the cleanup code called by the stack unwinder when unwinding | |
426 | # through the code between .LEHB0 and .LEHE0 above. | 426 | # through the code between .LEHB0 and .LEHE0 above. | |
427 | 427 | |||
428 | .L1: | 428 | .L1: | |
429 | .cfi_restore_state | 429 | .cfi_restore_state | |
430 | subl $16,%esp # Maintain 16 byte alignment. | 430 | subl $16,%esp # Maintain 16 byte alignment. | |
431 | movl %eax,4(%esp) # Save exception header. | 431 | movl %eax,4(%esp) # Save exception header. | |
432 | movl %ebp,(%esp) # Stack pointer after resume. | 432 | movl %ebp,(%esp) # Stack pointer after resume. | |
433 | call __generic_findstack | 433 | call __generic_findstack | |
434 | movl %ebp,%ecx # Get the stack pointer. | 434 | movl %ebp,%ecx # Get the stack pointer. | |
435 | subl %eax,%ecx # Subtract available space. | 435 | subl %eax,%ecx # Subtract available space. | |
436 | addl $BACKOFF,%ecx # Back off 512 bytes. | 436 | addl $BACKOFF,%ecx # Back off 512 bytes. | |
437 | movl %ecx,%gs:0x30 # Save new stack boundary. | 437 | movl %ecx,%gs:0x30 # Save new stack boundary. | |
438 | movl 4(%esp),%eax # Function argument. | 438 | movl 4(%esp),%eax # Function argument. | |
439 | movl %eax,(%esp) | 439 | movl %eax,(%esp) | |
440 | #ifdef __PIC__ | 440 | #ifdef __PIC__ | |
441 | call __x86.get_pc_thunk.bx # %ebx may not be set up for us. | 441 | call __x86.get_pc_thunk.bx # %ebx may not be set up for us. | |
442 | addl $_GLOBAL_OFFSET_TABLE_, %ebx | 442 | addl $_GLOBAL_OFFSET_TABLE_, %ebx | |
443 | call _Unwind_Resume@PLT # Resume unwinding. | 443 | call _Unwind_Resume@PLT # Resume unwinding. | |
444 | #else | 444 | #else | |
445 | call _Unwind_Resume | 445 | call _Unwind_Resume | |
446 | #endif | 446 | #endif | |
447 | 447 | |||
448 | #else /* defined(__x86_64__) */ | 448 | #else /* defined(__x86_64__) */ | |
449 | 449 | |||
450 | 450 | |||
451 | # The 64-bit __morestack function. | 451 | # The 64-bit __morestack function. | |
452 | 452 | |||
453 | # We use a cleanup to restore the stack guard if an exception | 453 | # We use a cleanup to restore the stack guard if an exception | |
454 | # is thrown through this code. | 454 | # is thrown through this code. | |
455 | #ifndef __PIC__ | 455 | #ifndef __PIC__ | |
456 | .cfi_personality 0x3,__gcc_personality_v0 | 456 | .cfi_personality 0x3,__gcc_personality_v0 | |
457 | .cfi_lsda 0x3,.LLSDA1 | 457 | .cfi_lsda 0x3,.LLSDA1 | |
458 | #else | 458 | #else | |
459 | .cfi_personality 0x9b,DW.ref.__gcc_personality_v0 | 459 | .cfi_personality 0x9b,DW.ref.__gcc_personality_v0 | |
460 | .cfi_lsda 0x1b,.LLSDA1 | 460 | .cfi_lsda 0x1b,.LLSDA1 | |
461 | #endif | 461 | #endif | |
462 | 462 | |||
463 | # We will return a single return instruction, which will | 463 | # We will return a single return instruction, which will | |
464 | # return to the caller of our caller. Let the unwinder skip | 464 | # return to the caller of our caller. Let the unwinder skip | |
465 | # that single return instruction, and just return to the real | 465 | # that single return instruction, and just return to the real | |
466 | # caller. | 466 | # caller. | |
467 | .cfi_def_cfa %rsp,16 | 467 | .cfi_def_cfa %rsp,16 | |
468 | 468 | |||
469 | # Set up a normal backtrace. | 469 | # Set up a normal backtrace. | |
470 | pushq %rbp | 470 | pushq %rbp | |
471 | .cfi_adjust_cfa_offset 8 | 471 | .cfi_adjust_cfa_offset 8 | |
472 | .cfi_offset %rbp, -24 | 472 | .cfi_offset %rbp, -24 | |
473 | movq %rsp, %rbp | 473 | movq %rsp, %rbp | |
474 | .cfi_def_cfa_register %rbp | 474 | .cfi_def_cfa_register %rbp | |
475 | 475 | |||
476 | # In 64-bit mode the new stack frame size is passed in r10 | 476 | # In 64-bit mode the new stack frame size is passed in r10 | |
477 | # and the argument size is passed in r11. | 477 | # and the argument size is passed in r11. | |
478 | 478 | |||
479 | addq $BACKOFF,%r10 # Ask for backoff bytes. | 479 | addq $BACKOFF,%r10 # Ask for backoff bytes. | |
480 | pushq %r10 # Save new frame size. | 480 | pushq %r10 # Save new frame size. | |
481 | 481 | |||
482 | # In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8, | 482 | # In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8, | |
483 | # and %r9 may be used for parameters. We also preserve %rax | 483 | # and %r9 may be used for parameters. We also preserve %rax | |
484 | # which the caller may use to hold %r10. | 484 | # which the caller may use to hold %r10. | |
485 | 485 | |||
486 | pushq %rax | 486 | pushq %rax | |
487 | pushq %rdi | 487 | pushq %rdi | |
488 | pushq %rsi | 488 | pushq %rsi | |
489 | pushq %rdx | 489 | pushq %rdx | |
490 | pushq %rcx | 490 | pushq %rcx | |
491 | pushq %r8 | 491 | pushq %r8 | |
492 | pushq %r9 | 492 | pushq %r9 | |
493 | 493 | |||
494 | pushq %r11 | 494 | pushq %r11 | |
495 | 495 | |||
496 | # We entered morestack with the stack pointer aligned to a | 496 | # We entered morestack with the stack pointer aligned to a | |
497 | # 16-byte boundary (the call to morestack's caller used 8 | 497 | # 16-byte boundary (the call to morestack's caller used 8 | |
498 | # bytes, and the call to morestack used 8 bytes). We have now | 498 | # bytes, and the call to morestack used 8 bytes). We have now | |
499 | # pushed 10 registers, so we are still aligned to a 16-byte | 499 | # pushed 10 registers, so we are still aligned to a 16-byte | |
500 | # boundary. | 500 | # boundary. | |
501 | 501 | |||
502 | call __morestack_block_signals | 502 | call __morestack_block_signals | |
503 | 503 | |||
504 | leaq -8(%rbp),%rdi # Address of new frame size. | 504 | leaq -8(%rbp),%rdi # Address of new frame size. | |
505 | leaq 24(%rbp),%rsi # The caller's parameters. | 505 | leaq 24(%rbp),%rsi # The caller's parameters. | |
506 | popq %rdx # The size of the parameters. | 506 | popq %rdx # The size of the parameters. | |
507 | 507 | |||
508 | subq $8,%rsp # Align stack. | 508 | subq $8,%rsp # Align stack. | |
509 | 509 | |||
510 | call __generic_morestack | 510 | call __generic_morestack | |
511 | 511 | |||
512 | movq -8(%rbp),%r10 # Reload modified frame size | 512 | movq -8(%rbp),%r10 # Reload modified frame size | |
513 | movq %rax,%rsp # Switch to the new stack. | 513 | movq %rax,%rsp # Switch to the new stack. | |
514 | subq %r10,%rax # The end of the stack space. | 514 | subq %r10,%rax # The end of the stack space. | |
515 | addq $BACKOFF,%rax # Back off 1024 bytes. | 515 | addq $BACKOFF,%rax # Back off 1024 bytes. | |
516 | 516 | |||
517 | .LEHB0: | 517 | .LEHB0: | |
518 | # FIXME: The offset must match | 518 | # FIXME: The offset must match | |
519 | # TARGET_THREAD_SPLIT_STACK_OFFSET in | 519 | # TARGET_THREAD_SPLIT_STACK_OFFSET in | |
520 | # gcc/config/i386/linux64.h. | 520 | # gcc/config/i386/linux64.h. | |
521 | # Macro to save the new stack boundary. | 521 | # Macro to save the new stack boundary. | |
522 | #ifdef __LP64__ | 522 | #ifdef __LP64__ | |
523 | #define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movq %r##reg,%fs:0x70 | 523 | #define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movq %r##reg,%fs:0x70 | |
524 | #else | 524 | #else | |
525 | #define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movl %e##reg,%fs:0x40 | 525 | #define X86_64_SAVE_NEW_STACK_BOUNDARY(reg) movl %e##reg,%fs:0x40 | |
526 | #endif | 526 | #endif | |
527 | X86_64_SAVE_NEW_STACK_BOUNDARY (ax) | 527 | X86_64_SAVE_NEW_STACK_BOUNDARY (ax) | |
528 | 528 | |||
529 | call __morestack_unblock_signals | 529 | call __morestack_unblock_signals | |
530 | 530 | |||
531 | movq -24(%rbp),%rdi # Restore registers. | 531 | movq -24(%rbp),%rdi # Restore registers. | |
532 | movq -32(%rbp),%rsi | 532 | movq -32(%rbp),%rsi | |
533 | movq -40(%rbp),%rdx | 533 | movq -40(%rbp),%rdx | |
534 | movq -48(%rbp),%rcx | 534 | movq -48(%rbp),%rcx | |
535 | movq -56(%rbp),%r8 | 535 | movq -56(%rbp),%r8 | |
536 | movq -64(%rbp),%r9 | 536 | movq -64(%rbp),%r9 | |
537 | 537 | |||
538 | movq 8(%rbp),%r10 # Increment the return address | 538 | movq 8(%rbp),%r10 # Increment the return address | |
539 | incq %r10 # to skip the ret instruction; | 539 | incq %r10 # to skip the ret instruction; | |
540 | # see above. | 540 | # see above. | |
541 | 541 | |||
542 | movq -16(%rbp),%rax # Restore caller's %rax. | 542 | movq -16(%rbp),%rax # Restore caller's %rax. | |
543 | 543 | |||
544 | call *%r10 # Call our caller! | 544 | call *%r10 # Call our caller! | |
545 | 545 | |||
546 | # The caller will return here, as predicted. | 546 | # The caller will return here, as predicted. | |
547 | 547 | |||
548 | # Save the registers which may hold a return value. We | 548 | # Save the registers which may hold a return value. We | |
549 | # assume that __generic_releasestack does not touch any | 549 | # assume that __generic_releasestack does not touch any | |
550 | # floating point or vector registers. | 550 | # floating point or vector registers. | |
551 | pushq %rax | 551 | pushq %rax | |
552 | pushq %rdx | 552 | pushq %rdx | |
553 | 553 | |||
554 | call __morestack_block_signals | 554 | call __morestack_block_signals | |
555 | 555 | |||
556 | pushq $0 # For alignment. | 556 | pushq $0 # For alignment. | |
557 | pushq $0 # Where the available space is returned. | 557 | pushq $0 # Where the available space is returned. | |
558 | leaq 0(%rsp),%rdi # Pass its address. | 558 | leaq 0(%rsp),%rdi # Pass its address. | |
559 | 559 | |||
560 | call __generic_releasestack | 560 | call __generic_releasestack | |
561 | 561 | |||
562 | subq 0(%rsp),%rax # Subtract available space. | 562 | subq 0(%rsp),%rax # Subtract available space. | |
563 | addq $BACKOFF,%rax # Back off 1024 bytes. | 563 | addq $BACKOFF,%rax # Back off 1024 bytes. | |
564 | .LEHE0: | 564 | .LEHE0: | |
565 | X86_64_SAVE_NEW_STACK_BOUNDARY (ax) | 565 | X86_64_SAVE_NEW_STACK_BOUNDARY (ax) | |
566 | 566 | |||
567 | addq $16,%rsp # Remove values from stack. | 567 | addq $16,%rsp # Remove values from stack. | |
568 | 568 | |||
569 | # We need to restore the old stack pointer, which is in %rbp, | 569 | # We need to restore the old stack pointer, which is in %rbp, | |
570 | # before we unblock signals. We also need to restore %rax and | 570 | # before we unblock signals. We also need to restore %rax and | |
571 | # %rdx after we unblock signals but before we return. Do this | 571 | # %rdx after we unblock signals but before we return. Do this | |
572 | # by moving %rax and %rdx from the current stack to the old | 572 | # by moving %rax and %rdx from the current stack to the old | |
573 | # stack. | 573 | # stack. | |
574 | 574 | |||
575 | popq %rdx # Pop return value from current stack. | 575 | popq %rdx # Pop return value from current stack. | |
576 | popq %rax | 576 | popq %rax | |
577 | 577 | |||
578 | movq %rbp,%rsp # Restore stack pointer. | 578 | movq %rbp,%rsp # Restore stack pointer. | |
579 | 579 | |||
580 | # Now (%rsp & 16) == 8. | 580 | # Now (%rsp & 16) == 8. | |
581 | 581 | |||
582 | subq $8,%rsp # For alignment. | 582 | subq $8,%rsp # For alignment. | |
583 | pushq %rax # Push return value on old stack. | 583 | pushq %rax # Push return value on old stack. | |
584 | pushq %rdx | 584 | pushq %rdx | |
585 | 585 | |||
586 | call __morestack_unblock_signals | 586 | call __morestack_unblock_signals | |
587 | 587 | |||
588 | popq %rdx # Restore return value. | 588 | popq %rdx # Restore return value. | |
589 | popq %rax | 589 | popq %rax | |
590 | addq $8,%rsp | 590 | addq $8,%rsp | |
591 | 591 | |||
592 | .cfi_remember_state | 592 | .cfi_remember_state | |
593 | popq %rbp | 593 | popq %rbp | |
594 | .cfi_restore %rbp | 594 | .cfi_restore %rbp | |
595 | .cfi_def_cfa %rsp, 16 | 595 | .cfi_def_cfa %rsp, 16 | |
596 | ret # Return to caller, which will | 596 | ret # Return to caller, which will | |
597 | # immediately return. | 597 | # immediately return. | |
598 | 598 | |||
599 | # This is the cleanup code called by the stack unwinder when unwinding | 599 | # This is the cleanup code called by the stack unwinder when unwinding | |
600 | # through the code between .LEHB0 and .LEHE0 above. | 600 | # through the code between .LEHB0 and .LEHE0 above. | |
601 | 601 | |||
602 | .L1: | 602 | .L1: | |
603 | .cfi_restore_state | 603 | .cfi_restore_state | |
604 | subq $16,%rsp # Maintain 16 byte alignment. | 604 | subq $16,%rsp # Maintain 16 byte alignment. | |
605 | movq %rax,(%rsp) # Save exception header. | 605 | movq %rax,(%rsp) # Save exception header. | |
606 | movq %rbp,%rdi # Stack pointer after resume. | 606 | movq %rbp,%rdi # Stack pointer after resume. | |
607 | call __generic_findstack | 607 | call __generic_findstack | |
608 | movq %rbp,%rcx # Get the stack pointer. | 608 | movq %rbp,%rcx # Get the stack pointer. | |
609 | subq %rax,%rcx # Subtract available space. | 609 | subq %rax,%rcx # Subtract available space. | |
610 | addq $BACKOFF,%rcx # Back off 1024 bytes. | 610 | addq $BACKOFF,%rcx # Back off 1024 bytes. | |
611 | X86_64_SAVE_NEW_STACK_BOUNDARY (cx) | 611 | X86_64_SAVE_NEW_STACK_BOUNDARY (cx) | |
612 | movq (%rsp),%rdi # Restore exception data for call. | 612 | movq (%rsp),%rdi # Restore exception data for call. | |
613 | #ifdef __PIC__ | 613 | #ifdef __PIC__ | |
614 | call _Unwind_Resume@PLT # Resume unwinding. | 614 | call _Unwind_Resume@PLT # Resume unwinding. | |
615 | #else | 615 | #else | |
616 | call _Unwind_Resume # Resume unwinding. | 616 | call _Unwind_Resume # Resume unwinding. | |
617 | #endif | 617 | #endif | |
618 | 618 | |||
619 | #endif /* defined(__x86_64__) */ | 619 | #endif /* defined(__x86_64__) */ | |
620 | 620 | |||
621 | .cfi_endproc | 621 | .cfi_endproc | |
622 | #ifdef __ELF__ | 622 | #ifdef __ELF__ | |
623 | .size __morestack, . - __morestack | 623 | .size __morestack, . - __morestack | |
624 | #endif | 624 | #endif | |
625 | 625 | |||
626 | #if !defined(__x86_64__) && defined(__PIC__) | 626 | #if !defined(__x86_64__) && defined(__PIC__) | |
627 | # Output the thunk to get PC into bx, since we use it above. | 627 | # Output the thunk to get PC into bx, since we use it above. | |
628 | .section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat | 628 | .section .text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat | |
629 | .globl __x86.get_pc_thunk.bx | 629 | .globl __x86.get_pc_thunk.bx | |
630 | .hidden __x86.get_pc_thunk.bx | 630 | .hidden __x86.get_pc_thunk.bx | |
631 | #ifdef __ELF__ | 631 | #ifdef __ELF__ | |
632 | .type __x86.get_pc_thunk.bx, @function | 632 | .type __x86.get_pc_thunk.bx, @function | |
633 | #endif | 633 | #endif | |
634 | __x86.get_pc_thunk.bx: | 634 | __x86.get_pc_thunk.bx: | |
635 | .cfi_startproc | 635 | .cfi_startproc | |
636 | movl (%esp), %ebx | 636 | movl (%esp), %ebx | |
637 | ret | 637 | ret | |
638 | .cfi_endproc | 638 | .cfi_endproc | |
639 | #ifdef __ELF__ | 639 | #ifdef __ELF__ | |
640 | .size __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx | 640 | .size __x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx | |
641 | #endif | 641 | #endif | |
642 | #endif | 642 | #endif | |
643 | 643 | |||
644 | # The exception table. This tells the personality routine to execute | 644 | # The exception table. This tells the personality routine to execute | |
645 | # the exception handler. | 645 | # the exception handler. | |
646 | 646 | |||
647 | .section .gcc_except_table,"a",@progbits | 647 | .section .gcc_except_table,"a",@progbits | |
648 | .align 4 | 648 | .align 4 | |
649 | .LLSDA1: | 649 | .LLSDA1: | |
650 | .byte 0xff # @LPStart format (omit) | 650 | .byte 0xff # @LPStart format (omit) | |
651 | .byte 0xff # @TType format (omit) | 651 | .byte 0xff # @TType format (omit) | |
652 | .byte 0x1 # call-site format (uleb128) | 652 | .byte 0x1 # call-site format (uleb128) | |
653 | .uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length | 653 | .uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length | |
654 | .LLSDACSB1: | 654 | .LLSDACSB1: | |
655 | .uleb128 .LEHB0-.LFB1 # region 0 start | 655 | .uleb128 .LEHB0-.LFB1 # region 0 start | |
656 | .uleb128 .LEHE0-.LEHB0 # length | 656 | .uleb128 .LEHE0-.LEHB0 # length | |
657 | .uleb128 .L1-.LFB1 # landing pad | 657 | .uleb128 .L1-.LFB1 # landing pad | |
658 | .uleb128 0 # action | 658 | .uleb128 0 # action | |
659 | .LLSDACSE1: | 659 | .LLSDACSE1: | |
660 | 660 | |||
661 | 661 | |||
662 | .global __gcc_personality_v0 | 662 | .global __gcc_personality_v0 | |
663 | #ifdef __PIC__ | 663 | #ifdef __PIC__ | |
664 | # Build a position independent reference to the basic | 664 | # Build a position independent reference to the basic | |
665 | # personality function. | 665 | # personality function. | |
666 | .hidden DW.ref.__gcc_personality_v0 | 666 | .hidden DW.ref.__gcc_personality_v0 | |
667 | .weak DW.ref.__gcc_personality_v0 | 667 | .weak DW.ref.__gcc_personality_v0 | |
668 | .section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat | 668 | .section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat | |
669 | .type DW.ref.__gcc_personality_v0, @object | 669 | .type DW.ref.__gcc_personality_v0, @object | |
670 | DW.ref.__gcc_personality_v0: | 670 | DW.ref.__gcc_personality_v0: | |
671 | #ifndef __LP64__ | 671 | #ifndef __LP64__ | |
672 | .align 4 | 672 | .align 4 | |
673 | .size DW.ref.__gcc_personality_v0, 4 | 673 | .size DW.ref.__gcc_personality_v0, 4 | |
674 | .long __gcc_personality_v0 | 674 | .long __gcc_personality_v0 | |
675 | #else | 675 | #else | |
676 | .align 8 | 676 | .align 8 | |
677 | .size DW.ref.__gcc_personality_v0, 8 | 677 | .size DW.ref.__gcc_personality_v0, 8 | |
678 | .quad __gcc_personality_v0 | 678 | .quad __gcc_personality_v0 | |
679 | #endif | 679 | #endif | |
680 | #endif | 680 | #endif | |
681 | 681 | |||
682 | #if defined __x86_64__ && defined __LP64__ | 682 | #if defined __x86_64__ && defined __LP64__ | |
683 | 683 | |||
684 | # This entry point is used for the large model. With this entry point | 684 | # This entry point is used for the large model. With this entry point | |
685 | # the upper 32 bits of %r10 hold the argument size and the lower 32 | 685 | # the upper 32 bits of %r10 hold the argument size and the lower 32 | |
686 | # bits hold the new stack frame size. There doesn't seem to be a way | 686 | # bits hold the new stack frame size. There doesn't seem to be a way | |
687 | # to know in the assembler code that we are assembling for the large | 687 | # to know in the assembler code that we are assembling for the large | |
688 | # model, and there doesn't seem to be a large model multilib anyhow. | 688 | # model, and there doesn't seem to be a large model multilib anyhow. | |
689 | # If one is developed, then the non-PIC code is probably OK since we | 689 | # If one is developed, then the non-PIC code is probably OK since we | |
690 | # will probably be close to the morestack code, but the PIC code | 690 | # will probably be close to the morestack code, but the PIC code | |
691 | # almost certainly needs to be changed. FIXME. | 691 | # almost certainly needs to be changed. FIXME. | |
692 | 692 | |||
693 | .text | 693 | .text | |
694 | .global __morestack_large_model | 694 | .global __morestack_large_model | |
695 | .hidden __morestack_large_model | 695 | .hidden __morestack_large_model | |
696 | 696 | |||
697 | #ifdef __ELF__ | 697 | #ifdef __ELF__ | |
698 | .type __morestack_large_model,@function | 698 | .type __morestack_large_model,@function | |
699 | #endif | 699 | #endif | |
700 | 700 | |||
701 | __morestack_large_model: | 701 | __morestack_large_model: | |
702 | 702 | |||
703 | .cfi_startproc | 703 | .cfi_startproc | |
704 | 704 | |||
705 | movq %r10, %r11 | 705 | movq %r10, %r11 | |
706 | andl $0xffffffff, %r10d | 706 | andl $0xffffffff, %r10d | |
707 | sarq $32, %r11 | 707 | sarq $32, %r11 | |
708 | jmp __morestack | 708 | jmp __morestack | |
709 | 709 | |||
710 | .cfi_endproc | 710 | .cfi_endproc | |
711 | #ifdef __ELF__ | 711 | #ifdef __ELF__ | |
712 | .size __morestack_large_model, . - __morestack_large_model | 712 | .size __morestack_large_model, . - __morestack_large_model | |
713 | #endif | 713 | #endif | |
714 | 714 | |||
715 | #endif /* __x86_64__ && __LP64__ */ | 715 | #endif /* __x86_64__ && __LP64__ */ | |
716 | 716 | |||
717 | # Initialize the stack test value when the program starts or when a | 717 | # Initialize the stack test value when the program starts or when a | |
718 | # new thread starts. We don't know how large the main stack is, so we | 718 | # new thread starts. We don't know how large the main stack is, so we | |
719 | # guess conservatively. We might be able to use getrlimit here. | 719 | # guess conservatively. We might be able to use getrlimit here. | |
720 | 720 | |||
721 | .text | 721 | .text | |
722 | .global __stack_split_initialize | 722 | .global __stack_split_initialize | |
723 | .hidden __stack_split_initialize | 723 | .hidden __stack_split_initialize | |
724 | 724 | |||
725 | #ifdef __ELF__ | 725 | #ifdef __ELF__ | |
726 | .type __stack_split_initialize, @function | 726 | .type __stack_split_initialize, @function | |
727 | #endif | 727 | #endif | |
728 | 728 | |||
729 | __stack_split_initialize: | 729 | __stack_split_initialize: | |
730 | 730 | |||
731 | #ifndef __x86_64__ | 731 | #ifndef __x86_64__ | |
732 | 732 | |||
733 | leal -16000(%esp),%eax # We should have at least 16K. | 733 | leal -16000(%esp),%eax # We should have at least 16K. | |
734 | movl %eax,%gs:0x30 | 734 | movl %eax,%gs:0x30 | |
735 | pushl $16000 | 735 | pushl $16000 | |
736 | pushl %esp | 736 | pushl %esp | |
737 | #ifdef __PIC__ | 737 | #ifdef __PIC__ | |
738 | call __generic_morestack_set_initial_sp@PLT | 738 | call __generic_morestack_set_initial_sp@PLT | |
739 | #else | 739 | #else | |
740 | call __generic_morestack_set_initial_sp | 740 | call __generic_morestack_set_initial_sp | |
741 | #endif | 741 | #endif | |
742 | addl $8,%esp | 742 | addl $8,%esp | |
743 | ret | 743 | ret | |
744 | 744 | |||
745 | #else /* defined(__x86_64__) */ | 745 | #else /* defined(__x86_64__) */ | |
746 | 746 | |||
747 | leaq -16000(%rsp),%rax # We should have at least 16K. | 747 | leaq -16000(%rsp),%rax # We should have at least 16K. | |
748 | X86_64_SAVE_NEW_STACK_BOUNDARY (ax) | 748 | X86_64_SAVE_NEW_STACK_BOUNDARY (ax) | |
749 | movq %rsp,%rdi | 749 | movq %rsp,%rdi | |
750 | movq $16000,%rsi | 750 | movq $16000,%rsi | |
751 | #ifdef __PIC__ | 751 | #ifdef __PIC__ | |
752 | call __generic_morestack_set_initial_sp@PLT | 752 | call __generic_morestack_set_initial_sp@PLT | |
753 | #else | 753 | #else | |
754 | call __generic_morestack_set_initial_sp | 754 | call __generic_morestack_set_initial_sp | |
755 | #endif | 755 | #endif | |
756 | ret | 756 | ret | |
757 | 757 | |||
758 | #endif /* defined(__x86_64__) */ | 758 | #endif /* defined(__x86_64__) */ | |
759 | 759 | |||
760 | #ifdef __ELF__ | 760 | #ifdef __ELF__ | |
761 | .size __stack_split_initialize, . - __stack_split_initialize | 761 | .size __stack_split_initialize, . - __stack_split_initialize | |
762 | #endif | 762 | #endif | |
763 | 763 | |||
764 | # Routines to get and set the guard, for __splitstack_getcontext, | 764 | # Routines to get and set the guard, for __splitstack_getcontext, | |
765 | # __splitstack_setcontext, and __splitstack_makecontext. | 765 | # __splitstack_setcontext, and __splitstack_makecontext. | |
766 | 766 | |||
767 | # void *__morestack_get_guard (void) returns the current stack guard. | 767 | # void *__morestack_get_guard (void) returns the current stack guard. | |
768 | .text | 768 | .text | |
769 | .global __morestack_get_guard | 769 | .global __morestack_get_guard | |
770 | .hidden __morestack_get_guard | 770 | .hidden __morestack_get_guard | |
771 | 771 | |||
772 | #ifdef __ELF__ | 772 | #ifdef __ELF__ | |
773 | .type __morestack_get_guard,@function | 773 | .type __morestack_get_guard,@function | |
774 | #endif | 774 | #endif | |
775 | 775 | |||
776 | __morestack_get_guard: | 776 | __morestack_get_guard: | |
777 | 777 | |||
778 | #ifndef __x86_64__ | 778 | #ifndef __x86_64__ | |
779 | movl %gs:0x30,%eax | 779 | movl %gs:0x30,%eax | |
780 | #else | 780 | #else | |
781 | #ifdef __LP64__ | 781 | #ifdef __LP64__ | |
782 | movq %fs:0x70,%rax | 782 | movq %fs:0x70,%rax | |
783 | #else | 783 | #else | |
784 | movl %fs:0x40,%eax | 784 | movl %fs:0x40,%eax | |
785 | #endif | 785 | #endif | |
786 | #endif | 786 | #endif | |
787 | ret | 787 | ret | |
788 | 788 | |||
789 | #ifdef __ELF__ | 789 | #ifdef __ELF__ | |
790 | .size __morestack_get_guard, . - __morestack_get_guard | 790 | .size __morestack_get_guard, . - __morestack_get_guard | |
791 | #endif | 791 | #endif | |
792 | 792 | |||
793 | # void __morestack_set_guard (void *) sets the stack guard. | 793 | # void __morestack_set_guard (void *) sets the stack guard. | |
794 | .global __morestack_set_guard | 794 | .global __morestack_set_guard | |
795 | .hidden __morestack_set_guard | 795 | .hidden __morestack_set_guard | |
796 | 796 | |||
797 | #ifdef __ELF__ | 797 | #ifdef __ELF__ | |
798 | .type __morestack_set_guard,@function | 798 | .type __morestack_set_guard,@function | |
799 | #endif | 799 | #endif | |
800 | 800 | |||
801 | __morestack_set_guard: | 801 | __morestack_set_guard: | |
802 | 802 | |||
803 | #ifndef __x86_64__ | 803 | #ifndef __x86_64__ | |
804 | movl 4(%esp),%eax | 804 | movl 4(%esp),%eax | |
805 | movl %eax,%gs:0x30 | 805 | movl %eax,%gs:0x30 | |
806 | #else | 806 | #else | |
807 | X86_64_SAVE_NEW_STACK_BOUNDARY (di) | 807 | X86_64_SAVE_NEW_STACK_BOUNDARY (di) | |
808 | #endif | 808 | #endif | |
809 | ret | 809 | ret | |
810 | 810 | |||
811 | #ifdef __ELF__ | 811 | #ifdef __ELF__ | |
812 | .size __morestack_set_guard, . - __morestack_set_guard | 812 | .size __morestack_set_guard, . - __morestack_set_guard | |
813 | #endif | 813 | #endif | |
814 | 814 | |||
815 | # void *__morestack_make_guard (void *, size_t) returns the stack | 815 | # void *__morestack_make_guard (void *, size_t) returns the stack | |
816 | # guard value for a stack. | 816 | # guard value for a stack. | |
817 | .global __morestack_make_guard | 817 | .global __morestack_make_guard | |
818 | .hidden __morestack_make_guard | 818 | .hidden __morestack_make_guard | |
819 | 819 | |||
820 | #ifdef __ELF__ | 820 | #ifdef __ELF__ | |
821 | .type __morestack_make_guard,@function | 821 | .type __morestack_make_guard,@function | |
822 | #endif | 822 | #endif | |
823 | 823 | |||
824 | __morestack_make_guard: | 824 | __morestack_make_guard: | |
825 | 825 | |||
826 | #ifndef __x86_64__ | 826 | #ifndef __x86_64__ | |
827 | movl 4(%esp),%eax | 827 | movl 4(%esp),%eax | |
828 | subl 8(%esp),%eax | 828 | subl 8(%esp),%eax | |
829 | addl $BACKOFF,%eax | 829 | addl $BACKOFF,%eax | |
830 | #else | 830 | #else | |
831 | subq %rsi,%rdi | 831 | subq %rsi,%rdi | |
832 | addq $BACKOFF,%rdi | 832 | addq $BACKOFF,%rdi | |
833 | movq %rdi,%rax | 833 | movq %rdi,%rax | |
834 | #endif | 834 | #endif | |
835 | ret | 835 | ret | |
836 | 836 | |||
837 | #ifdef __ELF__ | 837 | #ifdef __ELF__ | |
838 | .size __morestack_make_guard, . - __morestack_make_guard | 838 | .size __morestack_make_guard, . - __morestack_make_guard | |
839 | #endif | 839 | #endif | |
840 | 840 | |||
841 | # Make __stack_split_initialize a high priority constructor. FIXME: | 841 | # Make __stack_split_initialize a high priority constructor. FIXME: | |
842 | # This is ELF specific. | 842 | # This is ELF specific. | |
843 | 843 | |||
844 | .section .ctors.65535,"aw",@progbits | 844 | .section .ctors.65535,"aw",@progbits | |
845 | 845 | |||
846 | #ifndef __LP64__ | 846 | #ifndef __LP64__ | |
847 | .align 4 | 847 | .align 4 | |
848 | .long __stack_split_initialize | 848 | .long __stack_split_initialize | |
849 | .long __morestack_load_mmap | 849 | .long __morestack_load_mmap | |
850 | #else | 850 | #else | |
851 | .align 8 | 851 | .align 8 | |
852 | .quad __stack_split_initialize | 852 | .quad __stack_split_initialize | |
853 | .quad __morestack_load_mmap | 853 | .quad __morestack_load_mmap | |
854 | #endif | 854 | #endif | |
855 | 855 | |||
856 | #ifdef __ELF__ | 856 | #if defined(__ELF__) && defined(__linux__) | |
857 | .section .note.GNU-stack,"",@progbits | 857 | .section .note.GNU-stack,"",@progbits | |
858 | .section .note.GNU-split-stack,"",@progbits | 858 | .section .note.GNU-split-stack,"",@progbits | |
859 | .section .note.GNU-no-split-stack,"",@progbits | 859 | .section .note.GNU-no-split-stack,"",@progbits | |
860 | #endif | 860 | #endif |
--- src/external/gpl3/gcc/dist/libitm/config/alpha/Attic/sjlj.S 2014/03/01 08:41:18 1.1.1.1
+++ src/external/gpl3/gcc/dist/libitm/config/alpha/Attic/sjlj.S 2015/11/07 16:53:08 1.2
@@ -1,112 +1,112 @@ | @@ -1,112 +1,112 @@ | |||
1 | /* Copyright (C) 2009-2013 Free Software Foundation, Inc. | 1 | /* Copyright (C) 2009-2013 Free Software Foundation, Inc. | |
2 | Contributed by Richard Henderson <rth@redhat.com>. | 2 | Contributed by Richard Henderson <rth@redhat.com>. | |
3 | 3 | |||
4 | This file is part of the GNU Transactional Memory Library (libitm). | 4 | This file is part of the GNU Transactional Memory Library (libitm). | |
5 | 5 | |||
6 | Libitm is free software; you can redistribute it and/or modify it | 6 | Libitm is free software; you can redistribute it and/or modify it | |
7 | under the terms of the GNU General Public License as published by | 7 | under the terms of the GNU General Public License as published by | |
8 | the Free Software Foundation; either version 3 of the License, or | 8 | the Free Software Foundation; either version 3 of the License, or | |
9 | (at your option) any later version. | 9 | (at your option) any later version. | |
10 | 10 | |||
11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | 11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | 12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | 14 | more details. | |
15 | 15 | |||
16 | Under Section 7 of GPL version 3, you are granted additional | 16 | Under Section 7 of GPL version 3, you are granted additional | |
17 | permissions described in the GCC Runtime Library Exception, version | 17 | permissions described in the GCC Runtime Library Exception, version | |
18 | 3.1, as published by the Free Software Foundation. | 18 | 3.1, as published by the Free Software Foundation. | |
19 | 19 | |||
20 | You should have received a copy of the GNU General Public License and | 20 | You should have received a copy of the GNU General Public License and | |
21 | a copy of the GCC Runtime Library Exception along with this program; | 21 | a copy of the GCC Runtime Library Exception along with this program; | |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | <http://www.gnu.org/licenses/>. */ | 23 | <http://www.gnu.org/licenses/>. */ | |
24 | 24 | |||
25 | .text | 25 | .text | |
26 | .align 4 | 26 | .align 4 | |
27 | .globl _ITM_beginTransaction | 27 | .globl _ITM_beginTransaction | |
28 | .ent _ITM_beginTransaction | 28 | .ent _ITM_beginTransaction | |
29 | 29 | |||
30 | #define FRAME 144 | 30 | #define FRAME 144 | |
31 | 31 | |||
32 | _ITM_beginTransaction: | 32 | _ITM_beginTransaction: | |
33 | ldgp $29, 0($27) | 33 | ldgp $29, 0($27) | |
34 | subq $30, FRAME, $30 | 34 | subq $30, FRAME, $30 | |
35 | .frame $30, FRAME, $26, 0 | 35 | .frame $30, FRAME, $26, 0 | |
36 | .mask 0x04000000, 0 | 36 | .mask 0x04000000, 0 | |
37 | stq $26, 0($30) | 37 | stq $26, 0($30) | |
38 | .prologue 1 | 38 | .prologue 1 | |
39 | 39 | |||
40 | stq $9, 8($30) | 40 | stq $9, 8($30) | |
41 | stq $10, 16($30) | 41 | stq $10, 16($30) | |
42 | addq $30, FRAME, $0 | 42 | addq $30, FRAME, $0 | |
43 | stq $11, 24($30) | 43 | stq $11, 24($30) | |
44 | 44 | |||
45 | stq $12, 32($30) | 45 | stq $12, 32($30) | |
46 | stq $13, 40($30) | 46 | stq $13, 40($30) | |
47 | stq $14, 48($30) | 47 | stq $14, 48($30) | |
48 | stq $15, 56($30) | 48 | stq $15, 56($30) | |
49 | 49 | |||
50 | stq $0, 64($30) | 50 | stq $0, 64($30) | |
51 | stt $f2, 72($30) | 51 | stt $f2, 72($30) | |
52 | stt $f3, 80($30) | 52 | stt $f3, 80($30) | |
53 | stt $f4, 88($30) | 53 | stt $f4, 88($30) | |
54 | 54 | |||
55 | stt $f5, 96($30) | 55 | stt $f5, 96($30) | |
56 | stt $f6, 104($30) | 56 | stt $f6, 104($30) | |
57 | stt $f7, 112($30) | 57 | stt $f7, 112($30) | |
58 | stt $f8, 120($30) | 58 | stt $f8, 120($30) | |
59 | 59 | |||
60 | stt $f9, 128($30) | 60 | stt $f9, 128($30) | |
61 | mov $30, $17 | 61 | mov $30, $17 | |
62 | #ifdef __PIC__ | 62 | #ifdef __PIC__ | |
63 | unop | 63 | unop | |
64 | bsr $26, GTM_begin_transaction !samegp | 64 | bsr $26, GTM_begin_transaction !samegp | |
65 | #else | 65 | #else | |
66 | jsr $26, GTM_begin_transaction | 66 | jsr $26, GTM_begin_transaction | |
67 | ldgp $29, 0($26) | 67 | ldgp $29, 0($26) | |
68 | #endif | 68 | #endif | |
69 | 69 | |||
70 | ldq $26, 0($30) | 70 | ldq $26, 0($30) | |
71 | addq $30, FRAME, $30 | 71 | addq $30, FRAME, $30 | |
72 | ret | 72 | ret | |
73 | .end _ITM_beginTransaction | 73 | .end _ITM_beginTransaction | |
74 | 74 | |||
75 | .align 4 | 75 | .align 4 | |
76 | .globl GTM_longjmp | 76 | .globl GTM_longjmp | |
77 | #ifdef __ELF__ | 77 | #ifdef __ELF__ | |
78 | .hidden GTM_longjmp | 78 | .hidden GTM_longjmp | |
79 | #endif | 79 | #endif | |
80 | .ent GTM_longjmp | 80 | .ent GTM_longjmp | |
81 | 81 | |||
82 | GTM_longjmp: | 82 | GTM_longjmp: | |
83 | .prologue 0 | 83 | .prologue 0 | |
84 | ldq $26, 0($17) | 84 | ldq $26, 0($17) | |
85 | ldq $9, 8($17) | 85 | ldq $9, 8($17) | |
86 | ldq $10, 16($17) | 86 | ldq $10, 16($17) | |
87 | ldq $11, 24($17) | 87 | ldq $11, 24($17) | |
88 | 88 | |||
89 | ldq $12, 32($17) | 89 | ldq $12, 32($17) | |
90 | ldq $13, 40($17) | 90 | ldq $13, 40($17) | |
91 | ldq $14, 48($17) | 91 | ldq $14, 48($17) | |
92 | ldq $15, 56($17) | 92 | ldq $15, 56($17) | |
93 | 93 | |||
94 | ldq $1, 64($17) | 94 | ldq $1, 64($17) | |
95 | ldt $f2, 72($17) | 95 | ldt $f2, 72($17) | |
96 | ldt $f3, 80($17) | 96 | ldt $f3, 80($17) | |
97 | ldt $f4, 88($17) | 97 | ldt $f4, 88($17) | |
98 | 98 | |||
99 | ldt $f5, 96($17) | 99 | ldt $f5, 96($17) | |
100 | ldt $f6, 104($17) | 100 | ldt $f6, 104($17) | |
101 | ldt $f7, 112($17) | 101 | ldt $f7, 112($17) | |
102 | ldt $f8, 120($17) | 102 | ldt $f8, 120($17) | |
103 | 103 | |||
104 | ldt $f9, 128($17) | 104 | ldt $f9, 128($17) | |
105 | mov $16, $0 | 105 | mov $16, $0 | |
106 | mov $1, $30 | 106 | mov $1, $30 | |
107 | ret | 107 | ret | |
108 | .end GTM_longjmp | 108 | .end GTM_longjmp | |
109 | 109 | |||
110 | #ifdef __linux__ | 110 | #if defined(__ELF__) && defined(__linux__) | |
111 | .section .note.GNU-stack, "", @progbits | 111 | .section .note.GNU-stack, "", @progbits | |
112 | #endif | 112 | #endif |
--- src/external/gpl3/gcc/dist/libitm/config/arm/Attic/sjlj.S 2014/03/01 08:41:18 1.1.1.1
+++ src/external/gpl3/gcc/dist/libitm/config/arm/Attic/sjlj.S 2015/11/07 16:53:08 1.2
@@ -1,164 +1,164 @@ | @@ -1,164 +1,164 @@ | |||
1 | /* Copyright (C) 2011-2013 Free Software Foundation, Inc. | 1 | /* Copyright (C) 2011-2013 Free Software Foundation, Inc. | |
2 | Contributed by Richard Henderson <rth@redhat.com>. | 2 | Contributed by Richard Henderson <rth@redhat.com>. | |
3 | 3 | |||
4 | This file is part of the GNU Transactional Memory Library (libitm). | 4 | This file is part of the GNU Transactional Memory Library (libitm). | |
5 | 5 | |||
6 | Libitm is free software; you can redistribute it and/or modify it | 6 | Libitm is free software; you can redistribute it and/or modify it | |
7 | under the terms of the GNU General Public License as published by | 7 | under the terms of the GNU General Public License as published by | |
8 | the Free Software Foundation; either version 3 of the License, or | 8 | the Free Software Foundation; either version 3 of the License, or | |
9 | (at your option) any later version. | 9 | (at your option) any later version. | |
10 | 10 | |||
11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | 11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | 12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | 14 | more details. | |
15 | 15 | |||
16 | Under Section 7 of GPL version 3, you are granted additional | 16 | Under Section 7 of GPL version 3, you are granted additional | |
17 | permissions described in the GCC Runtime Library Exception, version | 17 | permissions described in the GCC Runtime Library Exception, version | |
18 | 3.1, as published by the Free Software Foundation. | 18 | 3.1, as published by the Free Software Foundation. | |
19 | 19 | |||
20 | You should have received a copy of the GNU General Public License and | 20 | You should have received a copy of the GNU General Public License and | |
21 | a copy of the GCC Runtime Library Exception along with this program; | 21 | a copy of the GCC Runtime Library Exception along with this program; | |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | <http://www.gnu.org/licenses/>. */ | 23 | <http://www.gnu.org/licenses/>. */ | |
24 | 24 | |||
25 | #include "hwcap.h" | 25 | #include "hwcap.h" | |
26 | #include "asmcfi.h" | 26 | #include "asmcfi.h" | |
27 | 27 | |||
28 | .syntax unified | 28 | .syntax unified | |
29 | 29 | |||
30 | #if defined(__thumb2__) | 30 | #if defined(__thumb2__) | |
31 | # define PC_OFS 4 | 31 | # define PC_OFS 4 | |
32 | .thumb | 32 | .thumb | |
33 | .thumb_func | 33 | .thumb_func | |
34 | #else | 34 | #else | |
35 | # define PC_OFS 8 | 35 | # define PC_OFS 8 | |
36 | #endif | 36 | #endif | |
37 | 37 | |||
38 | #if defined (__thumb2__) && defined(__ARM_ARCH_6T2__) | 38 | #if defined (__thumb2__) && defined(__ARM_ARCH_6T2__) | |
39 | # define HAVE_MOVT | 39 | # define HAVE_MOVT | |
40 | .arch armv6t2 | 40 | .arch armv6t2 | |
41 | #elif defined (__ARM_ARCH_7A__) | 41 | #elif defined (__ARM_ARCH_7A__) | |
42 | # define HAVE_MOVT | 42 | # define HAVE_MOVT | |
43 | .arch armv7-a | 43 | .arch armv7-a | |
44 | #elif defined (__ARM_ARCH_7R__) | 44 | #elif defined (__ARM_ARCH_7R__) | |
45 | # define HAVE_MOVT | 45 | # define HAVE_MOVT | |
46 | .arch armv7-r | 46 | .arch armv7-r | |
47 | #elif defined (__ARM_ARCH_7M__) | 47 | #elif defined (__ARM_ARCH_7M__) | |
48 | # define HAVE_MOVT | 48 | # define HAVE_MOVT | |
49 | .arch armv7-m | 49 | .arch armv7-m | |
50 | #endif | 50 | #endif | |
51 | 51 | |||
52 | #if defined(HAVE_MOVT) && defined(PIC) | 52 | #if defined(HAVE_MOVT) && defined(PIC) | |
53 | .macro ldaddr reg, addr | 53 | .macro ldaddr reg, addr | |
54 | movw \reg, #:lower16:(\addr - (98f + PC_OFS)) | 54 | movw \reg, #:lower16:(\addr - (98f + PC_OFS)) | |
55 | movt \reg, #:upper16:(\addr - (98f + PC_OFS)) | 55 | movt \reg, #:upper16:(\addr - (98f + PC_OFS)) | |
56 | 98: add \reg, \reg, pc | 56 | 98: add \reg, \reg, pc | |
57 | .endm | 57 | .endm | |
58 | #elif defined(HAVE_MOVT) | 58 | #elif defined(HAVE_MOVT) | |
59 | .macro ldaddr reg, addr | 59 | .macro ldaddr reg, addr | |
60 | movw \reg, #:lower16:\addr | 60 | movw \reg, #:lower16:\addr | |
61 | movt \reg, #:upper16:\addr | 61 | movt \reg, #:upper16:\addr | |
62 | .endm | 62 | .endm | |
63 | #elif defined(PIC) | 63 | #elif defined(PIC) | |
64 | .macro ldaddr reg, addr | 64 | .macro ldaddr reg, addr | |
65 | ldr \reg, 99f | 65 | ldr \reg, 99f | |
66 | 98: add \reg, \reg, pc | 66 | 98: add \reg, \reg, pc | |
67 | .subsection 1 | 67 | .subsection 1 | |
68 | .align 2 | 68 | .align 2 | |
69 | 99: .word \addr - (98b + PC_OFS) | 69 | 99: .word \addr - (98b + PC_OFS) | |
70 | .subsection 0 | 70 | .subsection 0 | |
71 | .endm | 71 | .endm | |
72 | #else | 72 | #else | |
73 | .macro ldaddr reg, addr | 73 | .macro ldaddr reg, addr | |
74 | ldr \reg, =\addr | 74 | ldr \reg, =\addr | |
75 | .endm | 75 | .endm | |
76 | #endif | 76 | #endif | |
77 | 77 | |||
78 | .text | 78 | .text | |
79 | .align 2 | 79 | .align 2 | |
80 | .global _ITM_beginTransaction | 80 | .global _ITM_beginTransaction | |
81 | .type _ITM_beginTransaction, %function | 81 | .type _ITM_beginTransaction, %function | |
82 | 82 | |||
83 | _ITM_beginTransaction: | 83 | _ITM_beginTransaction: | |
84 | .fnstart | 84 | .fnstart | |
85 | cfi_startproc | 85 | cfi_startproc | |
86 | mov ip, sp | 86 | mov ip, sp | |
87 | push { r4-r11, ip, lr } | 87 | push { r4-r11, ip, lr } | |
88 | .save { lr } | 88 | .save { lr } | |
89 | .pad #(9*4) | 89 | .pad #(9*4) | |
90 | cfi_adjust_cfa_offset(40) | 90 | cfi_adjust_cfa_offset(40) | |
91 | cfi_rel_offset(lr, 36) | 91 | cfi_rel_offset(lr, 36) | |
92 | sub sp, sp, #(14*8) | 92 | sub sp, sp, #(14*8) | |
93 | .pad #(14*8) | 93 | .pad #(14*8) | |
94 | cfi_adjust_cfa_offset(14*8) | 94 | cfi_adjust_cfa_offset(14*8) | |
95 | 95 | |||
96 | ldaddr r2, GTM_hwcap | 96 | ldaddr r2, GTM_hwcap | |
97 | ldr r2, [r2] | 97 | ldr r2, [r2] | |
98 | 98 | |||
99 | /* Store the VFP registers. Don't use VFP instructions directly | 99 | /* Store the VFP registers. Don't use VFP instructions directly | |
100 | because this code is used in non-VFP multilibs. */ | 100 | because this code is used in non-VFP multilibs. */ | |
101 | tst r2, #HWCAP_ARM_VFP | 101 | tst r2, #HWCAP_ARM_VFP | |
102 | beq 1f | 102 | beq 1f | |
103 | stc p11, cr8, [sp], {16} /* vstm sp, {d8-d15} */ | 103 | stc p11, cr8, [sp], {16} /* vstm sp, {d8-d15} */ | |
104 | 1: | 104 | 1: | |
105 | /* Save the call-preserved iWMMXt registers. */ | 105 | /* Save the call-preserved iWMMXt registers. */ | |
106 | tst r2, #HWCAP_ARM_IWMMXT | 106 | tst r2, #HWCAP_ARM_IWMMXT | |
107 | beq 1f | 107 | beq 1f | |
108 | stcl p1, cr10, [sp, #64] /* wstrd wr10, [sp, #64] */ | 108 | stcl p1, cr10, [sp, #64] /* wstrd wr10, [sp, #64] */ | |
109 | stcl p1, cr11, [sp, #72] | 109 | stcl p1, cr11, [sp, #72] | |
110 | stcl p1, cr12, [sp, #80] | 110 | stcl p1, cr12, [sp, #80] | |
111 | stcl p1, cr13, [sp, #88] | 111 | stcl p1, cr13, [sp, #88] | |
112 | stcl p1, cr14, [sp, #96] | 112 | stcl p1, cr14, [sp, #96] | |
113 | stcl p1, cr15, [sp, #104] | 113 | stcl p1, cr15, [sp, #104] | |
114 | 1: | 114 | 1: | |
115 | /* Invoke GTM_begin_transaction with the struct we just built. */ | 115 | /* Invoke GTM_begin_transaction with the struct we just built. */ | |
116 | mov r1, sp | 116 | mov r1, sp | |
117 | bl GTM_begin_transaction | 117 | bl GTM_begin_transaction | |
118 | 118 | |||
119 | /* Return; we don't need to restore any of the call-saved regs. */ | 119 | /* Return; we don't need to restore any of the call-saved regs. */ | |
120 | add sp, sp, #(14*8 + 9*4) | 120 | add sp, sp, #(14*8 + 9*4) | |
121 | cfi_adjust_cfa_offset(-(14*8 + 9*4)) | 121 | cfi_adjust_cfa_offset(-(14*8 + 9*4)) | |
122 | pop { pc } | 122 | pop { pc } | |
123 | .fnend | 123 | .fnend | |
124 | cfi_endproc | 124 | cfi_endproc | |
125 | .size _ITM_beginTransaction, . - _ITM_beginTransaction | 125 | .size _ITM_beginTransaction, . - _ITM_beginTransaction | |
126 | 126 | |||
127 | .align 2 | 127 | .align 2 | |
128 | .global GTM_longjmp | 128 | .global GTM_longjmp | |
129 | .hidden GTM_longjmp | 129 | .hidden GTM_longjmp | |
130 | .type GTM_longjmp, %function | 130 | .type GTM_longjmp, %function | |
131 | 131 | |||
132 | GTM_longjmp: | 132 | GTM_longjmp: | |
133 | cfi_startproc | 133 | cfi_startproc | |
134 | ldaddr r2, GTM_hwcap | 134 | ldaddr r2, GTM_hwcap | |
135 | ldr r2, [r2] | 135 | ldr r2, [r2] | |
136 | 136 | |||
137 | tst r2, #HWCAP_ARM_VFP | 137 | tst r2, #HWCAP_ARM_VFP | |
138 | beq 1f | 138 | beq 1f | |
139 | ldc p11, cr8, [r1], {16} /* vldmia r1, {d8-d15} */ | 139 | ldc p11, cr8, [r1], {16} /* vldmia r1, {d8-d15} */ | |
140 | 1: | 140 | 1: | |
141 | tst r2, #HWCAP_ARM_IWMMXT | 141 | tst r2, #HWCAP_ARM_IWMMXT | |
142 | beq 1f | 142 | beq 1f | |
143 | ldcl p1, cr10, [r1, #64] /* wldrd wr10, [r1, #64] */ | 143 | ldcl p1, cr10, [r1, #64] /* wldrd wr10, [r1, #64] */ | |
144 | ldcl p1, cr11, [r1, #72] | 144 | ldcl p1, cr11, [r1, #72] | |
145 | ldcl p1, cr12, [r1, #80] | 145 | ldcl p1, cr12, [r1, #80] | |
146 | ldcl p1, cr13, [r1, #88] | 146 | ldcl p1, cr13, [r1, #88] | |
147 | ldcl p1, cr14, [r1, #96] | 147 | ldcl p1, cr14, [r1, #96] | |
148 | ldcl p1, cr15, [r1, #104] | 148 | ldcl p1, cr15, [r1, #104] | |
149 | 1: | 149 | 1: | |
150 | add r1, r1, #(14*8) /* Skip both VFP and iWMMXt blocks */ | 150 | add r1, r1, #(14*8) /* Skip both VFP and iWMMXt blocks */ | |
151 | #ifdef __thumb2__ | 151 | #ifdef __thumb2__ | |
152 | ldm r1, { r4-r11, ip, lr } | 152 | ldm r1, { r4-r11, ip, lr } | |
153 | cfi_def_cfa(ip, 0) | 153 | cfi_def_cfa(ip, 0) | |
154 | mov sp, ip | 154 | mov sp, ip | |
155 | bx lr | 155 | bx lr | |
156 | #else | 156 | #else | |
157 | ldm r1, { r4-r11, sp, pc } | 157 | ldm r1, { r4-r11, sp, pc } | |
158 | #endif | 158 | #endif | |
159 | cfi_endproc | 159 | cfi_endproc | |
160 | .size GTM_longjmp, . - GTM_longjmp | 160 | .size GTM_longjmp, . - GTM_longjmp | |
161 | 161 | |||
162 | #ifdef __linux__ | 162 | #if defined(__ELF__) && defined(__linux__) | |
163 | .section .note.GNU-stack, "", %progbits | 163 | .section .note.GNU-stack, "", %progbits | |
164 | #endif | 164 | #endif |
--- src/external/gpl3/gcc/dist/libitm/config/powerpc/Attic/sjlj.S 2014/05/27 08:40:03 1.1.1.2
+++ src/external/gpl3/gcc/dist/libitm/config/powerpc/Attic/sjlj.S 2015/11/07 16:53:08 1.2
@@ -1,433 +1,433 @@ | @@ -1,433 +1,433 @@ | |||
1 | /* Copyright (C) 2012-2013 Free Software Foundation, Inc. | 1 | /* Copyright (C) 2012-2013 Free Software Foundation, Inc. | |
2 | Contributed by Richard Henderson <rth@redhat.com>. | 2 | Contributed by Richard Henderson <rth@redhat.com>. | |
3 | 3 | |||
4 | This file is part of the GNU Transactional Memory Library (libitm). | 4 | This file is part of the GNU Transactional Memory Library (libitm). | |
5 | 5 | |||
6 | Libitm is free software; you can redistribute it and/or modify it | 6 | Libitm is free software; you can redistribute it and/or modify it | |
7 | under the terms of the GNU General Public License as published by | 7 | under the terms of the GNU General Public License as published by | |
8 | the Free Software Foundation; either version 3 of the License, or | 8 | the Free Software Foundation; either version 3 of the License, or | |
9 | (at your option) any later version. | 9 | (at your option) any later version. | |
10 | 10 | |||
11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | 11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | 12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | 14 | more details. | |
15 | 15 | |||
16 | Under Section 7 of GPL version 3, you are granted additional | 16 | Under Section 7 of GPL version 3, you are granted additional | |
17 | permissions described in the GCC Runtime Library Exception, version | 17 | permissions described in the GCC Runtime Library Exception, version | |
18 | 3.1, as published by the Free Software Foundation. | 18 | 3.1, as published by the Free Software Foundation. | |
19 | 19 | |||
20 | You should have received a copy of the GNU General Public License and | 20 | You should have received a copy of the GNU General Public License and | |
21 | a copy of the GCC Runtime Library Exception along with this program; | 21 | a copy of the GCC Runtime Library Exception along with this program; | |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | <http://www.gnu.org/licenses/>. */ | 23 | <http://www.gnu.org/licenses/>. */ | |
24 | 24 | |||
25 | .text | 25 | .text | |
26 | 26 | |||
27 | #include "asmcfi.h" | 27 | #include "asmcfi.h" | |
28 | 28 | |||
29 | #if defined(__powerpc64__) && _CALL_ELF == 2 | 29 | #if defined(__powerpc64__) && _CALL_ELF == 2 | |
30 | .macro FUNC name | 30 | .macro FUNC name | |
31 | .globl \name | 31 | .globl \name | |
32 | .type \name, @function | 32 | .type \name, @function | |
33 | \name: | 33 | \name: | |
34 | 0: addis 2,12,(.TOC.-0b)@ha | 34 | 0: addis 2,12,(.TOC.-0b)@ha | |
35 | addi 2,2,(.TOC.-0b)@l | 35 | addi 2,2,(.TOC.-0b)@l | |
36 | .localentry \name, . - \name | 36 | .localentry \name, . - \name | |
37 | .endm | 37 | .endm | |
38 | .macro END name | 38 | .macro END name | |
39 | .size \name, . - \name | 39 | .size \name, . - \name | |
40 | .endm | 40 | .endm | |
41 | .macro HIDDEN name | 41 | .macro HIDDEN name | |
42 | .hidden \name | 42 | .hidden \name | |
43 | .endm | 43 | .endm | |
44 | .macro CALL name | 44 | .macro CALL name | |
45 | bl \name | 45 | bl \name | |
46 | nop | 46 | nop | |
47 | .endm | 47 | .endm | |
48 | #elif defined(__powerpc64__) && defined(__ELF__) | 48 | #elif defined(__powerpc64__) && defined(__ELF__) | |
49 | .macro FUNC name | 49 | .macro FUNC name | |
50 | .globl \name, .\name | 50 | .globl \name, .\name | |
51 | .section ".opd","aw" | 51 | .section ".opd","aw" | |
52 | .align 3 | 52 | .align 3 | |
53 | \name: | 53 | \name: | |
54 | .quad .\name, .TOC.@tocbase, 0 | 54 | .quad .\name, .TOC.@tocbase, 0 | |
55 | .size \name, 24 | 55 | .size \name, 24 | |
56 | .type .\name, @function | 56 | .type .\name, @function | |
57 | .text | 57 | .text | |
58 | .\name: | 58 | .\name: | |
59 | .endm | 59 | .endm | |
60 | .macro END name | 60 | .macro END name | |
61 | .size .\name, . - .\name | 61 | .size .\name, . - .\name | |
62 | .endm | 62 | .endm | |
63 | .macro HIDDEN name | 63 | .macro HIDDEN name | |
64 | .hidden \name, .\name | 64 | .hidden \name, .\name | |
65 | .endm | 65 | .endm | |
66 | .macro CALL name | 66 | .macro CALL name | |
67 | bl \name | 67 | bl \name | |
68 | nop | 68 | nop | |
69 | .endm | 69 | .endm | |
70 | #elif defined(__ELF__) | 70 | #elif defined(__ELF__) | |
71 | .macro FUNC name | 71 | .macro FUNC name | |
72 | .globl \name | 72 | .globl \name | |
73 | .type \name, @function | 73 | .type \name, @function | |
74 | \name: | 74 | \name: | |
75 | .endm | 75 | .endm | |
76 | .macro END name | 76 | .macro END name | |
77 | .size \name, . - \name | 77 | .size \name, . - \name | |
78 | .endm | 78 | .endm | |
79 | .macro HIDDEN name | 79 | .macro HIDDEN name | |
80 | .hidden \name | 80 | .hidden \name | |
81 | .endm | 81 | .endm | |
82 | .macro CALL name | 82 | .macro CALL name | |
83 | bl \name | 83 | bl \name | |
84 | .endm | 84 | .endm | |
85 | #elif defined(_CALL_DARWIN) | 85 | #elif defined(_CALL_DARWIN) | |
86 | .macro FUNC name | 86 | .macro FUNC name | |
87 | .globl _$0 | 87 | .globl _$0 | |
88 | _$0: | 88 | _$0: | |
89 | .endmacro | 89 | .endmacro | |
90 | .macro END name | 90 | .macro END name | |
91 | .endmacro | 91 | .endmacro | |
92 | .macro HIDDEN name | 92 | .macro HIDDEN name | |
93 | .private_extern _$0 | 93 | .private_extern _$0 | |
94 | .endmacro | 94 | .endmacro | |
95 | .macro CALL name | 95 | .macro CALL name | |
96 | bl _$0 | 96 | bl _$0 | |
97 | .endmacro | 97 | .endmacro | |
98 | # ifdef __ppc64__ | 98 | # ifdef __ppc64__ | |
99 | .machine ppc64 | 99 | .machine ppc64 | |
100 | # else | 100 | # else | |
101 | .machine ppc7400 | 101 | .machine ppc7400 | |
102 | # endif | 102 | # endif | |
103 | #else | 103 | #else | |
104 | #error "unsupported system" | 104 | #error "unsupported system" | |
105 | #endif | 105 | #endif | |
106 | 106 | |||
107 | /* Parameterize the naming of registers. */ | 107 | /* Parameterize the naming of registers. */ | |
108 | #if defined(__ELF__) | 108 | #if defined(__ELF__) | |
109 | # define r(N) %r##N | 109 | # define r(N) %r##N | |
110 | # define f(N) %f##N | 110 | # define f(N) %f##N | |
111 | # define v(N) %v##N | 111 | # define v(N) %v##N | |
112 | #elif defined(__MACH__) | 112 | #elif defined(__MACH__) | |
113 | # define r(N) r##N | 113 | # define r(N) r##N | |
114 | # define f(N) f##N | 114 | # define f(N) f##N | |
115 | # define v(N) v##N | 115 | # define v(N) v##N | |
116 | #else | 116 | #else | |
117 | # define r(N) N | 117 | # define r(N) N | |
118 | # define f(N) N | 118 | # define f(N) N | |
119 | # define v(N) N | 119 | # define v(N) N | |
120 | #endif | 120 | #endif | |
121 | 121 | |||
122 | /* Parameterize the code for 32-bit vs 64-bit. */ | 122 | /* Parameterize the code for 32-bit vs 64-bit. */ | |
123 | #if defined(__powerpc64__) || defined(__ppc64__) | 123 | #if defined(__powerpc64__) || defined(__ppc64__) | |
124 | #define ldreg ld | 124 | #define ldreg ld | |
125 | #define streg std | 125 | #define streg std | |
126 | #define stregu stdu | 126 | #define stregu stdu | |
127 | #define WS 8 | 127 | #define WS 8 | |
128 | #else | 128 | #else | |
129 | #define ldreg lwz | 129 | #define ldreg lwz | |
130 | #define streg stw | 130 | #define streg stw | |
131 | #define stregu stwu | 131 | #define stregu stwu | |
132 | #define WS 4 | 132 | #define WS 4 | |
133 | #endif | 133 | #endif | |
134 | 134 | |||
135 | /* Parameterize the code for call frame constants. */ | 135 | /* Parameterize the code for call frame constants. */ | |
136 | #if defined(_CALL_AIXDESC) | 136 | #if defined(_CALL_AIXDESC) | |
137 | # define BASE 6*WS | 137 | # define BASE 6*WS | |
138 | # define LR_SAVE 2*WS | 138 | # define LR_SAVE 2*WS | |
139 | #elif _CALL_ELF == 2 | 139 | #elif _CALL_ELF == 2 | |
140 | # define BASE 6*WS | 140 | # define BASE 6*WS | |
141 | # define LR_SAVE 2*WS | 141 | # define LR_SAVE 2*WS | |
142 | #elif defined(_CALL_SYSV) | 142 | #elif defined(_CALL_SYSV) | |
143 | # define BASE 2*WS | 143 | # define BASE 2*WS | |
144 | # define LR_SAVE 1*WS | 144 | # define LR_SAVE 1*WS | |
145 | #elif defined(_CALL_DARWIN) | 145 | #elif defined(_CALL_DARWIN) | |
146 | # define BASE (6*WS + 2*WS) | 146 | # define BASE (6*WS + 2*WS) | |
147 | # define LR_SAVE 2*WS | 147 | # define LR_SAVE 2*WS | |
148 | #else | 148 | #else | |
149 | # error "unsupported system" | 149 | # error "unsupported system" | |
150 | #endif | 150 | #endif | |
151 | 151 | |||
152 | #if defined(__ALTIVEC__) || defined(__VSX__) | 152 | #if defined(__ALTIVEC__) || defined(__VSX__) | |
153 | # define OFS_VR 0 | 153 | # define OFS_VR 0 | |
154 | # define OFS_VSCR 12*16 | 154 | # define OFS_VSCR 12*16 | |
155 | # define OFS_VR_END OFS_VSCR + 8 | 155 | # define OFS_VR_END OFS_VSCR + 8 | |
156 | #else | 156 | #else | |
157 | # define OFS_VR_END 0 | 157 | # define OFS_VR_END 0 | |
158 | #endif | 158 | #endif | |
159 | #ifndef _SOFT_FLOAT | 159 | #ifndef _SOFT_FLOAT | |
160 | # define OFS_FR OFS_VR_END | 160 | # define OFS_FR OFS_VR_END | |
161 | # define OFS_FPSCR OFS_FR + 18*8 | 161 | # define OFS_FPSCR OFS_FR + 18*8 | |
162 | # define OFS_FR_END OFS_FPSCR + 8 | 162 | # define OFS_FR_END OFS_FPSCR + 8 | |
163 | #else | 163 | #else | |
164 | # define OFS_FR_END OFS_VR_END | 164 | # define OFS_FR_END OFS_VR_END | |
165 | #endif | 165 | #endif | |
166 | #define OFS_GR OFS_FR_END | 166 | #define OFS_GR OFS_FR_END | |
167 | #define OFS_CFA OFS_GR + 18*WS | 167 | #define OFS_CFA OFS_GR + 18*WS | |
168 | #define OFS_LR OFS_CFA + WS | 168 | #define OFS_LR OFS_CFA + WS | |
169 | #define OFS_TOC OFS_LR + WS | 169 | #define OFS_TOC OFS_LR + WS | |
170 | #define OFS_CR OFS_TOC + WS | 170 | #define OFS_CR OFS_TOC + WS | |
171 | #define OFS_END (((OFS_CR + WS + 15) / 16) * 16) | 171 | #define OFS_END (((OFS_CR + WS + 15) / 16) * 16) | |
172 | 172 | |||
173 | #define FRAME (((BASE + OFS_END + 15) / 16) * 16) | 173 | #define FRAME (((BASE + OFS_END + 15) / 16) * 16) | |
174 | #define VRSAVE 256 | 174 | #define VRSAVE 256 | |
175 | 175 | |||
176 | .align 4 | 176 | .align 4 | |
177 | FUNC _ITM_beginTransaction | 177 | FUNC _ITM_beginTransaction | |
178 | cfi_startproc | 178 | cfi_startproc | |
179 | mflr r(0) | 179 | mflr r(0) | |
180 | mfcr r(5) | 180 | mfcr r(5) | |
181 | addi r(4), r(1), -OFS_END | 181 | addi r(4), r(1), -OFS_END | |
182 | mr r(6), r(1) | 182 | mr r(6), r(1) | |
183 | streg r(0), LR_SAVE(r(1)) | 183 | streg r(0), LR_SAVE(r(1)) | |
184 | stregu r(1), -FRAME(r(1)) | 184 | stregu r(1), -FRAME(r(1)) | |
185 | cfi_def_cfa_offset(FRAME) | 185 | cfi_def_cfa_offset(FRAME) | |
186 | cfi_offset(65, LR_SAVE) | 186 | cfi_offset(65, LR_SAVE) | |
187 | streg r(6), OFS_CFA(r(4)) | 187 | streg r(6), OFS_CFA(r(4)) | |
188 | streg r(0), OFS_LR(r(4)) | 188 | streg r(0), OFS_LR(r(4)) | |
189 | #ifdef _CALL_DARWIN | 189 | #ifdef _CALL_DARWIN | |
190 | streg r(13), OFS_TOC(r(4)) | 190 | streg r(13), OFS_TOC(r(4)) | |
191 | #else | 191 | #else | |
192 | streg r(2), OFS_TOC(r(4)) | 192 | streg r(2), OFS_TOC(r(4)) | |
193 | #endif | 193 | #endif | |
194 | streg r(5), OFS_CR(r(4)) | 194 | streg r(5), OFS_CR(r(4)) | |
195 | streg r(14), 0*WS+OFS_GR(r(4)) | 195 | streg r(14), 0*WS+OFS_GR(r(4)) | |
196 | streg r(15), 1*WS+OFS_GR(r(4)) | 196 | streg r(15), 1*WS+OFS_GR(r(4)) | |
197 | streg r(16), 2*WS+OFS_GR(r(4)) | 197 | streg r(16), 2*WS+OFS_GR(r(4)) | |
198 | streg r(17), 3*WS+OFS_GR(r(4)) | 198 | streg r(17), 3*WS+OFS_GR(r(4)) | |
199 | streg r(18), 4*WS+OFS_GR(r(4)) | 199 | streg r(18), 4*WS+OFS_GR(r(4)) | |
200 | streg r(19), 5*WS+OFS_GR(r(4)) | 200 | streg r(19), 5*WS+OFS_GR(r(4)) | |
201 | streg r(20), 6*WS+OFS_GR(r(4)) | 201 | streg r(20), 6*WS+OFS_GR(r(4)) | |
202 | streg r(21), 7*WS+OFS_GR(r(4)) | 202 | streg r(21), 7*WS+OFS_GR(r(4)) | |
203 | streg r(22), 8*WS+OFS_GR(r(4)) | 203 | streg r(22), 8*WS+OFS_GR(r(4)) | |
204 | streg r(23), 9*WS+OFS_GR(r(4)) | 204 | streg r(23), 9*WS+OFS_GR(r(4)) | |
205 | streg r(24), 10*WS+OFS_GR(r(4)) | 205 | streg r(24), 10*WS+OFS_GR(r(4)) | |
206 | streg r(25), 11*WS+OFS_GR(r(4)) | 206 | streg r(25), 11*WS+OFS_GR(r(4)) | |
207 | streg r(26), 12*WS+OFS_GR(r(4)) | 207 | streg r(26), 12*WS+OFS_GR(r(4)) | |
208 | streg r(27), 13*WS+OFS_GR(r(4)) | 208 | streg r(27), 13*WS+OFS_GR(r(4)) | |
209 | streg r(28), 14*WS+OFS_GR(r(4)) | 209 | streg r(28), 14*WS+OFS_GR(r(4)) | |
210 | streg r(29), 15*WS+OFS_GR(r(4)) | 210 | streg r(29), 15*WS+OFS_GR(r(4)) | |
211 | streg r(30), 16*WS+OFS_GR(r(4)) | 211 | streg r(30), 16*WS+OFS_GR(r(4)) | |
212 | streg r(31), 17*WS+OFS_GR(r(4)) | 212 | streg r(31), 17*WS+OFS_GR(r(4)) | |
213 | 213 | |||
214 | #ifndef _SOFT_FLOAT | 214 | #ifndef _SOFT_FLOAT | |
215 | /* ??? Determine when FPRs not present. */ | 215 | /* ??? Determine when FPRs not present. */ | |
216 | /* ??? Test r(3) for pr_hasNoFloatUpdate and skip the fp save. | 216 | /* ??? Test r(3) for pr_hasNoFloatUpdate and skip the fp save. | |
217 | This is not yet set by the compiler. */ | 217 | This is not yet set by the compiler. */ | |
218 | mffs f(0) | 218 | mffs f(0) | |
219 | stfd f(14), 0+OFS_FR(r(4)) | 219 | stfd f(14), 0+OFS_FR(r(4)) | |
220 | stfd f(15), 8+OFS_FR(r(4)) | 220 | stfd f(15), 8+OFS_FR(r(4)) | |
221 | stfd f(16), 16+OFS_FR(r(4)) | 221 | stfd f(16), 16+OFS_FR(r(4)) | |
222 | stfd f(17), 24+OFS_FR(r(4)) | 222 | stfd f(17), 24+OFS_FR(r(4)) | |
223 | stfd f(18), 32+OFS_FR(r(4)) | 223 | stfd f(18), 32+OFS_FR(r(4)) | |
224 | stfd f(19), 40+OFS_FR(r(4)) | 224 | stfd f(19), 40+OFS_FR(r(4)) | |
225 | stfd f(20), 48+OFS_FR(r(4)) | 225 | stfd f(20), 48+OFS_FR(r(4)) | |
226 | stfd f(21), 56+OFS_FR(r(4)) | 226 | stfd f(21), 56+OFS_FR(r(4)) | |
227 | stfd f(22), 64+OFS_FR(r(4)) | 227 | stfd f(22), 64+OFS_FR(r(4)) | |
228 | stfd f(23), 72+OFS_FR(r(4)) | 228 | stfd f(23), 72+OFS_FR(r(4)) | |
229 | stfd f(24), 80+OFS_FR(r(4)) | 229 | stfd f(24), 80+OFS_FR(r(4)) | |
230 | stfd f(25), 88+OFS_FR(r(4)) | 230 | stfd f(25), 88+OFS_FR(r(4)) | |
231 | stfd f(26), 96+OFS_FR(r(4)) | 231 | stfd f(26), 96+OFS_FR(r(4)) | |
232 | stfd f(27),104+OFS_FR(r(4)) | 232 | stfd f(27),104+OFS_FR(r(4)) | |
233 | stfd f(28),112+OFS_FR(r(4)) | 233 | stfd f(28),112+OFS_FR(r(4)) | |
234 | stfd f(29),120+OFS_FR(r(4)) | 234 | stfd f(29),120+OFS_FR(r(4)) | |
235 | stfd f(30),128+OFS_FR(r(4)) | 235 | stfd f(30),128+OFS_FR(r(4)) | |
236 | stfd f(31),136+OFS_FR(r(4)) | 236 | stfd f(31),136+OFS_FR(r(4)) | |
237 | stfd f(0), OFS_FPSCR(r(4)) | 237 | stfd f(0), OFS_FPSCR(r(4)) | |
238 | #endif | 238 | #endif | |
239 | 239 | |||
240 | #if defined(__ALTIVEC__) | 240 | #if defined(__ALTIVEC__) | |
241 | /* ??? Determine when VRs not present. */ | 241 | /* ??? Determine when VRs not present. */ | |
242 | /* ??? Test r(3) for pr_hasNoVectorUpdate and skip the vr save. | 242 | /* ??? Test r(3) for pr_hasNoVectorUpdate and skip the vr save. | |
243 | This is not yet set by the compiler. */ | 243 | This is not yet set by the compiler. */ | |
244 | addi r(5), r(4), OFS_VR | 244 | addi r(5), r(4), OFS_VR | |
245 | addi r(6), r(4), OFS_VR+16 | 245 | addi r(6), r(4), OFS_VR+16 | |
246 | mfspr r(0), VRSAVE | 246 | mfspr r(0), VRSAVE | |
247 | stvx v(20), 0, r(5) | 247 | stvx v(20), 0, r(5) | |
248 | addi r(5), r(5), 32 | 248 | addi r(5), r(5), 32 | |
249 | stvx v(21), 0, r(6) | 249 | stvx v(21), 0, r(6) | |
250 | addi r(6), r(6), 32 | 250 | addi r(6), r(6), 32 | |
251 | stvx v(22), 0, r(5) | 251 | stvx v(22), 0, r(5) | |
252 | addi r(5), r(5), 32 | 252 | addi r(5), r(5), 32 | |
253 | stvx v(23), 0, r(6) | 253 | stvx v(23), 0, r(6) | |
254 | addi r(6), r(6), 32 | 254 | addi r(6), r(6), 32 | |
255 | stvx v(25), 0, r(5) | 255 | stvx v(25), 0, r(5) | |
256 | addi r(5), r(5), 32 | 256 | addi r(5), r(5), 32 | |
257 | stvx v(26), 0, r(6) | 257 | stvx v(26), 0, r(6) | |
258 | addi r(6), r(6), 32 | 258 | addi r(6), r(6), 32 | |
259 | stvx v(26), 0, r(5) | 259 | stvx v(26), 0, r(5) | |
260 | addi r(5), r(5), 32 | 260 | addi r(5), r(5), 32 | |
261 | stvx v(27), 0, r(6) | 261 | stvx v(27), 0, r(6) | |
262 | addi r(6), r(6), 32 | 262 | addi r(6), r(6), 32 | |
263 | stvx v(28), 0, r(5) | 263 | stvx v(28), 0, r(5) | |
264 | addi r(5), r(5), 32 | 264 | addi r(5), r(5), 32 | |
265 | stvx v(29), 0, r(6) | 265 | stvx v(29), 0, r(6) | |
266 | addi r(6), r(6), 32 | 266 | addi r(6), r(6), 32 | |
267 | stvx v(30), 0, r(5) | 267 | stvx v(30), 0, r(5) | |
268 | stvx v(31), 0, r(6) | 268 | stvx v(31), 0, r(6) | |
269 | streg r(0), OFS_VSCR(r(4)) | 269 | streg r(0), OFS_VSCR(r(4)) | |
270 | #endif | 270 | #endif | |
271 | 271 | |||
272 | CALL GTM_begin_transaction | 272 | CALL GTM_begin_transaction | |
273 | 273 | |||
274 | ldreg r(0), LR_SAVE+FRAME(r(1)) | 274 | ldreg r(0), LR_SAVE+FRAME(r(1)) | |
275 | mtlr r(0) | 275 | mtlr r(0) | |
276 | addi r(1), r(1), FRAME | 276 | addi r(1), r(1), FRAME | |
277 | cfi_def_cfa_offset(0) | 277 | cfi_def_cfa_offset(0) | |
278 | cfi_restore(65) | 278 | cfi_restore(65) | |
279 | blr | 279 | blr | |
280 | cfi_endproc | 280 | cfi_endproc | |
281 | END _ITM_beginTransaction | 281 | END _ITM_beginTransaction | |
282 | 282 | |||
283 | .align 4 | 283 | .align 4 | |
284 | HIDDEN GTM_longjmp | 284 | HIDDEN GTM_longjmp | |
285 | FUNC GTM_longjmp | 285 | FUNC GTM_longjmp | |
286 | cfi_startproc | 286 | cfi_startproc | |
287 | #if defined(__ALTIVEC__) || defined(__VSX__) | 287 | #if defined(__ALTIVEC__) || defined(__VSX__) | |
288 | /* ??? Determine when VRs not present. */ | 288 | /* ??? Determine when VRs not present. */ | |
289 | /* ??? Test r(5) for pr_hasNoVectorUpdate and skip the vr restore. | 289 | /* ??? Test r(5) for pr_hasNoVectorUpdate and skip the vr restore. | |
290 | This is not yet set by the compiler. */ | 290 | This is not yet set by the compiler. */ | |
291 | addi r(6), r(4), OFS_VR | 291 | addi r(6), r(4), OFS_VR | |
292 | addi r(7), r(4), OFS_VR+16 | 292 | addi r(7), r(4), OFS_VR+16 | |
293 | ldreg r(0), OFS_VSCR(r(4)) | 293 | ldreg r(0), OFS_VSCR(r(4)) | |
294 | cfi_undefined(v(20)) | 294 | cfi_undefined(v(20)) | |
295 | cfi_undefined(v(21)) | 295 | cfi_undefined(v(21)) | |
296 | cfi_undefined(v(22)) | 296 | cfi_undefined(v(22)) | |
297 | cfi_undefined(v(23)) | 297 | cfi_undefined(v(23)) | |
298 | cfi_undefined(v(24)) | 298 | cfi_undefined(v(24)) | |
299 | cfi_undefined(v(25)) | 299 | cfi_undefined(v(25)) | |
300 | cfi_undefined(v(26)) | 300 | cfi_undefined(v(26)) | |
301 | cfi_undefined(v(27)) | 301 | cfi_undefined(v(27)) | |
302 | cfi_undefined(v(28)) | 302 | cfi_undefined(v(28)) | |
303 | cfi_undefined(v(29)) | 303 | cfi_undefined(v(29)) | |
304 | cfi_undefined(v(30)) | 304 | cfi_undefined(v(30)) | |
305 | cfi_undefined(v(31)) | 305 | cfi_undefined(v(31)) | |
306 | lvx v(20), 0, r(6) | 306 | lvx v(20), 0, r(6) | |
307 | addi r(6), r(6), 32 | 307 | addi r(6), r(6), 32 | |
308 | lvx v(21), 0, r(7) | 308 | lvx v(21), 0, r(7) | |
309 | addi r(7), r(7), 32 | 309 | addi r(7), r(7), 32 | |
310 | lvx v(22), 0, r(6) | 310 | lvx v(22), 0, r(6) | |
311 | addi r(6), r(6), 32 | 311 | addi r(6), r(6), 32 | |
312 | lvx v(23), 0, r(7) | 312 | lvx v(23), 0, r(7) | |
313 | addi r(7), r(7), 32 | 313 | addi r(7), r(7), 32 | |
314 | lvx v(24), 0, r(6) | 314 | lvx v(24), 0, r(6) | |
315 | addi r(6), r(6), 32 | 315 | addi r(6), r(6), 32 | |
316 | lvx v(25), 0, r(7) | 316 | lvx v(25), 0, r(7) | |
317 | addi r(7), r(7), 32 | 317 | addi r(7), r(7), 32 | |
318 | lvx v(26), 0, r(6) | 318 | lvx v(26), 0, r(6) | |
319 | addi r(6), r(6), 32 | 319 | addi r(6), r(6), 32 | |
320 | lvx v(27), 0, r(7) | 320 | lvx v(27), 0, r(7) | |
321 | addi r(7), r(7), 32 | 321 | addi r(7), r(7), 32 | |
322 | lvx v(28), 0, r(6) | 322 | lvx v(28), 0, r(6) | |
323 | addi r(6), r(6), 32 | 323 | addi r(6), r(6), 32 | |
324 | lvx v(29), 0, r(7) | 324 | lvx v(29), 0, r(7) | |
325 | addi r(7), r(7), 32 | 325 | addi r(7), r(7), 32 | |
326 | lvx v(30), 0, r(6) | 326 | lvx v(30), 0, r(6) | |
327 | lvx v(31), 0, r(7) | 327 | lvx v(31), 0, r(7) | |
328 | mtspr VRSAVE, r(0) | 328 | mtspr VRSAVE, r(0) | |
329 | #endif | 329 | #endif | |
330 | 330 | |||
331 | #ifndef _SOFT_FLOAT | 331 | #ifndef _SOFT_FLOAT | |
332 | /* ??? Determine when FPRs not present. */ | 332 | /* ??? Determine when FPRs not present. */ | |
333 | /* ??? Test r(5) for pr_hasNoFloatUpdate and skip the fp load. | 333 | /* ??? Test r(5) for pr_hasNoFloatUpdate and skip the fp load. | |
334 | This is not yet set by the compiler. */ | 334 | This is not yet set by the compiler. */ | |
335 | lfd f(0), OFS_FPSCR(r(4)) | 335 | lfd f(0), OFS_FPSCR(r(4)) | |
336 | cfi_undefined(f(14)) | 336 | cfi_undefined(f(14)) | |
337 | cfi_undefined(f(15)) | 337 | cfi_undefined(f(15)) | |
338 | cfi_undefined(f(16)) | 338 | cfi_undefined(f(16)) | |
339 | cfi_undefined(f(17)) | 339 | cfi_undefined(f(17)) | |
340 | cfi_undefined(f(18)) | 340 | cfi_undefined(f(18)) | |
341 | cfi_undefined(f(19)) | 341 | cfi_undefined(f(19)) | |
342 | cfi_undefined(f(20)) | 342 | cfi_undefined(f(20)) | |
343 | cfi_undefined(f(21)) | 343 | cfi_undefined(f(21)) | |
344 | cfi_undefined(f(22)) | 344 | cfi_undefined(f(22)) | |
345 | cfi_undefined(f(23)) | 345 | cfi_undefined(f(23)) | |
346 | cfi_undefined(f(24)) | 346 | cfi_undefined(f(24)) | |
347 | cfi_undefined(f(25)) | 347 | cfi_undefined(f(25)) | |
348 | cfi_undefined(f(26)) | 348 | cfi_undefined(f(26)) | |
349 | cfi_undefined(f(27)) | 349 | cfi_undefined(f(27)) | |
350 | cfi_undefined(f(28)) | 350 | cfi_undefined(f(28)) | |
351 | cfi_undefined(f(29)) | 351 | cfi_undefined(f(29)) | |
352 | cfi_undefined(f(30)) | 352 | cfi_undefined(f(30)) | |
353 | cfi_undefined(f(31)) | 353 | cfi_undefined(f(31)) | |
354 | lfd f(14), 0+OFS_FR(r(4)) | 354 | lfd f(14), 0+OFS_FR(r(4)) | |
355 | lfd f(15), 8+OFS_FR(r(4)) | 355 | lfd f(15), 8+OFS_FR(r(4)) | |
356 | lfd f(16), 16+OFS_FR(r(4)) | 356 | lfd f(16), 16+OFS_FR(r(4)) | |
357 | lfd f(17), 24+OFS_FR(r(4)) | 357 | lfd f(17), 24+OFS_FR(r(4)) | |
358 | lfd f(18), 32+OFS_FR(r(4)) | 358 | lfd f(18), 32+OFS_FR(r(4)) | |
359 | lfd f(19), 40+OFS_FR(r(4)) | 359 | lfd f(19), 40+OFS_FR(r(4)) | |
360 | lfd f(20), 48+OFS_FR(r(4)) | 360 | lfd f(20), 48+OFS_FR(r(4)) | |
361 | lfd f(21), 56+OFS_FR(r(4)) | 361 | lfd f(21), 56+OFS_FR(r(4)) | |
362 | lfd f(22), 64+OFS_FR(r(4)) | 362 | lfd f(22), 64+OFS_FR(r(4)) | |
363 | lfd f(23), 72+OFS_FR(r(4)) | 363 | lfd f(23), 72+OFS_FR(r(4)) | |
364 | lfd f(24), 80+OFS_FR(r(4)) | 364 | lfd f(24), 80+OFS_FR(r(4)) | |
365 | lfd f(25), 88+OFS_FR(r(4)) | 365 | lfd f(25), 88+OFS_FR(r(4)) | |
366 | lfd f(26), 96+OFS_FR(r(4)) | 366 | lfd f(26), 96+OFS_FR(r(4)) | |
367 | lfd f(27),104+OFS_FR(r(4)) | 367 | lfd f(27),104+OFS_FR(r(4)) | |
368 | lfd f(28),112+OFS_FR(r(4)) | 368 | lfd f(28),112+OFS_FR(r(4)) | |
369 | lfd f(29),120+OFS_FR(r(4)) | 369 | lfd f(29),120+OFS_FR(r(4)) | |
370 | lfd f(30),128+OFS_FR(r(4)) | 370 | lfd f(30),128+OFS_FR(r(4)) | |
371 | lfd f(31),136+OFS_FR(r(4)) | 371 | lfd f(31),136+OFS_FR(r(4)) | |
372 | mtfsf 0xff, f(0) | 372 | mtfsf 0xff, f(0) | |
373 | #endif | 373 | #endif | |
374 | 374 | |||
375 | ldreg r(6), OFS_CFA(r(4)) | 375 | ldreg r(6), OFS_CFA(r(4)) | |
376 | ldreg r(0), OFS_LR(r(4)) | 376 | ldreg r(0), OFS_LR(r(4)) | |
377 | #ifdef _CALL_DARWIN | 377 | #ifdef _CALL_DARWIN | |
378 | ldreg r(13), OFS_TOC(r(4)) | 378 | ldreg r(13), OFS_TOC(r(4)) | |
379 | #else | 379 | #else | |
380 | ldreg r(2), OFS_TOC(r(4)) | 380 | ldreg r(2), OFS_TOC(r(4)) | |
381 | #endif | 381 | #endif | |
382 | ldreg r(7), OFS_CR(r(4)) | 382 | ldreg r(7), OFS_CR(r(4)) | |
383 | /* At the instant we restore the LR, the only coherent view of | 383 | /* At the instant we restore the LR, the only coherent view of | |
384 | the world we have is into the new stack frame. Define the | 384 | the world we have is into the new stack frame. Define the | |
385 | CFA in terms of the not-yet-restored stack pointer. This will | 385 | CFA in terms of the not-yet-restored stack pointer. This will | |
386 | last until the end of the function. */ | 386 | last until the end of the function. */ | |
387 | mtlr r(0) | 387 | mtlr r(0) | |
388 | cfi_def_cfa(r(6), 0) | 388 | cfi_def_cfa(r(6), 0) | |
389 | cfi_undefined(r(14)) | 389 | cfi_undefined(r(14)) | |
390 | cfi_undefined(r(15)) | 390 | cfi_undefined(r(15)) | |
391 | cfi_undefined(r(16)) | 391 | cfi_undefined(r(16)) | |
392 | cfi_undefined(r(17)) | 392 | cfi_undefined(r(17)) | |
393 | cfi_undefined(r(18)) | 393 | cfi_undefined(r(18)) | |
394 | cfi_undefined(r(19)) | 394 | cfi_undefined(r(19)) | |
395 | cfi_undefined(r(20)) | 395 | cfi_undefined(r(20)) | |
396 | cfi_undefined(r(21)) | 396 | cfi_undefined(r(21)) | |
397 | cfi_undefined(r(22)) | 397 | cfi_undefined(r(22)) | |
398 | cfi_undefined(r(23)) | 398 | cfi_undefined(r(23)) | |
399 | cfi_undefined(r(24)) | 399 | cfi_undefined(r(24)) | |
400 | cfi_undefined(r(25)) | 400 | cfi_undefined(r(25)) | |
401 | cfi_undefined(r(26)) | 401 | cfi_undefined(r(26)) | |
402 | cfi_undefined(r(27)) | 402 | cfi_undefined(r(27)) | |
403 | cfi_undefined(r(28)) | 403 | cfi_undefined(r(28)) | |
404 | cfi_undefined(r(29)) | 404 | cfi_undefined(r(29)) | |
405 | cfi_undefined(r(30)) | 405 | cfi_undefined(r(30)) | |
406 | cfi_undefined(r(31)) | 406 | cfi_undefined(r(31)) | |
407 | mtcr r(7) | 407 | mtcr r(7) | |
408 | ldreg r(14), 0*WS+OFS_GR(r(4)) | 408 | ldreg r(14), 0*WS+OFS_GR(r(4)) | |
409 | ldreg r(15), 1*WS+OFS_GR(r(4)) | 409 | ldreg r(15), 1*WS+OFS_GR(r(4)) | |
410 | ldreg r(16), 2*WS+OFS_GR(r(4)) | 410 | ldreg r(16), 2*WS+OFS_GR(r(4)) | |
411 | ldreg r(17), 3*WS+OFS_GR(r(4)) | 411 | ldreg r(17), 3*WS+OFS_GR(r(4)) | |
412 | ldreg r(18), 4*WS+OFS_GR(r(4)) | 412 | ldreg r(18), 4*WS+OFS_GR(r(4)) | |
413 | ldreg r(19), 5*WS+OFS_GR(r(4)) | 413 | ldreg r(19), 5*WS+OFS_GR(r(4)) | |
414 | ldreg r(20), 6*WS+OFS_GR(r(4)) | 414 | ldreg r(20), 6*WS+OFS_GR(r(4)) | |
415 | ldreg r(21), 7*WS+OFS_GR(r(4)) | 415 | ldreg r(21), 7*WS+OFS_GR(r(4)) | |
416 | ldreg r(22), 8*WS+OFS_GR(r(4)) | 416 | ldreg r(22), 8*WS+OFS_GR(r(4)) | |
417 | ldreg r(23), 9*WS+OFS_GR(r(4)) | 417 | ldreg r(23), 9*WS+OFS_GR(r(4)) | |
418 | ldreg r(24), 10*WS+OFS_GR(r(4)) | 418 | ldreg r(24), 10*WS+OFS_GR(r(4)) | |
419 | ldreg r(25), 11*WS+OFS_GR(r(4)) | 419 | ldreg r(25), 11*WS+OFS_GR(r(4)) | |
420 | ldreg r(26), 12*WS+OFS_GR(r(4)) | 420 | ldreg r(26), 12*WS+OFS_GR(r(4)) | |
421 | ldreg r(27), 13*WS+OFS_GR(r(4)) | 421 | ldreg r(27), 13*WS+OFS_GR(r(4)) | |
422 | ldreg r(28), 14*WS+OFS_GR(r(4)) | 422 | ldreg r(28), 14*WS+OFS_GR(r(4)) | |
423 | ldreg r(29), 15*WS+OFS_GR(r(4)) | 423 | ldreg r(29), 15*WS+OFS_GR(r(4)) | |
424 | ldreg r(30), 16*WS+OFS_GR(r(4)) | 424 | ldreg r(30), 16*WS+OFS_GR(r(4)) | |
425 | ldreg r(31), 17*WS+OFS_GR(r(4)) | 425 | ldreg r(31), 17*WS+OFS_GR(r(4)) | |
426 | mr r(1), r(6) | 426 | mr r(1), r(6) | |
427 | blr | 427 | blr | |
428 | cfi_endproc | 428 | cfi_endproc | |
429 | END GTM_longjmp | 429 | END GTM_longjmp | |
430 | 430 | |||
431 | #ifdef __linux__ | 431 | #if defined(__ELF__) && defined(__linux__) | |
432 | .section .note.GNU-stack, "", @progbits | 432 | .section .note.GNU-stack, "", @progbits | |
433 | #endif | 433 | #endif |
--- src/external/gpl3/gcc/dist/libitm/config/s390/Attic/sjlj.S 2014/03/01 08:41:18 1.1.1.1
+++ src/external/gpl3/gcc/dist/libitm/config/s390/Attic/sjlj.S 2015/11/07 16:53:08 1.2
@@ -1,108 +1,110 @@ | @@ -1,108 +1,110 @@ | |||
1 | /* Copyright (C) 2013 Free Software Foundation, Inc. | 1 | /* Copyright (C) 2013 Free Software Foundation, Inc. | |
2 | Contributed by Andreas Krebbel <krebbel@linux.vnet.ibm.com> | 2 | Contributed by Andreas Krebbel <krebbel@linux.vnet.ibm.com> | |
3 | 3 | |||
4 | This file is part of the GNU Transactional Memory Library (libitm). | 4 | This file is part of the GNU Transactional Memory Library (libitm). | |
5 | 5 | |||
6 | Libitm is free software; you can redistribute it and/or modify it | 6 | Libitm is free software; you can redistribute it and/or modify it | |
7 | under the terms of the GNU General Public License as published by | 7 | under the terms of the GNU General Public License as published by | |
8 | the Free Software Foundation; either version 3 of the License, or | 8 | the Free Software Foundation; either version 3 of the License, or | |
9 | (at your option) any later version. | 9 | (at your option) any later version. | |
10 | 10 | |||
11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | 11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | 12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | 14 | more details. | |
15 | 15 | |||
16 | Under Section 7 of GPL version 3, you are granted additional | 16 | Under Section 7 of GPL version 3, you are granted additional | |
17 | permissions described in the GCC Runtime Library Exception, version | 17 | permissions described in the GCC Runtime Library Exception, version | |
18 | 3.1, as published by the Free Software Foundation. | 18 | 3.1, as published by the Free Software Foundation. | |
19 | 19 | |||
20 | You should have received a copy of the GNU General Public License and | 20 | You should have received a copy of the GNU General Public License and | |
21 | a copy of the GCC Runtime Library Exception along with this program; | 21 | a copy of the GCC Runtime Library Exception along with this program; | |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | <http://www.gnu.org/licenses/>. */ | 23 | <http://www.gnu.org/licenses/>. */ | |
24 | 24 | |||
25 | 25 | |||
26 | #include "asmcfi.h" | 26 | #include "asmcfi.h" | |
27 | 27 | |||
28 | .text | 28 | .text | |
29 | .align 4 | 29 | .align 4 | |
30 | .globl _ITM_beginTransaction | 30 | .globl _ITM_beginTransaction | |
31 | .type _ITM_beginTransaction, @function | 31 | .type _ITM_beginTransaction, @function | |
32 | 32 | |||
33 | /* _ITM_beginTransaction (int props); props -> r2 */ | 33 | /* _ITM_beginTransaction (int props); props -> r2 */ | |
34 | _ITM_beginTransaction: | 34 | _ITM_beginTransaction: | |
35 | cfi_startproc | 35 | cfi_startproc | |
36 | #ifdef __s390x__ | 36 | #ifdef __s390x__ | |
37 | lgr %r3,%r15 /* backup stack pointer */ | 37 | lgr %r3,%r15 /* backup stack pointer */ | |
38 | aghi %r15,-304 /* jump buffer (144) + reg save area (160) */ | 38 | aghi %r15,-304 /* jump buffer (144) + reg save area (160) */ | |
39 | cfi_adjust_cfa_offset(304) | 39 | cfi_adjust_cfa_offset(304) | |
40 | stmg %r6,%r14,160(%r15) | 40 | stmg %r6,%r14,160(%r15) | |
41 | stg %r3,72+160(%r15) /* store the old stack pointer */ | 41 | stg %r3,72+160(%r15) /* store the old stack pointer */ | |
42 | std %f8,80+160(%r15) | 42 | std %f8,80+160(%r15) | |
43 | std %f9,88+160(%r15) | 43 | std %f9,88+160(%r15) | |
44 | std %f10,96+160(%r15) | 44 | std %f10,96+160(%r15) | |
45 | std %f11,104+160(%r15) | 45 | std %f11,104+160(%r15) | |
46 | std %f12,112+160(%r15) | 46 | std %f12,112+160(%r15) | |
47 | std %f13,120+160(%r15) | 47 | std %f13,120+160(%r15) | |
48 | std %f14,128+160(%r15) | 48 | std %f14,128+160(%r15) | |
49 | std %f15,136+160(%r15) | 49 | std %f15,136+160(%r15) | |
50 | la %r3,160(%r15) /* second argument to GTM_begin_transaction */ | 50 | la %r3,160(%r15) /* second argument to GTM_begin_transaction */ | |
51 | brasl %r14,GTM_begin_transaction | 51 | brasl %r14,GTM_begin_transaction | |
52 | lg %r1,64+160(%r15) | 52 | lg %r1,64+160(%r15) | |
53 | aghi %r15,304 | 53 | aghi %r15,304 | |
54 | cfi_adjust_cfa_offset(-304) | 54 | cfi_adjust_cfa_offset(-304) | |
55 | br %r1 | 55 | br %r1 | |
56 | #else | 56 | #else | |
57 | lr %r3,%r15 /* backup stack pointer */ | 57 | lr %r3,%r15 /* backup stack pointer */ | |
58 | ahi %r15,-152 /* jump buffer (56) + reg save area (96) */ | 58 | ahi %r15,-152 /* jump buffer (56) + reg save area (96) */ | |
59 | cfi_adjust_cfa_offset(152) | 59 | cfi_adjust_cfa_offset(152) | |
60 | stm %r6,%r14,96(%r15) | 60 | stm %r6,%r14,96(%r15) | |
61 | st %r3,36+96(%r15) /* store the old stack pointer */ | 61 | st %r3,36+96(%r15) /* store the old stack pointer */ | |
62 | std %f4,40+96(%r15) | 62 | std %f4,40+96(%r15) | |
63 | std %f6,48+96(%r15) | 63 | std %f6,48+96(%r15) | |
64 | la %r3,96(%r15) /* second argument to GTM_begin_transaction */ | 64 | la %r3,96(%r15) /* second argument to GTM_begin_transaction */ | |
65 | /* begin_transaction (uint32_t prop, const gtm_jmpbuf *jb) */ | 65 | /* begin_transaction (uint32_t prop, const gtm_jmpbuf *jb) */ | |
66 | brasl %r14,GTM_begin_transaction /* requires mzarch */ | 66 | brasl %r14,GTM_begin_transaction /* requires mzarch */ | |
67 | l %r1,32+96(%r15) | 67 | l %r1,32+96(%r15) | |
68 | ahi %r15,152 | 68 | ahi %r15,152 | |
69 | cfi_adjust_cfa_offset(-152) | 69 | cfi_adjust_cfa_offset(-152) | |
70 | br %r1 | 70 | br %r1 | |
71 | #endif | 71 | #endif | |
72 | cfi_endproc | 72 | cfi_endproc | |
73 | 73 | |||
74 | .size _ITM_beginTransaction, .-_ITM_beginTransaction | 74 | .size _ITM_beginTransaction, .-_ITM_beginTransaction | |
75 | 75 | |||
76 | .align 4 | 76 | .align 4 | |
77 | .globl GTM_longjmp | 77 | .globl GTM_longjmp | |
78 | .type GTM_longjmp, @function | 78 | .type GTM_longjmp, @function | |
79 | .hidden GTM_longjmp | 79 | .hidden GTM_longjmp | |
80 | 80 | |||
81 | /* uint32_t GTM_longjmp (uint32_t, const gtm_jmpbuf *, uint32_t) */ | 81 | /* uint32_t GTM_longjmp (uint32_t, const gtm_jmpbuf *, uint32_t) */ | |
82 | GTM_longjmp: | 82 | GTM_longjmp: | |
83 | /* First parameter becomes the return value of | 83 | /* First parameter becomes the return value of | |
84 | _ITM_beginTransaction (r2). | 84 | _ITM_beginTransaction (r2). | |
85 | Third parameter is ignored for now. */ | 85 | Third parameter is ignored for now. */ | |
86 | cfi_startproc | 86 | cfi_startproc | |
87 | #ifdef __s390x__ | 87 | #ifdef __s390x__ | |
88 | ld %f8,80(%r3) | 88 | ld %f8,80(%r3) | |
89 | ld %f9,88(%r3) | 89 | ld %f9,88(%r3) | |
90 | ld %f10,96(%r3) | 90 | ld %f10,96(%r3) | |
91 | ld %f11,104(%r3) | 91 | ld %f11,104(%r3) | |
92 | ld %f12,112(%r3) | 92 | ld %f12,112(%r3) | |
93 | ld %f13,120(%r3) | 93 | ld %f13,120(%r3) | |
94 | ld %f14,128(%r3) | 94 | ld %f14,128(%r3) | |
95 | ld %f15,136(%r3) | 95 | ld %f15,136(%r3) | |
96 | lmg %r6,%r15,0(%r3) | 96 | lmg %r6,%r15,0(%r3) | |
97 | br %r14 | 97 | br %r14 | |
98 | #else | 98 | #else | |
99 | ld %f4,40(%r3) | 99 | ld %f4,40(%r3) | |
100 | ld %f6,48(%r3) | 100 | ld %f6,48(%r3) | |
101 | lm %r6,%r15,0(%r3) | 101 | lm %r6,%r15,0(%r3) | |
102 | br %r14 | 102 | br %r14 | |
103 | #endif | 103 | #endif | |
104 | cfi_endproc | 104 | cfi_endproc | |
105 | 105 | |||
106 | .size GTM_longjmp, .-GTM_longjmp | 106 | .size GTM_longjmp, .-GTM_longjmp | |
107 | 107 | |||
108 | #if defined(__ELF__) && defined(__linux__) | |||
108 | .section .note.GNU-stack, "", @progbits | 109 | .section .note.GNU-stack, "", @progbits | |
110 | #endif |
--- src/external/gpl3/gcc/dist/libitm/config/sh/Attic/sjlj.S 2014/03/01 08:41:18 1.1.1.1
+++ src/external/gpl3/gcc/dist/libitm/config/sh/Attic/sjlj.S 2015/11/07 16:53:08 1.2
@@ -1,122 +1,122 @@ | @@ -1,122 +1,122 @@ | |||
1 | /* Copyright (C) 2011-2013 Free Software Foundation, Inc. | 1 | /* Copyright (C) 2011-2013 Free Software Foundation, Inc. | |
2 | 2 | |||
3 | This file is part of the GNU Transactional Memory Library (libitm). | 3 | This file is part of the GNU Transactional Memory Library (libitm). | |
4 | 4 | |||
5 | Libitm is free software; you can redistribute it and/or modify it | 5 | Libitm is free software; you can redistribute it and/or modify it | |
6 | under the terms of the GNU General Public License as published by | 6 | under the terms of the GNU General Public License as published by | |
7 | the Free Software Foundation; either version 3 of the License, or | 7 | the Free Software Foundation; either version 3 of the License, or | |
8 | (at your option) any later version. | 8 | (at your option) any later version. | |
9 | 9 | |||
10 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | 10 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | 11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
12 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 12 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | 13 | more details. | |
14 | 14 | |||
15 | Under Section 7 of GPL version 3, you are granted additional | 15 | Under Section 7 of GPL version 3, you are granted additional | |
16 | permissions described in the GCC Runtime Library Exception, version | 16 | permissions described in the GCC Runtime Library Exception, version | |
17 | 3.1, as published by the Free Software Foundation. | 17 | 3.1, as published by the Free Software Foundation. | |
18 | 18 | |||
19 | You should have received a copy of the GNU General Public License and | 19 | You should have received a copy of the GNU General Public License and | |
20 | a copy of the GCC Runtime Library Exception along with this program; | 20 | a copy of the GCC Runtime Library Exception along with this program; | |
21 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 21 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
22 | <http://www.gnu.org/licenses/>. */ | 22 | <http://www.gnu.org/licenses/>. */ | |
23 | 23 | |||
24 | #include "asmcfi.h" | 24 | #include "asmcfi.h" | |
25 | 25 | |||
26 | .text | 26 | .text | |
27 | .align 2 | 27 | .align 2 | |
28 | .global _ITM_beginTransaction | 28 | .global _ITM_beginTransaction | |
29 | .type _ITM_beginTransaction, %function | 29 | .type _ITM_beginTransaction, %function | |
30 | 30 | |||
31 | _ITM_beginTransaction: | 31 | _ITM_beginTransaction: | |
32 | cfi_startproc | 32 | cfi_startproc | |
33 | mov r15, r1 | 33 | mov r15, r1 | |
34 | #ifdef __SH_FPU_ANY__ | 34 | #ifdef __SH_FPU_ANY__ | |
35 | fmov.s fr15, @-r15 | 35 | fmov.s fr15, @-r15 | |
36 | fmov.s fr14, @-r15 | 36 | fmov.s fr14, @-r15 | |
37 | fmov.s fr13, @-r15 | 37 | fmov.s fr13, @-r15 | |
38 | fmov.s fr12, @-r15 | 38 | fmov.s fr12, @-r15 | |
39 | sts.l fpscr, @-r15 | 39 | sts.l fpscr, @-r15 | |
40 | #endif /* __SH_FPU_ANY__ */ | 40 | #endif /* __SH_FPU_ANY__ */ | |
41 | stc.l gbr, @-r15 | 41 | stc.l gbr, @-r15 | |
42 | sts.l pr, @-r15 | 42 | sts.l pr, @-r15 | |
43 | mov.l r1, @-r15 | 43 | mov.l r1, @-r15 | |
44 | mov.l r14, @-r15 | 44 | mov.l r14, @-r15 | |
45 | mov.l r13, @-r15 | 45 | mov.l r13, @-r15 | |
46 | mov.l r12, @-r15 | 46 | mov.l r12, @-r15 | |
47 | mov.l r11, @-r15 | 47 | mov.l r11, @-r15 | |
48 | mov.l r10, @-r15 | 48 | mov.l r10, @-r15 | |
49 | mov.l r9, @-r15 | 49 | mov.l r9, @-r15 | |
50 | mov.l r8, @-r15 | 50 | mov.l r8, @-r15 | |
51 | #ifdef __SH_FPU_ANY__ | 51 | #ifdef __SH_FPU_ANY__ | |
52 | cfi_def_cfa_offset (4*15) | 52 | cfi_def_cfa_offset (4*15) | |
53 | #else | 53 | #else | |
54 | cfi_def_cfa_offset (4*10) | 54 | cfi_def_cfa_offset (4*10) | |
55 | #endif | 55 | #endif | |
56 | #if defined HAVE_ATTRIBUTE_VISIBILITY || !defined __PIC__ | 56 | #if defined HAVE_ATTRIBUTE_VISIBILITY || !defined __PIC__ | |
57 | mov.l .Lbegin, r1 | 57 | mov.l .Lbegin, r1 | |
58 | jsr @r1 | 58 | jsr @r1 | |
59 | mov r15, r5 | 59 | mov r15, r5 | |
60 | #else | 60 | #else | |
61 | mova .Lgot, r0 | 61 | mova .Lgot, r0 | |
62 | mov.l .Lgot, r12 | 62 | mov.l .Lgot, r12 | |
63 | add r0, r12 | 63 | add r0, r12 | |
64 | mov.l .Lbegin, r1 | 64 | mov.l .Lbegin, r1 | |
65 | bsrf r1 | 65 | bsrf r1 | |
66 | mov r15, r5 | 66 | mov r15, r5 | |
67 | .Lbegin0: | 67 | .Lbegin0: | |
68 | mov.l @(4*4,r15), r12 | 68 | mov.l @(4*4,r15), r12 | |
69 | #endif | 69 | #endif | |
70 | mov.l @(8*4,r15), r1 | 70 | mov.l @(8*4,r15), r1 | |
71 | lds r1, pr | 71 | lds r1, pr | |
72 | #ifdef __SH_FPU_ANY__ | 72 | #ifdef __SH_FPU_ANY__ | |
73 | add #(15*4), r15 | 73 | add #(15*4), r15 | |
74 | #else | 74 | #else | |
75 | add #(10*5), r15 | 75 | add #(10*5), r15 | |
76 | #endif | 76 | #endif | |
77 | cfi_def_cfa_offset (0) | 77 | cfi_def_cfa_offset (0) | |
78 | rts | 78 | rts | |
79 | nop | 79 | nop | |
80 | cfi_endproc | 80 | cfi_endproc | |
81 | 81 | |||
82 | .align 2 | 82 | .align 2 | |
83 | .Lgot: | 83 | .Lgot: | |
84 | .long _GLOBAL_OFFSET_TABLE_ | 84 | .long _GLOBAL_OFFSET_TABLE_ | |
85 | .Lbegin: | 85 | .Lbegin: | |
86 | #if defined HAVE_ATTRIBUTE_VISIBILITY || !defined __PIC__ | 86 | #if defined HAVE_ATTRIBUTE_VISIBILITY || !defined __PIC__ | |
87 | .long GTM_begin_transaction | 87 | .long GTM_begin_transaction | |
88 | #else | 88 | #else | |
89 | .long GTM_begin_transaction@PLT-(.Lbegin0-.) | 89 | .long GTM_begin_transaction@PLT-(.Lbegin0-.) | |
90 | #endif | 90 | #endif | |
91 | .size _ITM_beginTransaction, . - _ITM_beginTransaction | 91 | .size _ITM_beginTransaction, . - _ITM_beginTransaction | |
92 | 92 | |||
93 | .global GTM_longjmp | 93 | .global GTM_longjmp | |
94 | .hidden GTM_longjmp | 94 | .hidden GTM_longjmp | |
95 | .type GTM_longjmp, %function | 95 | .type GTM_longjmp, %function | |
96 | 96 | |||
97 | GTM_longjmp: | 97 | GTM_longjmp: | |
98 | mov.l @r5+, r8 | 98 | mov.l @r5+, r8 | |
99 | mov.l @r5+, r9 | 99 | mov.l @r5+, r9 | |
100 | mov.l @r5+, r10 | 100 | mov.l @r5+, r10 | |
101 | mov.l @r5+, r11 | 101 | mov.l @r5+, r11 | |
102 | mov.l @r5+, r12 | 102 | mov.l @r5+, r12 | |
103 | mov.l @r5+, r13 | 103 | mov.l @r5+, r13 | |
104 | mov.l @r5+, r14 | 104 | mov.l @r5+, r14 | |
105 | mov.l @r5+, r15 | 105 | mov.l @r5+, r15 | |
106 | lds.l @r5+, pr | 106 | lds.l @r5+, pr | |
107 | ldc.l @r5+, gbr | 107 | ldc.l @r5+, gbr | |
108 | #ifdef __SH_FPU_ANY__ | 108 | #ifdef __SH_FPU_ANY__ | |
109 | lds.l @r5+, fpscr | 109 | lds.l @r5+, fpscr | |
110 | fmov.s @r5+, fr12 | 110 | fmov.s @r5+, fr12 | |
111 | fmov.s @r5+, fr13 | 111 | fmov.s @r5+, fr13 | |
112 | fmov.s @r5+, fr14 | 112 | fmov.s @r5+, fr14 | |
113 | fmov.s @r5+, fr15 | 113 | fmov.s @r5+, fr15 | |
114 | #endif | 114 | #endif | |
115 | rts | 115 | rts | |
116 | mov r4, r0 | 116 | mov r4, r0 | |
117 | 117 | |||
118 | .size GTM_longjmp, . - GTM_longjmp | 118 | .size GTM_longjmp, . - GTM_longjmp | |
119 | 119 | |||
120 | #ifdef __linux__ | 120 | #if defined(__ELF__) && defined(__linux__) | |
121 | .section .note.GNU-stack, "", %progbits | 121 | .section .note.GNU-stack, "", %progbits | |
122 | #endif | 122 | #endif |
--- src/external/gpl3/gcc/dist/libitm/config/sparc/Attic/sjlj.S 2014/03/01 08:41:18 1.1.1.1
+++ src/external/gpl3/gcc/dist/libitm/config/sparc/Attic/sjlj.S 2015/11/07 16:53:08 1.2
@@ -1,97 +1,97 @@ | @@ -1,97 +1,97 @@ | |||
1 | /* Copyright (C) 2012-2013 Free Software Foundation, Inc. | 1 | /* Copyright (C) 2012-2013 Free Software Foundation, Inc. | |
2 | 2 | |||
3 | This file is part of the GNU Transactional Memory Library (libitm). | 3 | This file is part of the GNU Transactional Memory Library (libitm). | |
4 | 4 | |||
5 | Libitm is free software; you can redistribute it and/or modify it | 5 | Libitm is free software; you can redistribute it and/or modify it | |
6 | under the terms of the GNU General Public License as published by | 6 | under the terms of the GNU General Public License as published by | |
7 | the Free Software Foundation; either version 3 of the License, or | 7 | the Free Software Foundation; either version 3 of the License, or | |
8 | (at your option) any later version. | 8 | (at your option) any later version. | |
9 | 9 | |||
10 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | 10 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | 11 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
12 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 12 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | 13 | more details. | |
14 | 14 | |||
15 | Under Section 7 of GPL version 3, you are granted additional | 15 | Under Section 7 of GPL version 3, you are granted additional | |
16 | permissions described in the GCC Runtime Library Exception, version | 16 | permissions described in the GCC Runtime Library Exception, version | |
17 | 3.1, as published by the Free Software Foundation. | 17 | 3.1, as published by the Free Software Foundation. | |
18 | 18 | |||
19 | You should have received a copy of the GNU General Public License and | 19 | You should have received a copy of the GNU General Public License and | |
20 | a copy of the GCC Runtime Library Exception along with this program; | 20 | a copy of the GCC Runtime Library Exception along with this program; | |
21 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 21 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
22 | <http://www.gnu.org/licenses/>. */ | 22 | <http://www.gnu.org/licenses/>. */ | |
23 | 23 | |||
24 | #include "asmcfi.h" | 24 | #include "asmcfi.h" | |
25 | 25 | |||
26 | #ifdef __arch64__ | 26 | #ifdef __arch64__ | |
27 | # define WORD_SIZE 8 | 27 | # define WORD_SIZE 8 | |
28 | # define MIN_FRAME_SIZE 176 | 28 | # define MIN_FRAME_SIZE 176 | |
29 | # define STACK_BIAS 2047 | 29 | # define STACK_BIAS 2047 | |
30 | # define load ldx | 30 | # define load ldx | |
31 | # define store stx | 31 | # define store stx | |
32 | #else | 32 | #else | |
33 | # define WORD_SIZE 4 | 33 | # define WORD_SIZE 4 | |
34 | # define MIN_FRAME_SIZE 96 | 34 | # define MIN_FRAME_SIZE 96 | |
35 | # define STACK_BIAS 0 | 35 | # define STACK_BIAS 0 | |
36 | # define load ld | 36 | # define load ld | |
37 | # define store st | 37 | # define store st | |
38 | #endif | 38 | #endif | |
39 | 39 | |||
40 | /* Fields of the JmpBuf structure. */ | 40 | /* Fields of the JmpBuf structure. */ | |
41 | #define JB_CFA 0 | 41 | #define JB_CFA 0 | |
42 | #define JB_PC 1 | 42 | #define JB_PC 1 | |
43 | #define OFFSET(FIELD) ((FIELD) * WORD_SIZE) | 43 | #define OFFSET(FIELD) ((FIELD) * WORD_SIZE) | |
44 | 44 | |||
45 | /* The frame size must be a multiple of the double-word size. */ | 45 | /* The frame size must be a multiple of the double-word size. */ | |
46 | #define FRAME_SIZE (MIN_FRAME_SIZE + 2 * WORD_SIZE) | 46 | #define FRAME_SIZE (MIN_FRAME_SIZE + 2 * WORD_SIZE) | |
47 | #define JB_OFFSET (STACK_BIAS + MIN_FRAME_SIZE) | 47 | #define JB_OFFSET (STACK_BIAS + MIN_FRAME_SIZE) | |
48 | 48 | |||
49 | .text | 49 | .text | |
50 | .align 4 | 50 | .align 4 | |
51 | .globl _ITM_beginTransaction | 51 | .globl _ITM_beginTransaction | |
52 | .type _ITM_beginTransaction, #function | 52 | .type _ITM_beginTransaction, #function | |
53 | .proc 016 | 53 | .proc 016 | |
54 | _ITM_beginTransaction: | 54 | _ITM_beginTransaction: | |
55 | cfi_startproc | 55 | cfi_startproc | |
56 | add %sp, STACK_BIAS, %g1 | 56 | add %sp, STACK_BIAS, %g1 | |
57 | sub %sp, FRAME_SIZE, %sp | 57 | sub %sp, FRAME_SIZE, %sp | |
58 | cfi_def_cfa_offset(STACK_BIAS + FRAME_SIZE) | 58 | cfi_def_cfa_offset(STACK_BIAS + FRAME_SIZE) | |
59 | store %g1, [%sp + JB_OFFSET + OFFSET (JB_CFA)] | 59 | store %g1, [%sp + JB_OFFSET + OFFSET (JB_CFA)] | |
60 | store %o7, [%sp + JB_OFFSET + OFFSET (JB_PC)] | 60 | store %o7, [%sp + JB_OFFSET + OFFSET (JB_PC)] | |
61 | /* ??? This triggers an internal error in GDB. */ | 61 | /* ??? This triggers an internal error in GDB. */ | |
62 | cfi_offset(%o7, -WORD_SIZE) | 62 | cfi_offset(%o7, -WORD_SIZE) | |
63 | call GTM_begin_transaction | 63 | call GTM_begin_transaction | |
64 | add %sp, JB_OFFSET, %o1 | 64 | add %sp, JB_OFFSET, %o1 | |
65 | load [%sp + JB_OFFSET + OFFSET (JB_PC)], %o7 | 65 | load [%sp + JB_OFFSET + OFFSET (JB_PC)], %o7 | |
66 | jmp %o7+8 | 66 | jmp %o7+8 | |
67 | add %sp, FRAME_SIZE, %sp | 67 | add %sp, FRAME_SIZE, %sp | |
68 | cfi_def_cfa_offset(STACK_BIAS) | 68 | cfi_def_cfa_offset(STACK_BIAS) | |
69 | cfi_endproc | 69 | cfi_endproc | |
70 | .size _ITM_beginTransaction, . - _ITM_beginTransaction | 70 | .size _ITM_beginTransaction, . - _ITM_beginTransaction | |
71 | 71 | |||
72 | .align 4 | 72 | .align 4 | |
73 | .globl GTM_longjmp | 73 | .globl GTM_longjmp | |
74 | #ifdef HAVE_ATTRIBUTE_VISIBILITY | 74 | #ifdef HAVE_ATTRIBUTE_VISIBILITY | |
75 | .hidden GTM_longjmp | 75 | .hidden GTM_longjmp | |
76 | #endif | 76 | #endif | |
77 | .type GTM_longjmp, #function | 77 | .type GTM_longjmp, #function | |
78 | .proc 016 | 78 | .proc 016 | |
79 | GTM_longjmp: | 79 | GTM_longjmp: | |
80 | cfi_startproc | 80 | cfi_startproc | |
81 | flushw | 81 | flushw | |
82 | #if STACK_BIAS | 82 | #if STACK_BIAS | |
83 | load [%o1 + OFFSET (JB_CFA)], %g1 | 83 | load [%o1 + OFFSET (JB_CFA)], %g1 | |
84 | sub %g1, STACK_BIAS, %fp | 84 | sub %g1, STACK_BIAS, %fp | |
85 | #else | 85 | #else | |
86 | load [%o1 + OFFSET (JB_CFA)], %fp | 86 | load [%o1 + OFFSET (JB_CFA)], %fp | |
87 | #endif | 87 | #endif | |
88 | cfi_def_cfa(%fp, STACK_BIAS) | 88 | cfi_def_cfa(%fp, STACK_BIAS) | |
89 | load [%o1 + OFFSET (JB_PC)], %o7 | 89 | load [%o1 + OFFSET (JB_PC)], %o7 | |
90 | jmp %o7+8 | 90 | jmp %o7+8 | |
91 | restore %g0, %o0, %o0 | 91 | restore %g0, %o0, %o0 | |
92 | cfi_endproc | 92 | cfi_endproc | |
93 | .size GTM_longjmp, . - GTM_longjmp | 93 | .size GTM_longjmp, . - GTM_longjmp | |
94 | 94 | |||
95 | #ifdef __linux__ | 95 | #if defined(__ELF__) && defined(__linux__) | |
96 | .section .note.GNU-stack, "", @progbits | 96 | .section .note.GNU-stack, "", @progbits | |
97 | #endif | 97 | #endif |
--- src/external/gpl3/gcc/dist/libitm/config/x86/Attic/sjlj.S 2014/03/01 08:41:18 1.1.1.1
+++ src/external/gpl3/gcc/dist/libitm/config/x86/Attic/sjlj.S 2015/11/07 16:53:08 1.2
@@ -1,147 +1,147 @@ | @@ -1,147 +1,147 @@ | |||
1 | /* Copyright (C) 2008-2013 Free Software Foundation, Inc. | 1 | /* Copyright (C) 2008-2013 Free Software Foundation, Inc. | |
2 | Contributed by Richard Henderson <rth@redhat.com>. | 2 | Contributed by Richard Henderson <rth@redhat.com>. | |
3 | 3 | |||
4 | This file is part of the GNU Transactional Memory Library (libitm). | 4 | This file is part of the GNU Transactional Memory Library (libitm). | |
5 | 5 | |||
6 | Libitm is free software; you can redistribute it and/or modify it | 6 | Libitm is free software; you can redistribute it and/or modify it | |
7 | under the terms of the GNU General Public License as published by | 7 | under the terms of the GNU General Public License as published by | |
8 | the Free Software Foundation; either version 3 of the License, or | 8 | the Free Software Foundation; either version 3 of the License, or | |
9 | (at your option) any later version. | 9 | (at your option) any later version. | |
10 | 10 | |||
11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | 11 | Libitm is distributed in the hope that it will be useful, but WITHOUT ANY | |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | 12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | 14 | more details. | |
15 | 15 | |||
16 | Under Section 7 of GPL version 3, you are granted additional | 16 | Under Section 7 of GPL version 3, you are granted additional | |
17 | permissions described in the GCC Runtime Library Exception, version | 17 | permissions described in the GCC Runtime Library Exception, version | |
18 | 3.1, as published by the Free Software Foundation. | 18 | 3.1, as published by the Free Software Foundation. | |
19 | 19 | |||
20 | You should have received a copy of the GNU General Public License and | 20 | You should have received a copy of the GNU General Public License and | |
21 | a copy of the GCC Runtime Library Exception along with this program; | 21 | a copy of the GCC Runtime Library Exception along with this program; | |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | 22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
23 | <http://www.gnu.org/licenses/>. */ | 23 | <http://www.gnu.org/licenses/>. */ | |
24 | 24 | |||
25 | 25 | |||
26 | #include "asmcfi.h" | 26 | #include "asmcfi.h" | |
27 | 27 | |||
28 | #define CONCAT1(a, b) CONCAT2(a, b) | 28 | #define CONCAT1(a, b) CONCAT2(a, b) | |
29 | #define CONCAT2(a, b) a ## b | 29 | #define CONCAT2(a, b) a ## b | |
30 | 30 | |||
31 | #ifdef __USER_LABEL_PREFIX__ | 31 | #ifdef __USER_LABEL_PREFIX__ | |
32 | # define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) | 32 | # define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) | |
33 | #else | 33 | #else | |
34 | # define SYM(x) x | 34 | # define SYM(x) x | |
35 | #endif | 35 | #endif | |
36 | 36 | |||
37 | #ifdef __ELF__ | 37 | #ifdef __ELF__ | |
38 | # define TYPE(x) .type SYM(x), @function | 38 | # define TYPE(x) .type SYM(x), @function | |
39 | # define SIZE(x) .size SYM(x), . - SYM(x) | 39 | # define SIZE(x) .size SYM(x), . - SYM(x) | |
40 | # ifdef HAVE_ATTRIBUTE_VISIBILITY | 40 | # ifdef HAVE_ATTRIBUTE_VISIBILITY | |
41 | # define HIDDEN(x) .hidden SYM(x) | 41 | # define HIDDEN(x) .hidden SYM(x) | |
42 | # else | 42 | # else | |
43 | # define HIDDEN(x) | 43 | # define HIDDEN(x) | |
44 | # endif | 44 | # endif | |
45 | #else | 45 | #else | |
46 | # define TYPE(x) | 46 | # define TYPE(x) | |
47 | # define SIZE(x) | 47 | # define SIZE(x) | |
48 | # ifdef __MACH__ | 48 | # ifdef __MACH__ | |
49 | # define HIDDEN(x) .private_extern SYM(x) | 49 | # define HIDDEN(x) .private_extern SYM(x) | |
50 | # else | 50 | # else | |
51 | # define HIDDEN(x) | 51 | # define HIDDEN(x) | |
52 | # endif | 52 | # endif | |
53 | #endif | 53 | #endif | |
54 | 54 | |||
55 | .text | 55 | .text | |
56 | 56 | |||
57 | .align 4 | 57 | .align 4 | |
58 | .globl SYM(_ITM_beginTransaction) | 58 | .globl SYM(_ITM_beginTransaction) | |
59 | 59 | |||
60 | SYM(_ITM_beginTransaction): | 60 | SYM(_ITM_beginTransaction): | |
61 | cfi_startproc | 61 | cfi_startproc | |
62 | #ifdef __x86_64__ | 62 | #ifdef __x86_64__ | |
63 | leaq 8(%rsp), %rax | 63 | leaq 8(%rsp), %rax | |
64 | subq $56, %rsp | 64 | subq $56, %rsp | |
65 | cfi_def_cfa_offset(64) | 65 | cfi_def_cfa_offset(64) | |
66 | movq %rax, (%rsp) | 66 | movq %rax, (%rsp) | |
67 | movq %rbx, 8(%rsp) | 67 | movq %rbx, 8(%rsp) | |
68 | movq %rbp, 16(%rsp) | 68 | movq %rbp, 16(%rsp) | |
69 | movq %r12, 24(%rsp) | 69 | movq %r12, 24(%rsp) | |
70 | movq %r13, 32(%rsp) | 70 | movq %r13, 32(%rsp) | |
71 | movq %r14, 40(%rsp) | 71 | movq %r14, 40(%rsp) | |
72 | movq %r15, 48(%rsp) | 72 | movq %r15, 48(%rsp) | |
73 | movq %rsp, %rsi | 73 | movq %rsp, %rsi | |
74 | call SYM(GTM_begin_transaction) | 74 | call SYM(GTM_begin_transaction) | |
75 | addq $56, %rsp | 75 | addq $56, %rsp | |
76 | cfi_def_cfa_offset(8) | 76 | cfi_def_cfa_offset(8) | |
77 | #else | 77 | #else | |
78 | leal 4(%esp), %ecx | 78 | leal 4(%esp), %ecx | |
79 | movl 4(%esp), %eax | 79 | movl 4(%esp), %eax | |
80 | subl $28, %esp | 80 | subl $28, %esp | |
81 | cfi_def_cfa_offset(32) | 81 | cfi_def_cfa_offset(32) | |
82 | movl %ecx, 8(%esp) | 82 | movl %ecx, 8(%esp) | |
83 | movl %ebx, 12(%esp) | 83 | movl %ebx, 12(%esp) | |
84 | movl %esi, 16(%esp) | 84 | movl %esi, 16(%esp) | |
85 | movl %edi, 20(%esp) | 85 | movl %edi, 20(%esp) | |
86 | movl %ebp, 24(%esp) | 86 | movl %ebp, 24(%esp) | |
87 | leal 8(%esp), %edx | 87 | leal 8(%esp), %edx | |
88 | #if defined HAVE_ATTRIBUTE_VISIBILITY || !defined __PIC__ | 88 | #if defined HAVE_ATTRIBUTE_VISIBILITY || !defined __PIC__ | |
89 | call SYM(GTM_begin_transaction) | 89 | call SYM(GTM_begin_transaction) | |
90 | #elif defined __ELF__ | 90 | #elif defined __ELF__ | |
91 | call 1f | 91 | call 1f | |
92 | 1: popl %ebx | 92 | 1: popl %ebx | |
93 | addl $_GLOBAL_OFFSET_TABLE_+[.-1b], %ebx | 93 | addl $_GLOBAL_OFFSET_TABLE_+[.-1b], %ebx | |
94 | call SYM(GTM_begin_transaction)@PLT | 94 | call SYM(GTM_begin_transaction)@PLT | |
95 | movl 12(%esp), %ebx | 95 | movl 12(%esp), %ebx | |
96 | #else | 96 | #else | |
97 | # error "Unsupported PIC sequence" | 97 | # error "Unsupported PIC sequence" | |
98 | #endif | 98 | #endif | |
99 | addl $28, %esp | 99 | addl $28, %esp | |
100 | cfi_def_cfa_offset(4) | 100 | cfi_def_cfa_offset(4) | |
101 | #endif | 101 | #endif | |
102 | ret | 102 | ret | |
103 | cfi_endproc | 103 | cfi_endproc | |
104 | 104 | |||
105 | TYPE(_ITM_beginTransaction) | 105 | TYPE(_ITM_beginTransaction) | |
106 | SIZE(_ITM_beginTransaction) | 106 | SIZE(_ITM_beginTransaction) | |
107 | 107 | |||
108 | .align 4 | 108 | .align 4 | |
109 | .globl SYM(GTM_longjmp) | 109 | .globl SYM(GTM_longjmp) | |
110 | 110 | |||
111 | SYM(GTM_longjmp): | 111 | SYM(GTM_longjmp): | |
112 | cfi_startproc | 112 | cfi_startproc | |
113 | #ifdef __x86_64__ | 113 | #ifdef __x86_64__ | |
114 | movq (%rsi), %rcx | 114 | movq (%rsi), %rcx | |
115 | movq 8(%rsi), %rbx | 115 | movq 8(%rsi), %rbx | |
116 | movq 16(%rsi), %rbp | 116 | movq 16(%rsi), %rbp | |
117 | movq 24(%rsi), %r12 | 117 | movq 24(%rsi), %r12 | |
118 | movq 32(%rsi), %r13 | 118 | movq 32(%rsi), %r13 | |
119 | movq 40(%rsi), %r14 | 119 | movq 40(%rsi), %r14 | |
120 | movq 48(%rsi), %r15 | 120 | movq 48(%rsi), %r15 | |
121 | movl %edi, %eax | 121 | movl %edi, %eax | |
122 | cfi_def_cfa(%rsi, 0) | 122 | cfi_def_cfa(%rsi, 0) | |
123 | cfi_offset(%rip, 56) | 123 | cfi_offset(%rip, 56) | |
124 | cfi_register(%rsp, %rcx) | 124 | cfi_register(%rsp, %rcx) | |
125 | movq %rcx, %rsp | 125 | movq %rcx, %rsp | |
126 | jmp *56(%rsi) | 126 | jmp *56(%rsi) | |
127 | #else | 127 | #else | |
128 | movl (%edx), %ecx | 128 | movl (%edx), %ecx | |
129 | movl 4(%edx), %ebx | 129 | movl 4(%edx), %ebx | |
130 | movl 8(%edx), %esi | 130 | movl 8(%edx), %esi | |
131 | movl 12(%edx), %edi | 131 | movl 12(%edx), %edi | |
132 | movl 16(%edx), %ebp | 132 | movl 16(%edx), %ebp | |
133 | cfi_def_cfa(%edx, 0) | 133 | cfi_def_cfa(%edx, 0) | |
134 | cfi_offset(%eip, 20) | 134 | cfi_offset(%eip, 20) | |
135 | cfi_register(%esp, %ecx) | 135 | cfi_register(%esp, %ecx) | |
136 | movl %ecx, %esp | 136 | movl %ecx, %esp | |
137 | jmp *20(%edx) | 137 | jmp *20(%edx) | |
138 | #endif | 138 | #endif | |
139 | cfi_endproc | 139 | cfi_endproc | |
140 | 140 | |||
141 | TYPE(GTM_longjmp) | 141 | TYPE(GTM_longjmp) | |
142 | HIDDEN(GTM_longjmp) | 142 | HIDDEN(GTM_longjmp) | |
143 | SIZE(GTM_longjmp) | 143 | SIZE(GTM_longjmp) | |
144 | 144 | |||
145 | #ifdef __linux__ | 145 | #if defined(__ELF__) && defined(__linux__) | |
146 | .section .note.GNU-stack, "", @progbits | 146 | .section .note.GNU-stack, "", @progbits | |
147 | #endif | 147 | #endif |
--- src/external/gpl3/gcc/dist/libsanitizer/tsan/tsan_rtl_amd64.S 2015/01/25 20:06:46 1.2
+++ src/external/gpl3/gcc/dist/libsanitizer/tsan/tsan_rtl_amd64.S 2015/11/07 16:53:08 1.3
@@ -1,172 +1,172 @@ | @@ -1,172 +1,172 @@ | |||
1 | .section .text | 1 | .section .text | |
2 | 2 | |||
3 | .globl __tsan_trace_switch_thunk | 3 | .globl __tsan_trace_switch_thunk | |
4 | __tsan_trace_switch_thunk: | 4 | __tsan_trace_switch_thunk: | |
5 | .cfi_startproc | 5 | .cfi_startproc | |
6 | # Save scratch registers. | 6 | # Save scratch registers. | |
7 | push %rax | 7 | push %rax | |
8 | .cfi_adjust_cfa_offset 8 | 8 | .cfi_adjust_cfa_offset 8 | |
9 | .cfi_rel_offset %rax, 0 | 9 | .cfi_rel_offset %rax, 0 | |
10 | push %rcx | 10 | push %rcx | |
11 | .cfi_adjust_cfa_offset 8 | 11 | .cfi_adjust_cfa_offset 8 | |
12 | .cfi_rel_offset %rcx, 0 | 12 | .cfi_rel_offset %rcx, 0 | |
13 | push %rdx | 13 | push %rdx | |
14 | .cfi_adjust_cfa_offset 8 | 14 | .cfi_adjust_cfa_offset 8 | |
15 | .cfi_rel_offset %rdx, 0 | 15 | .cfi_rel_offset %rdx, 0 | |
16 | push %rsi | 16 | push %rsi | |
17 | .cfi_adjust_cfa_offset 8 | 17 | .cfi_adjust_cfa_offset 8 | |
18 | .cfi_rel_offset %rsi, 0 | 18 | .cfi_rel_offset %rsi, 0 | |
19 | push %rdi | 19 | push %rdi | |
20 | .cfi_adjust_cfa_offset 8 | 20 | .cfi_adjust_cfa_offset 8 | |
21 | .cfi_rel_offset %rdi, 0 | 21 | .cfi_rel_offset %rdi, 0 | |
22 | push %r8 | 22 | push %r8 | |
23 | .cfi_adjust_cfa_offset 8 | 23 | .cfi_adjust_cfa_offset 8 | |
24 | .cfi_rel_offset %r8, 0 | 24 | .cfi_rel_offset %r8, 0 | |
25 | push %r9 | 25 | push %r9 | |
26 | .cfi_adjust_cfa_offset 8 | 26 | .cfi_adjust_cfa_offset 8 | |
27 | .cfi_rel_offset %r9, 0 | 27 | .cfi_rel_offset %r9, 0 | |
28 | push %r10 | 28 | push %r10 | |
29 | .cfi_adjust_cfa_offset 8 | 29 | .cfi_adjust_cfa_offset 8 | |
30 | .cfi_rel_offset %r10, 0 | 30 | .cfi_rel_offset %r10, 0 | |
31 | push %r11 | 31 | push %r11 | |
32 | .cfi_adjust_cfa_offset 8 | 32 | .cfi_adjust_cfa_offset 8 | |
33 | .cfi_rel_offset %r11, 0 | 33 | .cfi_rel_offset %r11, 0 | |
34 | # Align stack frame. | 34 | # Align stack frame. | |
35 | push %rbx # non-scratch | 35 | push %rbx # non-scratch | |
36 | .cfi_adjust_cfa_offset 8 | 36 | .cfi_adjust_cfa_offset 8 | |
37 | .cfi_rel_offset %rbx, 0 | 37 | .cfi_rel_offset %rbx, 0 | |
38 | mov %rsp, %rbx # save current rsp | 38 | mov %rsp, %rbx # save current rsp | |
39 | .cfi_def_cfa_register %rbx | 39 | .cfi_def_cfa_register %rbx | |
40 | shr $4, %rsp # clear 4 lsb, align to 16 | 40 | shr $4, %rsp # clear 4 lsb, align to 16 | |
41 | shl $4, %rsp | 41 | shl $4, %rsp | |
42 | 42 | |||
43 | #ifdef __PIC__ | 43 | #ifdef __PIC__ | |
44 | call __tsan_trace_switch@PLT | 44 | call __tsan_trace_switch@PLT | |
45 | #else | 45 | #else | |
46 | call __tsan_trace_switch | 46 | call __tsan_trace_switch | |
47 | #endif | 47 | #endif | |
48 | 48 | |||
49 | # Unalign stack frame back. | 49 | # Unalign stack frame back. | |
50 | mov %rbx, %rsp # restore the original rsp | 50 | mov %rbx, %rsp # restore the original rsp | |
51 | .cfi_def_cfa_register %rsp | 51 | .cfi_def_cfa_register %rsp | |
52 | pop %rbx | 52 | pop %rbx | |
53 | .cfi_adjust_cfa_offset -8 | 53 | .cfi_adjust_cfa_offset -8 | |
54 | # Restore scratch registers. | 54 | # Restore scratch registers. | |
55 | pop %r11 | 55 | pop %r11 | |
56 | .cfi_adjust_cfa_offset -8 | 56 | .cfi_adjust_cfa_offset -8 | |
57 | pop %r10 | 57 | pop %r10 | |
58 | .cfi_adjust_cfa_offset -8 | 58 | .cfi_adjust_cfa_offset -8 | |
59 | pop %r9 | 59 | pop %r9 | |
60 | .cfi_adjust_cfa_offset -8 | 60 | .cfi_adjust_cfa_offset -8 | |
61 | pop %r8 | 61 | pop %r8 | |
62 | .cfi_adjust_cfa_offset -8 | 62 | .cfi_adjust_cfa_offset -8 | |
63 | pop %rdi | 63 | pop %rdi | |
64 | .cfi_adjust_cfa_offset -8 | 64 | .cfi_adjust_cfa_offset -8 | |
65 | pop %rsi | 65 | pop %rsi | |
66 | .cfi_adjust_cfa_offset -8 | 66 | .cfi_adjust_cfa_offset -8 | |
67 | pop %rdx | 67 | pop %rdx | |
68 | .cfi_adjust_cfa_offset -8 | 68 | .cfi_adjust_cfa_offset -8 | |
69 | pop %rcx | 69 | pop %rcx | |
70 | .cfi_adjust_cfa_offset -8 | 70 | .cfi_adjust_cfa_offset -8 | |
71 | pop %rax | 71 | pop %rax | |
72 | .cfi_adjust_cfa_offset -8 | 72 | .cfi_adjust_cfa_offset -8 | |
73 | .cfi_restore %rax | 73 | .cfi_restore %rax | |
74 | .cfi_restore %rbx | 74 | .cfi_restore %rbx | |
75 | .cfi_restore %rcx | 75 | .cfi_restore %rcx | |
76 | .cfi_restore %rdx | 76 | .cfi_restore %rdx | |
77 | .cfi_restore %rsi | 77 | .cfi_restore %rsi | |
78 | .cfi_restore %rdi | 78 | .cfi_restore %rdi | |
79 | .cfi_restore %r8 | 79 | .cfi_restore %r8 | |
80 | .cfi_restore %r9 | 80 | .cfi_restore %r9 | |
81 | .cfi_restore %r10 | 81 | .cfi_restore %r10 | |
82 | .cfi_restore %r11 | 82 | .cfi_restore %r11 | |
83 | ret | 83 | ret | |
84 | .cfi_endproc | 84 | .cfi_endproc | |
85 | 85 | |||
86 | .globl __tsan_report_race_thunk | 86 | .globl __tsan_report_race_thunk | |
87 | __tsan_report_race_thunk: | 87 | __tsan_report_race_thunk: | |
88 | .cfi_startproc | 88 | .cfi_startproc | |
89 | # Save scratch registers. | 89 | # Save scratch registers. | |
90 | push %rax | 90 | push %rax | |
91 | .cfi_adjust_cfa_offset 8 | 91 | .cfi_adjust_cfa_offset 8 | |
92 | .cfi_rel_offset %rax, 0 | 92 | .cfi_rel_offset %rax, 0 | |
93 | push %rcx | 93 | push %rcx | |
94 | .cfi_adjust_cfa_offset 8 | 94 | .cfi_adjust_cfa_offset 8 | |
95 | .cfi_rel_offset %rcx, 0 | 95 | .cfi_rel_offset %rcx, 0 | |
96 | push %rdx | 96 | push %rdx | |
97 | .cfi_adjust_cfa_offset 8 | 97 | .cfi_adjust_cfa_offset 8 | |
98 | .cfi_rel_offset %rdx, 0 | 98 | .cfi_rel_offset %rdx, 0 | |
99 | push %rsi | 99 | push %rsi | |
100 | .cfi_adjust_cfa_offset 8 | 100 | .cfi_adjust_cfa_offset 8 | |
101 | .cfi_rel_offset %rsi, 0 | 101 | .cfi_rel_offset %rsi, 0 | |
102 | push %rdi | 102 | push %rdi | |
103 | .cfi_adjust_cfa_offset 8 | 103 | .cfi_adjust_cfa_offset 8 | |
104 | .cfi_rel_offset %rdi, 0 | 104 | .cfi_rel_offset %rdi, 0 | |
105 | push %r8 | 105 | push %r8 | |
106 | .cfi_adjust_cfa_offset 8 | 106 | .cfi_adjust_cfa_offset 8 | |
107 | .cfi_rel_offset %r8, 0 | 107 | .cfi_rel_offset %r8, 0 | |
108 | push %r9 | 108 | push %r9 | |
109 | .cfi_adjust_cfa_offset 8 | 109 | .cfi_adjust_cfa_offset 8 | |
110 | .cfi_rel_offset %r9, 0 | 110 | .cfi_rel_offset %r9, 0 | |
111 | push %r10 | 111 | push %r10 | |
112 | .cfi_adjust_cfa_offset 8 | 112 | .cfi_adjust_cfa_offset 8 | |
113 | .cfi_rel_offset %r10, 0 | 113 | .cfi_rel_offset %r10, 0 | |
114 | push %r11 | 114 | push %r11 | |
115 | .cfi_adjust_cfa_offset 8 | 115 | .cfi_adjust_cfa_offset 8 | |
116 | .cfi_rel_offset %r11, 0 | 116 | .cfi_rel_offset %r11, 0 | |
117 | # Align stack frame. | 117 | # Align stack frame. | |
118 | push %rbx # non-scratch | 118 | push %rbx # non-scratch | |
119 | .cfi_adjust_cfa_offset 8 | 119 | .cfi_adjust_cfa_offset 8 | |
120 | .cfi_rel_offset %rbx, 0 | 120 | .cfi_rel_offset %rbx, 0 | |
121 | mov %rsp, %rbx # save current rsp | 121 | mov %rsp, %rbx # save current rsp | |
122 | .cfi_def_cfa_register %rbx | 122 | .cfi_def_cfa_register %rbx | |
123 | shr $4, %rsp # clear 4 lsb, align to 16 | 123 | shr $4, %rsp # clear 4 lsb, align to 16 | |
124 | shl $4, %rsp | 124 | shl $4, %rsp | |
125 | 125 | |||
126 | #ifdef __PIC__ | 126 | #ifdef __PIC__ | |
127 | call __tsan_report_race@PLT | 127 | call __tsan_report_race@PLT | |
128 | #else | 128 | #else | |
129 | call __tsan_report_race | 129 | call __tsan_report_race | |
130 | #endif | 130 | #endif | |
131 | 131 | |||
132 | # Unalign stack frame back. | 132 | # Unalign stack frame back. | |
133 | mov %rbx, %rsp # restore the original rsp | 133 | mov %rbx, %rsp # restore the original rsp | |
134 | .cfi_def_cfa_register %rsp | 134 | .cfi_def_cfa_register %rsp | |
135 | pop %rbx | 135 | pop %rbx | |
136 | .cfi_adjust_cfa_offset -8 | 136 | .cfi_adjust_cfa_offset -8 | |
137 | # Restore scratch registers. | 137 | # Restore scratch registers. | |
138 | pop %r11 | 138 | pop %r11 | |
139 | .cfi_adjust_cfa_offset -8 | 139 | .cfi_adjust_cfa_offset -8 | |
140 | pop %r10 | 140 | pop %r10 | |
141 | .cfi_adjust_cfa_offset -8 | 141 | .cfi_adjust_cfa_offset -8 | |
142 | pop %r9 | 142 | pop %r9 | |
143 | .cfi_adjust_cfa_offset -8 | 143 | .cfi_adjust_cfa_offset -8 | |
144 | pop %r8 | 144 | pop %r8 | |
145 | .cfi_adjust_cfa_offset -8 | 145 | .cfi_adjust_cfa_offset -8 | |
146 | pop %rdi | 146 | pop %rdi | |
147 | .cfi_adjust_cfa_offset -8 | 147 | .cfi_adjust_cfa_offset -8 | |
148 | pop %rsi | 148 | pop %rsi | |
149 | .cfi_adjust_cfa_offset -8 | 149 | .cfi_adjust_cfa_offset -8 | |
150 | pop %rdx | 150 | pop %rdx | |
151 | .cfi_adjust_cfa_offset -8 | 151 | .cfi_adjust_cfa_offset -8 | |
152 | pop %rcx | 152 | pop %rcx | |
153 | .cfi_adjust_cfa_offset -8 | 153 | .cfi_adjust_cfa_offset -8 | |
154 | pop %rax | 154 | pop %rax | |
155 | .cfi_adjust_cfa_offset -8 | 155 | .cfi_adjust_cfa_offset -8 | |
156 | .cfi_restore %rax | 156 | .cfi_restore %rax | |
157 | .cfi_restore %rbx | 157 | .cfi_restore %rbx | |
158 | .cfi_restore %rcx | 158 | .cfi_restore %rcx | |
159 | .cfi_restore %rdx | 159 | .cfi_restore %rdx | |
160 | .cfi_restore %rsi | 160 | .cfi_restore %rsi | |
161 | .cfi_restore %rdi | 161 | .cfi_restore %rdi | |
162 | .cfi_restore %r8 | 162 | .cfi_restore %r8 | |
163 | .cfi_restore %r9 | 163 | .cfi_restore %r9 | |
164 | .cfi_restore %r10 | 164 | .cfi_restore %r10 | |
165 | .cfi_restore %r11 | 165 | .cfi_restore %r11 | |
166 | ret | 166 | ret | |
167 | .cfi_endproc | 167 | .cfi_endproc | |
168 | 168 | |||
169 | #ifdef __linux__ | 169 | #if defined(__ELF__) && defined(__linux__) | |
170 | /* We do not need executable stack. */ | 170 | /* We do not need executable stack. */ | |
171 | .section .note.GNU-stack,"",@progbits | 171 | .section .note.GNU-stack,"",@progbits | |
172 | #endif | 172 | #endif |