| @@ -29,1056 +29,1057 @@ | | | @@ -29,1056 +29,1057 @@ |
29 | #include "config.h" | | 29 | #include "config.h" |
30 | #endif | | 30 | #endif |
31 | | | 31 | |
32 | #include <assert.h> | | 32 | #include <assert.h> |
33 | #include <string.h> | | 33 | #include <string.h> |
34 | #include <stdlib.h> | | 34 | #include <stdlib.h> |
35 | #include <unistd.h> | | 35 | #include <unistd.h> |
36 | #include <stdio.h> | | 36 | #include <stdio.h> |
37 | #include <errno.h> | | 37 | #include <errno.h> |
38 | | | 38 | |
39 | #if USE_POLL | | 39 | #if USE_POLL |
40 | #include <poll.h> | | 40 | #include <poll.h> |
41 | #endif | | 41 | #endif |
42 | #ifndef _WIN32 | | 42 | #ifndef _WIN32 |
43 | #include <sys/select.h> | | 43 | #include <sys/select.h> |
44 | #include <sys/socket.h> | | 44 | #include <sys/socket.h> |
45 | #endif | | 45 | #endif |
46 | | | 46 | |
47 | #ifdef _WIN32 | | 47 | #ifdef _WIN32 |
48 | #include "xcb_windefs.h" | | 48 | #include "xcb_windefs.h" |
49 | #endif /* _WIN32 */ | | 49 | #endif /* _WIN32 */ |
50 | | | 50 | |
51 | #include "xcb.h" | | 51 | #include "xcb.h" |
52 | #include "xcbext.h" | | 52 | #include "xcbext.h" |
53 | #include "xcbint.h" | | 53 | #include "xcbint.h" |
54 | | | 54 | |
55 | #define XCB_ERROR 0 | | 55 | #define XCB_ERROR 0 |
56 | #define XCB_REPLY 1 | | 56 | #define XCB_REPLY 1 |
57 | #define XCB_XGE_EVENT 35 | | 57 | #define XCB_XGE_EVENT 35 |
58 | | | 58 | |
/* Singly-linked FIFO node holding one event read off the wire.  The
 * queue head and tail pointer live in c->in (events / events_tail). */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};
63 | | | 63 | |
/* Registration for "special event" delivery: XGE events matching
 * (extension, eid) are diverted out of the main event queue into this
 * private per-registration queue instead (see event_special()). */
struct xcb_special_event {

    struct xcb_special_event *next;

    /* Match XGE events for the specific extension and event ID (the
     * first 32 bit word after evtype)
     */
    uint8_t extension;
    uint32_t eid;
    uint32_t *stamp; /* if non-NULL, incremented once per event queued here */

    struct event_list *events;
    struct event_list **events_tail; /* tail link for O(1) append */

    pthread_cond_t special_event_cond; /* signalled when an event is queued */
};
80 | | | 80 | |
/* Singly-linked list node holding one reply (or checked-error) buffer
 * for a request; ownership of 'reply' passes to whoever unlinks it. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};
85 | | | 85 | |
/* Bookkeeping for a range of issued requests whose responses need
 * special treatment: a protocol workaround, checked errors, discarded
 * replies, or attached file descriptors (see 'flags' in read_packet). */
typedef struct pending_reply {
    uint64_t first_request; /* full 64-bit sequence of first request in range */
    uint64_t last_request;
    enum workarounds workaround;
    int flags; /* XCB_REQUEST_* flag bits */
    struct pending_reply *next;
} pending_reply;
93 | | | 93 | |
/* A thread blocked waiting for a reply.  The list is kept sorted by
 * 'request' (see insert_reader) so earlier waiters are woken first. */
typedef struct reader_list {
    uint64_t request; /* full 64-bit sequence number being waited on */
    pthread_cond_t *data; /* condition signalled when its response arrives */
    struct reader_list *next;
} reader_list;
99 | | | 99 | |
/* Stack node tracking one special-event registration a thread is
 * currently waiting on (pushed/popped by insert_special/remove_special). */
typedef struct special_list {
    xcb_special_event_t *se;
    struct special_list *next;
} special_list;
104 | | | 104 | |
105 | static void remove_finished_readers(reader_list **prev_reader, uint64_t completed) | | 105 | static void remove_finished_readers(reader_list **prev_reader, uint64_t completed) |
106 | { | | 106 | { |
107 | while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed)) | | 107 | while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed)) |
108 | { | | 108 | { |
109 | /* If you don't have what you're looking for now, you never | | 109 | /* If you don't have what you're looking for now, you never |
110 | * will. Wake up and leave me alone. */ | | 110 | * will. Wake up and leave me alone. */ |
111 | pthread_cond_signal((*prev_reader)->data); | | 111 | pthread_cond_signal((*prev_reader)->data); |
112 | *prev_reader = (*prev_reader)->next; | | 112 | *prev_reader = (*prev_reader)->next; |
113 | } | | 113 | } |
114 | } | | 114 | } |
115 | | | 115 | |
#if HAVE_SENDMSG
/* Hand out 'nfd' file descriptors from the connection's queue of
 * received fds.  Returns 1 on success, 0 if fewer than nfd are queued. */
static int read_fds(xcb_connection_t *c, int *fds, int nfd)
{
    int *queued = &c->in.in_fd.fd[c->in.in_fd.ifd];
    int nqueued = c->in.in_fd.nfd - c->in.in_fd.ifd;

    if (nqueued < nfd)
        return 0;
    memcpy(fds, queued, nfd * sizeof (int));
    c->in.in_fd.ifd += nfd;
    return 1;
}
#endif
129 | | | 129 | |
/* Wire layout of the first 32 bytes of an XGE (Generic) event, with the
 * fields needed for special-event matching (extension, eid) broken out.
 * NOTE(review): assumes every XGE event used with special events carries
 * its event ID in the first 32-bit word after evtype — matches the
 * comment on struct xcb_special_event; confirm for new extensions. */
typedef struct xcb_ge_special_event_t {
    uint8_t response_type; /**< XCB_XGE_EVENT for matching events */
    uint8_t extension; /**< major opcode of the generating extension */
    uint16_t sequence; /**< */
    uint32_t length; /**< trailing length in 4-byte units */
    uint16_t evtype; /**< */
    uint8_t pad0[2]; /**< */
    uint32_t eid; /**< event ID matched against xcb_special_event.eid */
    uint8_t pad1[16]; /**< */
} xcb_ge_special_event_t;
140 | | | 140 | |
141 | static int event_special(xcb_connection_t *c, | | 141 | static int event_special(xcb_connection_t *c, |
142 | struct event_list *event) | | 142 | struct event_list *event) |
143 | { | | 143 | { |
144 | struct xcb_special_event *special_event; | | 144 | struct xcb_special_event *special_event; |
145 | struct xcb_ge_special_event_t *ges = (void *) event->event; | | 145 | struct xcb_ge_special_event_t *ges = (void *) event->event; |
146 | | | 146 | |
147 | /* Special events are always XGE events */ | | 147 | /* Special events are always XGE events */ |
148 | if ((ges->response_type & 0x7f) != XCB_XGE_EVENT) | | 148 | if ((ges->response_type & 0x7f) != XCB_XGE_EVENT) |
149 | return 0; | | 149 | return 0; |
150 | | | 150 | |
151 | for (special_event = c->in.special_events; | | 151 | for (special_event = c->in.special_events; |
152 | special_event; | | 152 | special_event; |
153 | special_event = special_event->next) | | 153 | special_event = special_event->next) |
154 | { | | 154 | { |
155 | if (ges->extension == special_event->extension && | | 155 | if (ges->extension == special_event->extension && |
156 | ges->eid == special_event->eid) | | 156 | ges->eid == special_event->eid) |
157 | { | | 157 | { |
158 | *special_event->events_tail = event; | | 158 | *special_event->events_tail = event; |
159 | special_event->events_tail = &event->next; | | 159 | special_event->events_tail = &event->next; |
160 | if (special_event->stamp) | | 160 | if (special_event->stamp) |
161 | ++(*special_event->stamp); | | 161 | ++(*special_event->stamp); |
162 | pthread_cond_signal(&special_event->special_event_cond); | | 162 | pthread_cond_signal(&special_event->special_event_cond); |
163 | return 1; | | 163 | return 1; |
164 | } | | 164 | } |
165 | } | | 165 | } |
166 | | | 166 | |
167 | return 0; | | 167 | return 0; |
168 | } | | 168 | } |
169 | | | 169 | |
/* Consume one complete X packet (reply, error, or event) from the
 * connection's buffered input and route it to the right queue.
 * Returns 1 if a packet was consumed, 0 if not enough data is buffered
 * yet or an unrecoverable error occurred.  Caller holds the I/O lock. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    uint64_t length = 32; /* every X packet starts with a 32-byte header */
    uint64_t eventlength = 0; /* length after first 32 bytes for GenericEvents */
    int nfd = 0; /* Number of file descriptors attached to the reply */
    uint64_t bufsize;
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet.  KeymapNotify is
     * the one packet that carries no sequence number, so skip it. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        /* Widen the 16-bit on-the-wire sequence against the last one
         * read, bumping the high bits on wraparound. */
        uint64_t lastread = c->in.request_read;
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* A new request's responses are starting: archive any
             * replies accumulated for the previous request. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Drop pending_reply records for requests that can no longer
         * receive responses (external-socket-owner ranges are kept). */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates its request: no more responses follow. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
    }

    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        /* Find the pending_reply record covering this sequence number,
         * if any; it carries the flags/workaround for this response. */
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Buggy servers report the wrong length here; recompute it
             * from the reply's own count fields. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;

        /* XXX a bit of a hack -- we "know" that all FD replys place
         * the number of fds in the pad0 byte */
        if (pend && pend->flags & XCB_REQUEST_REPLY_FDS)
            nfd = genrep.pad0;
    }

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;

    /* Events get an extra uint32_t for full_sequence; replies do not.
     * Guard against absurd lengths from a hostile/broken server. */
    bufsize = length + eventlength + nfd * sizeof(int)  +
        (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t));
    if (bufsize < INT32_MAX)
        buf = malloc((size_t) bufsize);
    else
        buf = NULL;
    if(!buf)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

#if HAVE_SENDMSG
    if (nfd)
    {
        /* Attached file descriptors are stored after the reply data. */
        if (!read_fds(c, (int *) &((char *) buf)[length], nfd))
        {
            free(buf);
            return 0;
        }
    }
#endif

    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the earliest reader if this response is the one it wants. */
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;

    /* Special events bypass the main queue entirely. */
    if (!event_special(c, event)) {
        *c->in.events_tail = event;
        c->in.events_tail = &event->next;
        pthread_cond_signal(&c->in.event_cond);
    }
    return 1; /* I have something for you... */
}
342 | | | 342 | |
343 | static xcb_generic_event_t *get_event(xcb_connection_t *c) | | 343 | static xcb_generic_event_t *get_event(xcb_connection_t *c) |
344 | { | | 344 | { |
345 | struct event_list *cur = c->in.events; | | 345 | struct event_list *cur = c->in.events; |
346 | xcb_generic_event_t *ret; | | 346 | xcb_generic_event_t *ret; |
347 | if(!c->in.events) | | 347 | if(!c->in.events) |
348 | return 0; | | 348 | return 0; |
349 | ret = cur->event; | | 349 | ret = cur->event; |
350 | c->in.events = cur->next; | | 350 | c->in.events = cur->next; |
351 | if(!cur->next) | | 351 | if(!cur->next) |
352 | c->in.events_tail = &c->in.events; | | 352 | c->in.events_tail = &c->in.events; |
353 | free(cur); | | 353 | free(cur); |
354 | return ret; | | 354 | return ret; |
355 | } | | 355 | } |
356 | | | 356 | |
357 | static void free_reply_list(struct reply_list *head) | | 357 | static void free_reply_list(struct reply_list *head) |
358 | { | | 358 | { |
359 | while(head) | | 359 | while(head) |
360 | { | | 360 | { |
361 | struct reply_list *cur = head; | | 361 | struct reply_list *cur = head; |
362 | head = cur->next; | | 362 | head = cur->next; |
363 | free(cur->reply); | | 363 | free(cur->reply); |
364 | free(cur); | | 364 | free(cur); |
365 | } | | 365 | } |
366 | } | | 366 | } |
367 | | | 367 | |
/* Read exactly 'len' bytes from 'fd', blocking via poll/select whenever
 * the (non-blocking) socket has no data.  Returns len on success, or
 * the failing recv/poll/select result (<= 0) on error or EOF. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done, 0);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_Win32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            /* Retry when interrupted by a signal. */
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        /* Any remaining failure (or EOF from recv) aborts the read. */
        if(ret <= 0)
            return ret;
    }
    return len;
}
407 | | | 407 | |
/* Non-blocking probe for the response to 'request'.  Returns 1 when the
 * outcome is decided — *reply or *error is filled in, or both are NULL
 * if the request has no (more) responses.  Returns 0 when more replies
 * may still arrive and the caller must wait.  Caller holds the I/O lock
 * and takes ownership of any returned buffer. */
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Keep any remaining replies for this request in the map. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Errors and replies share a buffer; route by response_type. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply); /* caller didn't ask for errors; drop it */
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
462 | | | 462 | |
463 | static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond) | | 463 | static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond) |
464 | { | | 464 | { |
465 | while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request)) | | 465 | while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request)) |
466 | prev_reader = &(*prev_reader)->next; | | 466 | prev_reader = &(*prev_reader)->next; |
467 | reader->request = request; | | 467 | reader->request = request; |
468 | reader->data = cond; | | 468 | reader->data = cond; |
469 | reader->next = *prev_reader; | | 469 | reader->next = *prev_reader; |
470 | *prev_reader = reader; | | 470 | *prev_reader = reader; |
471 | } | | 471 | } |
472 | | | 472 | |
473 | static void remove_reader(reader_list **prev_reader, reader_list *reader) | | 473 | static void remove_reader(reader_list **prev_reader, reader_list *reader) |
474 | { | | 474 | { |
475 | while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request)) | | 475 | while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request)) |
476 | if(*prev_reader == reader) | | 476 | if(*prev_reader == reader) |
477 | { | | 477 | { |
478 | *prev_reader = (*prev_reader)->next; | | 478 | *prev_reader = (*prev_reader)->next; |
479 | break; | | 479 | break; |
480 | } | | 480 | } |
481 | } | | 481 | } |
482 | | | 482 | |
483 | static void insert_special(special_list **prev_special, special_list *special, xcb_special_event_t *se) | | 483 | static void insert_special(special_list **prev_special, special_list *special, xcb_special_event_t *se) |
484 | { | | 484 | { |
485 | special->se = se; | | 485 | special->se = se; |
486 | special->next = *prev_special; | | 486 | special->next = *prev_special; |
487 | *prev_special = special; | | 487 | *prev_special = special; |
488 | } | | 488 | } |
489 | | | 489 | |
490 | static void remove_special(special_list **prev_special, special_list *special) | | 490 | static void remove_special(special_list **prev_special, special_list *special) |
491 | { | | 491 | { |
492 | while(*prev_special) | | 492 | while(*prev_special) |
493 | { | | 493 | { |
494 | if(*prev_special == special) | | 494 | if(*prev_special == special) |
495 | { | | 495 | { |
496 | *prev_special = (*prev_special)->next; | | 496 | *prev_special = (*prev_special)->next; |
497 | break; | | 497 | break; |
498 | } | | 498 | } |
499 | prev_special = &(*prev_special)->next; | | 499 | prev_special = &(*prev_special)->next; |
500 | } | | 500 | } |
501 | } | | 501 | } |
502 | | | 502 | |
/* Block until the reply or error for 'request' arrives (or the
 * connection fails), returning the reply buffer or NULL.  Caller holds
 * the I/O lock; *e receives a checked error if the caller wants one. */
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        /* Register ourselves (sorted by sequence) so read_packet()
         * knows to signal us when our response shows up. */
        insert_reader(&c->in.readers, &reader, request, &cond);

        /* Loop: poll, and if undecided, sleep until woken or the
         * connection dies. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    /* Hand the socket off to the next blocked reader, if any. */
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
526 | | | 526 | |
527 | static uint64_t widen(xcb_connection_t *c, unsigned int request) | | 527 | static uint64_t widen(xcb_connection_t *c, unsigned int request) |
528 | { | | 528 | { |
529 | uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request; | | 529 | uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request; |
530 | if(widened_request > c->out.request) | | 530 | if(widened_request > c->out.request) |
531 | widened_request -= UINT64_C(1) << 32; | | 531 | widened_request -= UINT64_C(1) << 32; |
532 | return widened_request; | | 532 | return widened_request; |
533 | } | | 533 | } |
534 | | | 534 | |
535 | /* Public interface */ | | 535 | /* Public interface */ |
536 | | | 536 | |
537 | void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e) | | 537 | void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e) |
538 | { | | 538 | { |
539 | void *ret; | | 539 | void *ret; |
540 | if(e) | | 540 | if(e) |
541 | *e = 0; | | 541 | *e = 0; |
542 | if(c->has_error) | | 542 | if(c->has_error) |
543 | return 0; | | 543 | return 0; |
544 | | | 544 | |
545 | pthread_mutex_lock(&c->iolock); | | 545 | pthread_mutex_lock(&c->iolock); |
546 | ret = wait_for_reply(c, widen(c, request), e); | | 546 | ret = wait_for_reply(c, widen(c, request), e); |
547 | pthread_mutex_unlock(&c->iolock); | | 547 | pthread_mutex_unlock(&c->iolock); |
548 | return ret; | | 548 | return ret; |
549 | } | | 549 | } |
550 | | | 550 | |
551 | void *xcb_wait_for_reply64(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e) | | 551 | void *xcb_wait_for_reply64(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e) |
552 | { | | 552 | { |
553 | void *ret; | | 553 | void *ret; |
554 | if(e) | | 554 | if(e) |
555 | *e = 0; | | 555 | *e = 0; |
556 | if(c->has_error) | | 556 | if(c->has_error) |
557 | return 0; | | 557 | return 0; |
558 | | | 558 | |
559 | pthread_mutex_lock(&c->iolock); | | 559 | pthread_mutex_lock(&c->iolock); |
560 | ret = wait_for_reply(c, request, e); | | 560 | ret = wait_for_reply(c, request, e); |
561 | pthread_mutex_unlock(&c->iolock); | | 561 | pthread_mutex_unlock(&c->iolock); |
562 | return ret; | | 562 | return ret; |
563 | } | | 563 | } |
564 | | | 564 | |
565 | int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t reply_size) | | 565 | int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t reply_size) |
566 | { | | 566 | { |
567 | return (int *) (&((char *) reply)[reply_size]); | | 567 | return (int *) (&((char *) reply)[reply_size]); |
568 | } | | 568 | } |
569 | | | 569 | |
570 | static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq) | | 570 | static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq) |
571 | { | | 571 | { |
572 | pending_reply *pend; | | 572 | pending_reply *pend; |
573 | pend = malloc(sizeof(*pend)); | | 573 | pend = malloc(sizeof(*pend)); |
574 | if(!pend) | | 574 | if(!pend) |
575 | { | | 575 | { |
576 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT); | | 576 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT); |
577 | return; | | 577 | return; |
578 | } | | 578 | } |
579 | | | 579 | |
580 | pend->first_request = seq; | | 580 | pend->first_request = seq; |
581 | pend->last_request = seq; | | 581 | pend->last_request = seq; |
582 | pend->workaround = 0; | | 582 | pend->workaround = 0; |
583 | pend->flags = XCB_REQUEST_DISCARD_REPLY; | | 583 | pend->flags = XCB_REQUEST_DISCARD_REPLY; |
584 | pend->next = *prev_next; | | 584 | pend->next = *prev_next; |
585 | *prev_next = pend; | | 585 | *prev_next = pend; |
586 | | | 586 | |
587 | if(!pend->next) | | 587 | if(!pend->next) |
588 | c->in.pending_replies_tail = &pend->next; | | 588 | c->in.pending_replies_tail = &pend->next; |
589 | } | | 589 | } |
590 | | | 590 | |
/* Arrange for the response to `request` to be thrown away, whether it has
 * already been read or is still in flight.  Must be called with the I/O
 * lock held. */
static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        /* List is ordered by sequence; once past `request`, no entry matches. */
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}
622 | | | 622 | |
623 | void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence) | | 623 | void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence) |
624 | { | | 624 | { |
625 | if(c->has_error) | | 625 | if(c->has_error) |
626 | return; | | 626 | return; |
627 | | | 627 | |
628 | /* If an error occurred when issuing the request, fail immediately. */ | | 628 | /* If an error occurred when issuing the request, fail immediately. */ |
629 | if(!sequence) | | 629 | if(!sequence) |
630 | return; | | 630 | return; |
631 | | | 631 | |
632 | pthread_mutex_lock(&c->iolock); | | 632 | pthread_mutex_lock(&c->iolock); |
633 | discard_reply(c, widen(c, sequence)); | | 633 | discard_reply(c, widen(c, sequence)); |
634 | pthread_mutex_unlock(&c->iolock); | | 634 | pthread_mutex_unlock(&c->iolock); |
635 | } | | 635 | } |
636 | | | 636 | |
637 | void xcb_discard_reply64(xcb_connection_t *c, uint64_t sequence) | | 637 | void xcb_discard_reply64(xcb_connection_t *c, uint64_t sequence) |
638 | { | | 638 | { |
639 | if(c->has_error) | | 639 | if(c->has_error) |
640 | return; | | 640 | return; |
641 | | | 641 | |
642 | /* If an error occurred when issuing the request, fail immediately. */ | | 642 | /* If an error occurred when issuing the request, fail immediately. */ |
643 | if(!sequence) | | 643 | if(!sequence) |
644 | return; | | 644 | return; |
645 | | | 645 | |
646 | pthread_mutex_lock(&c->iolock); | | 646 | pthread_mutex_lock(&c->iolock); |
647 | discard_reply(c, sequence); | | 647 | discard_reply(c, sequence); |
648 | pthread_mutex_unlock(&c->iolock); | | 648 | pthread_mutex_unlock(&c->iolock); |
649 | } | | 649 | } |
650 | | | 650 | |
651 | int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error) | | 651 | int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error) |
652 | { | | 652 | { |
653 | int ret; | | 653 | int ret; |
654 | if(c->has_error) | | 654 | if(c->has_error) |
655 | { | | 655 | { |
656 | *reply = 0; | | 656 | *reply = 0; |
657 | if(error) | | 657 | if(error) |
658 | *error = 0; | | 658 | *error = 0; |
659 | return 1; /* would not block */ | | 659 | return 1; /* would not block */ |
660 | } | | 660 | } |
661 | assert(reply != 0); | | 661 | assert(reply != 0); |
662 | pthread_mutex_lock(&c->iolock); | | 662 | pthread_mutex_lock(&c->iolock); |
663 | ret = poll_for_reply(c, widen(c, request), reply, error); | | 663 | ret = poll_for_reply(c, widen(c, request), reply, error); |
664 | if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ | | 664 | if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ |
665 | ret = poll_for_reply(c, widen(c, request), reply, error); | | 665 | ret = poll_for_reply(c, widen(c, request), reply, error); |
666 | pthread_mutex_unlock(&c->iolock); | | 666 | pthread_mutex_unlock(&c->iolock); |
667 | return ret; | | 667 | return ret; |
668 | } | | 668 | } |
669 | | | 669 | |
670 | int xcb_poll_for_reply64(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error) | | 670 | int xcb_poll_for_reply64(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error) |
671 | { | | 671 | { |
672 | int ret; | | 672 | int ret; |
673 | if(c->has_error) | | 673 | if(c->has_error) |
674 | { | | 674 | { |
675 | *reply = 0; | | 675 | *reply = 0; |
676 | if(error) | | 676 | if(error) |
677 | *error = 0; | | 677 | *error = 0; |
678 | return 1; /* would not block */ | | 678 | return 1; /* would not block */ |
679 | } | | 679 | } |
680 | assert(reply != 0); | | 680 | assert(reply != 0); |
681 | pthread_mutex_lock(&c->iolock); | | 681 | pthread_mutex_lock(&c->iolock); |
682 | ret = poll_for_reply(c, request, reply, error); | | 682 | ret = poll_for_reply(c, request, reply, error); |
683 | if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ | | 683 | if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ |
684 | ret = poll_for_reply(c, request, reply, error); | | 684 | ret = poll_for_reply(c, request, reply, error); |
685 | pthread_mutex_unlock(&c->iolock); | | 685 | pthread_mutex_unlock(&c->iolock); |
686 | return ret; | | 686 | return ret; |
687 | } | | 687 | } |
688 | | | 688 | |
689 | xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c) | | 689 | xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c) |
690 | { | | 690 | { |
691 | xcb_generic_event_t *ret; | | 691 | xcb_generic_event_t *ret; |
692 | if(c->has_error) | | 692 | if(c->has_error) |
693 | return 0; | | 693 | return 0; |
694 | pthread_mutex_lock(&c->iolock); | | 694 | pthread_mutex_lock(&c->iolock); |
695 | /* get_event returns 0 on empty list. */ | | 695 | /* get_event returns 0 on empty list. */ |
696 | while(!(ret = get_event(c))) | | 696 | while(!(ret = get_event(c))) |
697 | if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0)) | | 697 | if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0)) |
698 | break; | | 698 | break; |
699 | | | 699 | |
700 | _xcb_in_wake_up_next_reader(c); | | 700 | _xcb_in_wake_up_next_reader(c); |
701 | pthread_mutex_unlock(&c->iolock); | | 701 | pthread_mutex_unlock(&c->iolock); |
702 | return ret; | | 702 | return ret; |
703 | } | | 703 | } |
704 | | | 704 | |
705 | static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued) | | 705 | static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued) |
706 | { | | 706 | { |
707 | xcb_generic_event_t *ret = 0; | | 707 | xcb_generic_event_t *ret = 0; |
708 | if(!c->has_error) | | 708 | if(!c->has_error) |
709 | { | | 709 | { |
710 | pthread_mutex_lock(&c->iolock); | | 710 | pthread_mutex_lock(&c->iolock); |
711 | /* FIXME: follow X meets Z architecture changes. */ | | 711 | /* FIXME: follow X meets Z architecture changes. */ |
712 | ret = get_event(c); | | 712 | ret = get_event(c); |
713 | if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ | | 713 | if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ |
714 | ret = get_event(c); | | 714 | ret = get_event(c); |
715 | pthread_mutex_unlock(&c->iolock); | | 715 | pthread_mutex_unlock(&c->iolock); |
716 | } | | 716 | } |
717 | return ret; | | 717 | return ret; |
718 | } | | 718 | } |
719 | | | 719 | |
/* Public: return the next event, reading from the connection if necessary;
 * 0 if none is available without blocking. */
xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 0);
}
724 | | | 724 | |
/* Public: return the next already-queued event without touching the socket;
 * 0 if the queue is empty. */
xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 1);
}
729 | | | 729 | |
/* Public: synchronously check a void request for an error.  Returns the
 * error (caller frees) or 0 if the request succeeded. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    /* If the request is at/after the next one expected to respond and not
     * yet known to be complete, force a round trip so that its outcome is
     * guaranteed to reach us. */
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    }
    reply = wait_for_reply(c, request, &ret);
    /* A void request can yield an error but never a reply. */
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
750 | | | 750 | |
751 | static xcb_generic_event_t *get_special_event(xcb_connection_t *c, | | 751 | static xcb_generic_event_t *get_special_event(xcb_connection_t *c, |
752 | xcb_special_event_t *se) | | 752 | xcb_special_event_t *se) |
753 | { | | 753 | { |
754 | xcb_generic_event_t *event = NULL; | | 754 | xcb_generic_event_t *event = NULL; |
755 | struct event_list *events; | | 755 | struct event_list *events; |
756 | | | 756 | |
757 | if ((events = se->events) != NULL) { | | 757 | if ((events = se->events) != NULL) { |
758 | event = events->event; | | 758 | event = events->event; |
759 | if (!(se->events = events->next)) | | 759 | if (!(se->events = events->next)) |
760 | se->events_tail = &se->events; | | 760 | se->events_tail = &se->events; |
761 | free (events); | | 761 | free (events); |
762 | } | | 762 | } |
763 | return event; | | 763 | return event; |
764 | } | | 764 | } |
765 | | | 765 | |
766 | xcb_generic_event_t *xcb_poll_for_special_event(xcb_connection_t *c, | | 766 | xcb_generic_event_t *xcb_poll_for_special_event(xcb_connection_t *c, |
767 | xcb_special_event_t *se) | | 767 | xcb_special_event_t *se) |
768 | { | | 768 | { |
769 | xcb_generic_event_t *event; | | 769 | xcb_generic_event_t *event; |
770 | | | 770 | |
771 | if(c->has_error) | | 771 | if(c->has_error) |
772 | return 0; | | 772 | return 0; |
773 | pthread_mutex_lock(&c->iolock); | | 773 | pthread_mutex_lock(&c->iolock); |
774 | event = get_special_event(c, se); | | 774 | event = get_special_event(c, se); |
775 | if(!event && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ | | 775 | if(!event && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */ |
776 | event = get_special_event(c, se); | | 776 | event = get_special_event(c, se); |
777 | pthread_mutex_unlock(&c->iolock); | | 777 | pthread_mutex_unlock(&c->iolock); |
778 | return event; | | 778 | return event; |
779 | } | | 779 | } |
780 | | | 780 | |
/* Public: block until an event for special-event listener `se` arrives
 * (or the connection breaks).  Returns 0 on error/shutdown. */
xcb_generic_event_t *xcb_wait_for_special_event(xcb_connection_t *c,
                                                xcb_special_event_t *se)
{
    special_list special;
    xcb_generic_event_t *event;

    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);

    /* Register as a waiter so arriving special events wake this thread. */
    insert_special(&c->in.special_waiters, &special, se);

    /* get_special_event returns 0 on empty list. */
    while(!(event = get_special_event(c, se)))
        if(!_xcb_conn_wait(c, &se->special_event_cond, 0, 0))
            break;

    remove_special(&c->in.special_waiters, &special);

    /* Hand the socket off to whichever waiter is now frontmost. */
    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return event;
}
804 | | | 804 | |
805 | xcb_special_event_t * | | 805 | xcb_special_event_t * |
806 | xcb_register_for_special_xge(xcb_connection_t *c, | | 806 | xcb_register_for_special_xge(xcb_connection_t *c, |
807 | xcb_extension_t *ext, | | 807 | xcb_extension_t *ext, |
808 | uint32_t eid, | | 808 | uint32_t eid, |
809 | uint32_t *stamp) | | 809 | uint32_t *stamp) |
810 | { | | 810 | { |
811 | xcb_special_event_t *se; | | 811 | xcb_special_event_t *se; |
812 | const xcb_query_extension_reply_t *ext_reply; | | 812 | const xcb_query_extension_reply_t *ext_reply; |
813 | | | 813 | |
814 | if(c->has_error) | | 814 | if(c->has_error) |
815 | return NULL; | | 815 | return NULL; |
816 | ext_reply = xcb_get_extension_data(c, ext); | | 816 | ext_reply = xcb_get_extension_data(c, ext); |
817 | if (!ext_reply) | | 817 | if (!ext_reply) |
818 | return NULL; | | 818 | return NULL; |
819 | pthread_mutex_lock(&c->iolock); | | 819 | pthread_mutex_lock(&c->iolock); |
820 | for (se = c->in.special_events; se; se = se->next) { | | 820 | for (se = c->in.special_events; se; se = se->next) { |
821 | if (se->extension == ext_reply->major_opcode && | | 821 | if (se->extension == ext_reply->major_opcode && |
822 | se->eid == eid) { | | 822 | se->eid == eid) { |
823 | pthread_mutex_unlock(&c->iolock); | | 823 | pthread_mutex_unlock(&c->iolock); |
824 | return NULL; | | 824 | return NULL; |
825 | } | | 825 | } |
826 | } | | 826 | } |
827 | se = calloc(1, sizeof(xcb_special_event_t)); | | 827 | se = calloc(1, sizeof(xcb_special_event_t)); |
828 | if (!se) { | | 828 | if (!se) { |
829 | pthread_mutex_unlock(&c->iolock); | | 829 | pthread_mutex_unlock(&c->iolock); |
830 | return NULL; | | 830 | return NULL; |
831 | } | | 831 | } |
832 | | | 832 | |
833 | se->extension = ext_reply->major_opcode; | | 833 | se->extension = ext_reply->major_opcode; |
834 | se->eid = eid; | | 834 | se->eid = eid; |
835 | | | 835 | |
836 | se->events = NULL; | | 836 | se->events = NULL; |
837 | se->events_tail = &se->events; | | 837 | se->events_tail = &se->events; |
838 | se->stamp = stamp; | | 838 | se->stamp = stamp; |
839 | | | 839 | |
840 | pthread_cond_init(&se->special_event_cond, 0); | | 840 | pthread_cond_init(&se->special_event_cond, 0); |
841 | | | 841 | |
842 | se->next = c->in.special_events; | | 842 | se->next = c->in.special_events; |
843 | c->in.special_events = se; | | 843 | c->in.special_events = se; |
844 | pthread_mutex_unlock(&c->iolock); | | 844 | pthread_mutex_unlock(&c->iolock); |
845 | return se; | | 845 | return se; |
846 | } | | 846 | } |
847 | | | 847 | |
848 | void | | 848 | void |
849 | xcb_unregister_for_special_event(xcb_connection_t *c, | | 849 | xcb_unregister_for_special_event(xcb_connection_t *c, |
850 | xcb_special_event_t *se) | | 850 | xcb_special_event_t *se) |
851 | { | | 851 | { |
852 | xcb_special_event_t *s, **prev; | | 852 | xcb_special_event_t *s, **prev; |
853 | struct event_list *events, *next; | | 853 | struct event_list *events, *next; |
854 | | | 854 | |
855 | if (!se) | | 855 | if (!se) |
856 | return; | | 856 | return; |
857 | | | 857 | |
858 | if (c->has_error) | | 858 | if (c->has_error) |
859 | return; | | 859 | return; |
860 | | | 860 | |
861 | pthread_mutex_lock(&c->iolock); | | 861 | pthread_mutex_lock(&c->iolock); |
862 | | | 862 | |
863 | for (prev = &c->in.special_events; (s = *prev) != NULL; prev = &(s->next)) { | | 863 | for (prev = &c->in.special_events; (s = *prev) != NULL; prev = &(s->next)) { |
864 | if (s == se) { | | 864 | if (s == se) { |
865 | *prev = se->next; | | 865 | *prev = se->next; |
866 | for (events = se->events; events; events = next) { | | 866 | for (events = se->events; events; events = next) { |
867 | next = events->next; | | 867 | next = events->next; |
868 | free (events->event); | | 868 | free (events->event); |
869 | free (events); | | 869 | free (events); |
870 | } | | 870 | } |
871 | pthread_cond_destroy(&se->special_event_cond); | | 871 | pthread_cond_destroy(&se->special_event_cond); |
872 | free (se); | | 872 | free (se); |
873 | break; | | 873 | break; |
874 | } | | 874 | } |
875 | } | | 875 | } |
876 | pthread_mutex_unlock(&c->iolock); | | 876 | pthread_mutex_unlock(&c->iolock); |
877 | } | | 877 | } |
878 | | | 878 | |
879 | /* Private interface */ | | 879 | /* Private interface */ |
880 | | | 880 | |
881 | int _xcb_in_init(_xcb_in *in) | | 881 | int _xcb_in_init(_xcb_in *in) |
882 | { | | 882 | { |
883 | if(pthread_cond_init(&in->event_cond, 0)) | | 883 | if(pthread_cond_init(&in->event_cond, 0)) |
884 | return 0; | | 884 | return 0; |
885 | in->reading = 0; | | 885 | in->reading = 0; |
886 | | | 886 | |
887 | in->queue_len = 0; | | 887 | in->queue_len = 0; |
888 | | | 888 | |
889 | in->request_read = 0; | | 889 | in->request_read = 0; |
890 | in->request_completed = 0; | | 890 | in->request_completed = 0; |
891 | | | 891 | |
892 | in->replies = _xcb_map_new(); | | 892 | in->replies = _xcb_map_new(); |
893 | if(!in->replies) | | 893 | if(!in->replies) |
894 | return 0; | | 894 | return 0; |
895 | | | 895 | |
896 | in->current_reply_tail = &in->current_reply; | | 896 | in->current_reply_tail = &in->current_reply; |
897 | in->events_tail = &in->events; | | 897 | in->events_tail = &in->events; |
898 | in->pending_replies_tail = &in->pending_replies; | | 898 | in->pending_replies_tail = &in->pending_replies; |
899 | | | 899 | |
900 | return 1; | | 900 | return 1; |
901 | } | | 901 | } |
902 | | | 902 | |
903 | void _xcb_in_destroy(_xcb_in *in) | | 903 | void _xcb_in_destroy(_xcb_in *in) |
904 | { | | 904 | { |
905 | pthread_cond_destroy(&in->event_cond); | | 905 | pthread_cond_destroy(&in->event_cond); |
906 | free_reply_list(in->current_reply); | | 906 | free_reply_list(in->current_reply); |
907 | _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list); | | 907 | _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list); |
908 | while(in->events) | | 908 | while(in->events) |
909 | { | | 909 | { |
910 | struct event_list *e = in->events; | | 910 | struct event_list *e = in->events; |
911 | in->events = e->next; | | 911 | in->events = e->next; |
912 | free(e->event); | | 912 | free(e->event); |
913 | free(e); | | 913 | free(e); |
914 | } | | 914 | } |
915 | while(in->pending_replies) | | 915 | while(in->pending_replies) |
916 | { | | 916 | { |
917 | pending_reply *pend = in->pending_replies; | | 917 | pending_reply *pend = in->pending_replies; |
918 | in->pending_replies = pend->next; | | 918 | in->pending_replies = pend->next; |
919 | free(pend); | | 919 | free(pend); |
920 | } | | 920 | } |
921 | } | | 921 | } |
922 | | | 922 | |
923 | void _xcb_in_wake_up_next_reader(xcb_connection_t *c) | | 923 | void _xcb_in_wake_up_next_reader(xcb_connection_t *c) |
924 | { | | 924 | { |
925 | int pthreadret; | | 925 | int pthreadret; |
926 | if(c->in.readers) | | 926 | if(c->in.readers) |
927 | pthreadret = pthread_cond_signal(c->in.readers->data); | | 927 | pthreadret = pthread_cond_signal(c->in.readers->data); |
928 | else if(c->in.special_waiters) | | 928 | else if(c->in.special_waiters) |
929 | pthreadret = pthread_cond_signal(&c->in.special_waiters->se->special_event_cond); | | 929 | pthreadret = pthread_cond_signal(&c->in.special_waiters->se->special_event_cond); |
930 | else | | 930 | else |
931 | pthreadret = pthread_cond_signal(&c->in.event_cond); | | 931 | pthreadret = pthread_cond_signal(&c->in.event_cond); |
932 | assert(pthreadret == 0); | | 932 | assert(pthreadret == 0); |
933 | } | | 933 | } |
934 | | | 934 | |
935 | int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags) | | 935 | int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags) |
936 | { | | 936 | { |
937 | pending_reply *pend = malloc(sizeof(pending_reply)); | | 937 | pending_reply *pend = malloc(sizeof(pending_reply)); |
938 | assert(workaround != WORKAROUND_NONE || flags != 0); | | 938 | assert(workaround != WORKAROUND_NONE || flags != 0); |
939 | if(!pend) | | 939 | if(!pend) |
940 | { | | 940 | { |
941 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT); | | 941 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT); |
942 | return 0; | | 942 | return 0; |
943 | } | | 943 | } |
944 | pend->first_request = pend->last_request = request; | | 944 | pend->first_request = pend->last_request = request; |
945 | pend->workaround = workaround; | | 945 | pend->workaround = workaround; |
946 | pend->flags = flags; | | 946 | pend->flags = flags; |
947 | pend->next = 0; | | 947 | pend->next = 0; |
948 | *c->in.pending_replies_tail = pend; | | 948 | *c->in.pending_replies_tail = pend; |
949 | c->in.pending_replies_tail = &pend->next; | | 949 | c->in.pending_replies_tail = &pend->next; |
950 | return 1; | | 950 | return 1; |
951 | } | | 951 | } |
952 | | | 952 | |
/* Finalize the pending_reply created when an external socket owner took the
 * socket: either close its sequence range or, if nothing was sent, remove
 * it again. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    /* Only act if the pending list is non-empty. */
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* The tail pointer addresses the last node's `next` field; recover
         * the node itself. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            if (XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->out.request)) {
                /* Requests were sent: close the range at the newest sequence
                 * number and make the record an ordinary pending reply. */
                pend->last_request = c->out.request;
                pend->workaround = WORKAROUND_NONE;
            } else {
                /* The socket was taken, but no requests were actually sent
                 * so just discard the pending_reply that was created.
                 */
                struct pending_reply **prev_next = &c->in.pending_replies;
                while (*prev_next != pend)
                    prev_next = &(*prev_next)->next;
                *prev_next = NULL;
                c->in.pending_replies_tail = prev_next;
                free(pend);
            }
        }
    }
}
978 | | | 978 | |
979 | int _xcb_in_read(xcb_connection_t *c) | | 979 | int _xcb_in_read(xcb_connection_t *c) |
980 | { | | 980 | { |
981 | int n; | | 981 | int n; |
982 | | | 982 | |
983 | #if HAVE_SENDMSG | | 983 | #if HAVE_SENDMSG |
984 | struct iovec iov = { | | 984 | struct iovec iov = { |
985 | .iov_base = c->in.queue + c->in.queue_len, | | 985 | .iov_base = c->in.queue + c->in.queue_len, |
986 | .iov_len = sizeof(c->in.queue) - c->in.queue_len, | | 986 | .iov_len = sizeof(c->in.queue) - c->in.queue_len, |
987 | }; | | 987 | }; |
988 | union { | | 988 | union { |
989 | struct cmsghdr cmsghdr; | | 989 | struct cmsghdr cmsghdr; |
990 | char buf[CMSG_SPACE(XCB_MAX_PASS_FD * sizeof(int))]; | | 990 | char buf[CMSG_SPACE(XCB_MAX_PASS_FD * sizeof(int))]; |
991 | } cmsgbuf; | | 991 | } cmsgbuf; |
992 | struct msghdr msg = { | | 992 | struct msghdr msg = { |
993 | .msg_name = NULL, | | 993 | .msg_name = NULL, |
994 | .msg_namelen = 0, | | 994 | .msg_namelen = 0, |
995 | .msg_iov = &iov, | | 995 | .msg_iov = &iov, |
996 | .msg_iovlen = 1, | | 996 | .msg_iovlen = 1, |
997 | .msg_control = cmsgbuf.buf, | | 997 | .msg_control = cmsgbuf.buf, |
998 | .msg_controllen = CMSG_SPACE(sizeof(int) * (XCB_MAX_PASS_FD - c->in.in_fd.nfd)), | | 998 | .msg_controllen = CMSG_SPACE(sizeof(int) * (XCB_MAX_PASS_FD - c->in.in_fd.nfd)), |
999 | }; | | 999 | }; |
1000 | n = recvmsg(c->fd, &msg, 0); | | 1000 | n = recvmsg(c->fd, &msg, 0); |
1001 | | | 1001 | |
1002 | /* Check for truncation errors. Only MSG_CTRUNC is | | 1002 | /* Check for truncation errors. Only MSG_CTRUNC is |
1003 | * probably possible here, which would indicate that | | 1003 | * probably possible here, which would indicate that |
1004 | * the sender tried to transmit more than XCB_MAX_PASS_FD | | 1004 | * the sender tried to transmit more than XCB_MAX_PASS_FD |
1005 | * file descriptors. | | 1005 | * file descriptors. |
1006 | */ | | 1006 | */ |
1007 | if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) { | | 1007 | if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) { |
1008 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED); | | 1008 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED); |
1009 | return 0; | | 1009 | return 0; |
1010 | } | | 1010 | } |
1011 | #else | | 1011 | #else |
1012 | n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0); | | 1012 | n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0); |
1013 | #endif | | 1013 | #endif |
1014 | if(n > 0) { | | 1014 | if(n > 0) { |
1015 | #if HAVE_SENDMSG | | 1015 | #if HAVE_SENDMSG |
1016 | struct cmsghdr *hdr; | | 1016 | struct cmsghdr *hdr; |
1017 | | | 1017 | |
1018 | if (msg.msg_controllen >= sizeof (struct cmsghdr)) { | | 1018 | if (msg.msg_controllen >= sizeof (struct cmsghdr)) { |
1019 | for (hdr = CMSG_FIRSTHDR(&msg); hdr; hdr = CMSG_NXTHDR(&msg, hdr)) { | | 1019 | for (hdr = CMSG_FIRSTHDR(&msg); hdr; hdr = CMSG_NXTHDR(&msg, hdr)) { |
1020 | if (hdr->cmsg_level == SOL_SOCKET && hdr->cmsg_type == SCM_RIGHTS) { | | 1020 | if (hdr->cmsg_level == SOL_SOCKET && hdr->cmsg_type == SCM_RIGHTS) { |
1021 | int nfd = (hdr->cmsg_len - CMSG_LEN(0)) / sizeof (int); | | 1021 | int nfd = (hdr->cmsg_len - CMSG_LEN(0)) / sizeof (int); |
1022 | memcpy(&c->in.in_fd.fd[c->in.in_fd.nfd], CMSG_DATA(hdr), nfd * sizeof (int)); | | 1022 | memcpy(&c->in.in_fd.fd[c->in.in_fd.nfd], CMSG_DATA(hdr), nfd * sizeof (int)); |
1023 | c->in.in_fd.nfd += nfd; | | 1023 | c->in.in_fd.nfd += nfd; |
1024 | } | | 1024 | } |
1025 | } | | 1025 | } |
1026 | } | | 1026 | } |
1027 | #endif | | 1027 | #endif |
| | | 1028 | c->in.total_read += n; |
1028 | c->in.queue_len += n; | | 1029 | c->in.queue_len += n; |
1029 | } | | 1030 | } |
1030 | while(read_packet(c)) | | 1031 | while(read_packet(c)) |
1031 | /* empty */; | | 1032 | /* empty */; |
1032 | #if HAVE_SENDMSG | | 1033 | #if HAVE_SENDMSG |
1033 | if (c->in.in_fd.nfd) { | | 1034 | if (c->in.in_fd.nfd) { |
1034 | c->in.in_fd.nfd -= c->in.in_fd.ifd; | | 1035 | c->in.in_fd.nfd -= c->in.in_fd.ifd; |
1035 | memmove(&c->in.in_fd.fd[0], | | 1036 | memmove(&c->in.in_fd.fd[0], |
1036 | &c->in.in_fd.fd[c->in.in_fd.ifd], | | 1037 | &c->in.in_fd.fd[c->in.in_fd.ifd], |
1037 | c->in.in_fd.nfd * sizeof (int)); | | 1038 | c->in.in_fd.nfd * sizeof (int)); |
1038 | c->in.in_fd.ifd = 0; | | 1039 | c->in.in_fd.ifd = 0; |
1039 | | | 1040 | |
1040 | /* If we have any left-over file descriptors after emptying | | 1041 | /* If we have any left-over file descriptors after emptying |
1041 | * the input buffer, then the server sent some that we weren't | | 1042 | * the input buffer, then the server sent some that we weren't |
1042 | * expecting. Close them and mark the connection as broken; | | 1043 | * expecting. Close them and mark the connection as broken; |
1043 | */ | | 1044 | */ |
1044 | if (c->in.queue_len == 0 && c->in.in_fd.nfd != 0) { | | 1045 | if (c->in.queue_len == 0 && c->in.in_fd.nfd != 0) { |
1045 | int i; | | 1046 | int i; |
1046 | for (i = 0; i < c->in.in_fd.nfd; i++) | | 1047 | for (i = 0; i < c->in.in_fd.nfd; i++) |
1047 | close(c->in.in_fd.fd[i]); | | 1048 | close(c->in.in_fd.fd[i]); |
1048 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED); | | 1049 | _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED); |
1049 | return 0; | | 1050 | return 0; |
1050 | } | | 1051 | } |
1051 | } | | 1052 | } |
1052 | #endif | | 1053 | #endif |
1053 | #ifndef _WIN32 | | 1054 | #ifndef _WIN32 |
1054 | if((n > 0) || (n < 0 && errno == EAGAIN)) | | 1055 | if((n > 0) || (n < 0 && (errno == EAGAIN || errno == EINTR))) |
1055 | #else | | 1056 | #else |
1056 | if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK)) | | 1057 | if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK)) |
1057 | #endif /* !_WIN32 */ | | 1058 | #endif /* !_WIN32 */ |
1058 | return 1; | | 1059 | return 1; |
1059 | _xcb_conn_shutdown(c, XCB_CONN_ERROR); | | 1060 | _xcb_conn_shutdown(c, XCB_CONN_ERROR); |
1060 | return 0; | | 1061 | return 0; |
1061 | } | | 1062 | } |
1062 | | | 1063 | |
1063 | int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len) | | 1064 | int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len) |
1064 | { | | 1065 | { |
1065 | int done = c->in.queue_len; | | 1066 | int done = c->in.queue_len; |
1066 | if(len < done) | | 1067 | if(len < done) |
1067 | done = len; | | 1068 | done = len; |
1068 | | | 1069 | |
1069 | memcpy(buf, c->in.queue, done); | | 1070 | memcpy(buf, c->in.queue, done); |
1070 | c->in.queue_len -= done; | | 1071 | c->in.queue_len -= done; |
1071 | memmove(c->in.queue, c->in.queue + done, c->in.queue_len); | | 1072 | memmove(c->in.queue, c->in.queue + done, c->in.queue_len); |
1072 | | | 1073 | |
1073 | if(len > done) | | 1074 | if(len > done) |
1074 | { | | 1075 | { |
1075 | int ret = read_block(c->fd, (char *) buf + done, len - done); | | 1076 | int ret = read_block(c->fd, (char *) buf + done, len - done); |
1076 | if(ret <= 0) | | 1077 | if(ret <= 0) |
1077 | { | | 1078 | { |
1078 | _xcb_conn_shutdown(c, XCB_CONN_ERROR); | | 1079 | _xcb_conn_shutdown(c, XCB_CONN_ERROR); |
1079 | return ret; | | 1080 | return ret; |
1080 | } | | 1081 | } |
1081 | } | | 1082 | } |
1082 | | | 1083 | |
1083 | return len; | | 1084 | return len; |
1084 | } | | 1085 | } |