Tue Nov 3 08:41:30 2020 UTC ()
Fix build on aa64


(skrll)
diff -r1.22 -r1.23 src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_2835_arm.c
diff -r1.6 -r1.7 src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_core.h

cvs diff -r1.22 -r1.23 src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_2835_arm.c (switch to unified diff)

--- src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_2835_arm.c 2020/09/26 12:58:23 1.22
+++ src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_2835_arm.c 2020/11/03 08:41:30 1.23
@@ -1,583 +1,585 @@ @@ -1,583 +1,585 @@
1/** 1/**
2 * Copyright (c) 2010-2012 Broadcom. All rights reserved. 2 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions 5 * modification, are permitted provided that the following conditions
6 * are met: 6 * are met:
7 * 1. Redistributions of source code must retain the above copyright 7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions, and the following disclaimer, 8 * notice, this list of conditions, and the following disclaimer,
9 * without modification. 9 * without modification.
10 * 2. Redistributions in binary form must reproduce the above copyright 10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the 11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution. 12 * documentation and/or other materials provided with the distribution.
13 * 3. The names of the above-listed copyright holders may not be used 13 * 3. The names of the above-listed copyright holders may not be used
14 * to endorse or promote products derived from this software without 14 * to endorse or promote products derived from this software without
15 * specific prior written permission. 15 * specific prior written permission.
16 * 16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the 17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2, as published by the Free 18 * GNU General Public License ("GPL") version 2, as published by the Free
19 * Software Foundation. 19 * Software Foundation.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include <interface/compat/vchi_bsd.h> 34#include <interface/compat/vchi_bsd.h>
35 35
36#include <sys/param.h> 36#include <sys/param.h>
37#include <sys/malloc.h> 37#include <sys/malloc.h>
38#include <sys/bus.h> 38#include <sys/bus.h>
39#include <sys/kmem.h> 39#include <sys/kmem.h>
40 40
41#include <linux/completion.h> 41#include <linux/completion.h>
42 42
43#include <uvm/uvm_extern.h> 43#include <uvm/uvm_extern.h>
44 44
 45#include <arm/cpufunc.h>
 46
45#include <arch/arm/broadcom/bcm2835_mbox.h> 47#include <arch/arm/broadcom/bcm2835_mbox.h>
46#include <arch/arm/broadcom/bcm2835var.h> 48#include <arch/arm/broadcom/bcm2835var.h>
47 49
48#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32) 50#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
49 51
50#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0 52#define VCHIQ_DOORBELL_IRQ IRQ_ARM_DOORBELL_0
51 53
52#define IS_USER_ADDRESS(va) \ 54#define IS_USER_ADDRESS(va) \
53 ((vaddr_t)(va) >= VM_MIN_ADDRESS && (vaddr_t)(va) < VM_MAX_ADDRESS) 55 ((vaddr_t)(va) >= VM_MIN_ADDRESS && (vaddr_t)(va) < VM_MAX_ADDRESS)
54 56
55#include "vchiq_arm.h" 57#include "vchiq_arm.h"
56#include "vchiq_2835.h" 58#include "vchiq_2835.h"
57#include "vchiq_netbsd.h" 59#include "vchiq_netbsd.h"
58#include "vchiq_connected.h" 60#include "vchiq_connected.h"
59 61
60#define VCPAGE_OFFSET 0x0fff 62#define VCPAGE_OFFSET 0x0fff
61#define VCPAGE_SHIFT 12 63#define VCPAGE_SHIFT 12
62 64
63#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2) 65#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
64 66
65typedef struct vchiq_2835_state_struct { 67typedef struct vchiq_2835_state_struct {
66 int inited; 68 int inited;
67 VCHIQ_ARM_STATE_T arm_state; 69 VCHIQ_ARM_STATE_T arm_state;
68} VCHIQ_2835_ARM_STATE_T; 70} VCHIQ_2835_ARM_STATE_T;
69 71
70/* BSD DMA */ 72/* BSD DMA */
71static bus_dma_tag_t dma_tag; 73static bus_dma_tag_t dma_tag;
72static bus_dmamap_t dma_map; 74static bus_dmamap_t dma_map;
73 75
74static unsigned int g_cache_line_size = CACHE_LINE_SIZE; 76static unsigned int g_cache_line_size = CACHE_LINE_SIZE;
75static unsigned int g_fragments_size; 77static unsigned int g_fragments_size;
76static char *g_fragments_base; 78static char *g_fragments_base;
77static char *g_free_fragments; 79static char *g_free_fragments;
78 80
79struct semaphore g_free_fragments_sema; 81struct semaphore g_free_fragments_sema;
80static struct semaphore g_free_fragments_mutex; 82static struct semaphore g_free_fragments_mutex;
81 83
82void 84void
83vchiq_platform_attach(bus_dma_tag_t tag) 85vchiq_platform_attach(bus_dma_tag_t tag)
84{ 86{
85 dma_tag = tag; 87 dma_tag = tag;
86} 88}
87 89
88int __init 90int __init
89vchiq_platform_init(VCHIQ_STATE_T *state) 91vchiq_platform_init(VCHIQ_STATE_T *state)
90{ 92{
91 VCHIQ_SLOT_ZERO_T *vchiq_slot_zero; 93 VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
92 bus_dma_segment_t dma_segs[1]; 94 bus_dma_segment_t dma_segs[1];
93 int dma_nsegs; 95 int dma_nsegs;
94 void *slot_mem; 96 void *slot_mem;
95 bus_addr_t slot_phys; 97 bus_addr_t slot_phys;
96 int slot_mem_size, frag_mem_size; 98 int slot_mem_size, frag_mem_size;
97 int err; 99 int err;
98 int i; 100 int i;
99 101
100 _sema_init(&g_free_fragments_mutex, 1); 102 _sema_init(&g_free_fragments_mutex, 1);
101 103
102 g_cache_line_size = 32; 104 g_cache_line_size = 32;
103 105
104 g_fragments_size = 2 * g_cache_line_size; 106 g_fragments_size = 2 * g_cache_line_size;
105 107
106 /* Allocate space for the channels in coherent memory */ 108 /* Allocate space for the channels in coherent memory */
107 slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE); 109 slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
108 frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS); 110 frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
109 111
110 dma_nsegs = __arraycount(dma_segs); 112 dma_nsegs = __arraycount(dma_segs);
111 err = bus_dmamem_alloc(dma_tag, 113 err = bus_dmamem_alloc(dma_tag,
112 slot_mem_size + frag_mem_size, PAGE_SIZE, 0, 114 slot_mem_size + frag_mem_size, PAGE_SIZE, 0,
113 dma_segs, dma_nsegs, &dma_nsegs, BUS_DMA_WAITOK); 115 dma_segs, dma_nsegs, &dma_nsegs, BUS_DMA_WAITOK);
114 if (err) { 116 if (err) {
115 vchiq_log_error(vchiq_core_log_level, "Unable to allocate channel memory"); 117 vchiq_log_error(vchiq_core_log_level, "Unable to allocate channel memory");
116 err = -ENOMEM; 118 err = -ENOMEM;
117 goto failed_alloc; 119 goto failed_alloc;
118 } 120 }
119 121
120 err = bus_dmamem_map(dma_tag, 122 err = bus_dmamem_map(dma_tag,
121 dma_segs, dma_nsegs, slot_mem_size + frag_mem_size, 123 dma_segs, dma_nsegs, slot_mem_size + frag_mem_size,
122 (void **)&slot_mem, BUS_DMA_COHERENT | BUS_DMA_WAITOK); 124 (void **)&slot_mem, BUS_DMA_COHERENT | BUS_DMA_WAITOK);
123 if (err) { 125 if (err) {
124 vchiq_log_error(vchiq_core_log_level, "Unable to map channel memory"); 126 vchiq_log_error(vchiq_core_log_level, "Unable to map channel memory");
125 err = -ENOMEM; 127 err = -ENOMEM;
126 goto failed_alloc; 128 goto failed_alloc;
127 } 129 }
128 130
129 err = bus_dmamap_create(dma_tag, 131 err = bus_dmamap_create(dma_tag,
130 slot_mem_size + frag_mem_size, 1, /* maxsize, nsegments */ 132 slot_mem_size + frag_mem_size, 1, /* maxsize, nsegments */
131 slot_mem_size + frag_mem_size, 0, /* maxsegsize, boundary */ 133 slot_mem_size + frag_mem_size, 0, /* maxsegsize, boundary */
132 BUS_DMA_WAITOK, 134 BUS_DMA_WAITOK,
133 &dma_map); 135 &dma_map);
134 if (err) { 136 if (err) {
135 vchiq_log_error(vchiq_core_log_level, "Unable to create DMA map"); 137 vchiq_log_error(vchiq_core_log_level, "Unable to create DMA map");
136 err = -ENOMEM; 138 err = -ENOMEM;
137 goto failed_alloc; 139 goto failed_alloc;
138 } 140 }
139 141
140 err = bus_dmamap_load(dma_tag, dma_map, slot_mem, 142 err = bus_dmamap_load(dma_tag, dma_map, slot_mem,
141 slot_mem_size + frag_mem_size, NULL, BUS_DMA_WAITOK); 143 slot_mem_size + frag_mem_size, NULL, BUS_DMA_WAITOK);
142 if (err) { 144 if (err) {
143 vchiq_log_error(vchiq_core_log_level, "cannot load DMA map (%d)", err); 145 vchiq_log_error(vchiq_core_log_level, "cannot load DMA map (%d)", err);
144 err = -ENOMEM; 146 err = -ENOMEM;
145 goto failed_load; 147 goto failed_load;
146 } 148 }
147 slot_phys = dma_map->dm_segs[0].ds_addr; 149 slot_phys = dma_map->dm_segs[0].ds_addr;
148 150
149 vchiq_log_info(vchiq_arm_log_level, 151 vchiq_log_info(vchiq_arm_log_level,
150 "%s: slot_phys = %lx\n", __func__, slot_phys); 152 "%s: slot_phys = %lx\n", __func__, slot_phys);
151 153
152 WARN_ON(((uintptr_t)slot_mem & (PAGE_SIZE - 1)) != 0); 154 WARN_ON(((uintptr_t)slot_mem & (PAGE_SIZE - 1)) != 0);
153 155
154 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size); 156 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
155 if (!vchiq_slot_zero) { 157 if (!vchiq_slot_zero) {
156 err = -EINVAL; 158 err = -EINVAL;
157 goto failed_init_slots; 159 goto failed_init_slots;
158 } 160 }
159 161
160 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] = 162 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
161 (int)slot_phys + slot_mem_size; 163 (int)slot_phys + slot_mem_size;
162 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] = 164 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
163 MAX_FRAGMENTS; 165 MAX_FRAGMENTS;
164 166
165 g_fragments_base = (char *)slot_mem + slot_mem_size; 167 g_fragments_base = (char *)slot_mem + slot_mem_size;
166 slot_mem_size += frag_mem_size; 168 slot_mem_size += frag_mem_size;
167 169
168 g_free_fragments = g_fragments_base; 170 g_free_fragments = g_fragments_base;
169 for (i = 0; i < (MAX_FRAGMENTS - 1); i++) { 171 for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
170 *(char **)&g_fragments_base[i*g_fragments_size] = 172 *(char **)&g_fragments_base[i*g_fragments_size] =
171 &g_fragments_base[(i + 1)*g_fragments_size]; 173 &g_fragments_base[(i + 1)*g_fragments_size];
172 } 174 }
173 *(char **)&g_fragments_base[i * g_fragments_size] = NULL; 175 *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
174 176
175 _sema_init(&g_free_fragments_sema, MAX_FRAGMENTS); 177 _sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
176 178
177 if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) != 179 if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
178 VCHIQ_SUCCESS) { 180 VCHIQ_SUCCESS) {
179 err = -EINVAL; 181 err = -EINVAL;
180 goto failed_vchiq_init; 182 goto failed_vchiq_init;
181 } 183 }
182 184
183 /* Send the base address of the slots to VideoCore */ 185 /* Send the base address of the slots to VideoCore */
184 dsb(sy); /* Ensure all writes have completed */ 186 dsb(sy); /* Ensure all writes have completed */
185 187
186 bus_dmamap_sync(dma_tag, dma_map, 0, slot_mem_size, 188 bus_dmamap_sync(dma_tag, dma_map, 0, slot_mem_size,
187 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
188 bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)slot_phys); 190 bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)slot_phys);
189 bus_dmamap_sync(dma_tag, dma_map, 0, slot_mem_size, 191 bus_dmamap_sync(dma_tag, dma_map, 0, slot_mem_size,
190 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 192 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
191 193
192 vchiq_log_info(vchiq_arm_log_level, 194 vchiq_log_info(vchiq_arm_log_level,
193 "vchiq_init - done (slots %p, phys %x)", 195 "vchiq_init - done (slots %p, phys %x)",
194 vchiq_slot_zero, (unsigned int)slot_phys); 196 vchiq_slot_zero, (unsigned int)slot_phys);
195 197
196 vchiq_call_connected_callbacks(); 198 vchiq_call_connected_callbacks();
197 199
198 return 0; 200 return 0;
199 201
200failed_vchiq_init: 202failed_vchiq_init:
201failed_init_slots: 203failed_init_slots:
202failed_load: 204failed_load:
203 bus_dmamap_unload(dma_tag, dma_map); 205 bus_dmamap_unload(dma_tag, dma_map);
204failed_alloc: 206failed_alloc:
205 bus_dmamap_destroy(dma_tag, dma_map); 207 bus_dmamap_destroy(dma_tag, dma_map);
206 208
207 return err; 209 return err;
208} 210}
209 211
210void __exit 212void __exit
211vchiq_platform_exit(VCHIQ_STATE_T *state) 213vchiq_platform_exit(VCHIQ_STATE_T *state)
212{ 214{
213 215
214 bus_dmamap_unload(dma_tag, dma_map); 216 bus_dmamap_unload(dma_tag, dma_map);
215 bus_dmamap_destroy(dma_tag, dma_map); 217 bus_dmamap_destroy(dma_tag, dma_map);
216} 218}
217 219
218 220
219VCHIQ_STATUS_T 221VCHIQ_STATUS_T
220vchiq_platform_init_state(VCHIQ_STATE_T *state) 222vchiq_platform_init_state(VCHIQ_STATE_T *state)
221{ 223{
222 VCHIQ_STATUS_T status = VCHIQ_SUCCESS; 224 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
223 state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL); 225 state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
224 ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1; 226 ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 1;
225 status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state); 227 status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state);
226 if(status != VCHIQ_SUCCESS) 228 if(status != VCHIQ_SUCCESS)
227 { 229 {
228 ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0; 230 ((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited = 0;
229 } 231 }
230 return status; 232 return status;
231} 233}
232 234
233VCHIQ_ARM_STATE_T* 235VCHIQ_ARM_STATE_T*
234vchiq_platform_get_arm_state(VCHIQ_STATE_T *state) 236vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
235{ 237{
236 if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited) 238 if(!((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->inited)
237 { 239 {
238 BUG(); 240 BUG();
239 } 241 }
240 return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state; 242 return &((VCHIQ_2835_ARM_STATE_T*)state->platform_state)->arm_state;
241} 243}
242 244
243int 245int
244vchiq_copy_from_user(void *dst, const void *src, int size) 246vchiq_copy_from_user(void *dst, const void *src, int size)
245{ 247{
246 vaddr_t va = (vaddr_t)src; 248 vaddr_t va = (vaddr_t)src;
247 249
248 if (IS_USER_ADDRESS(va)) { 250 if (IS_USER_ADDRESS(va)) {
249 int error = copyin(src, dst, size); 251 int error = copyin(src, dst, size);
250 return error ? VCHIQ_ERROR : VCHIQ_SUCCESS; 252 return error ? VCHIQ_ERROR : VCHIQ_SUCCESS;
251 } else { 253 } else {
252 kcopy(src, dst, size); 254 kcopy(src, dst, size);
253 return VCHIQ_SUCCESS; 255 return VCHIQ_SUCCESS;
254 } 256 }
255} 257}
256 258
257typedef struct bulkinfo_struct { 259typedef struct bulkinfo_struct {
258 void *pagelist; 260 void *pagelist;
259 bus_dma_segment_t pagelist_sgs[1]; 261 bus_dma_segment_t pagelist_sgs[1];
260 bus_size_t pagelist_size; 262 bus_size_t pagelist_size;
261 bus_dmamap_t pagelist_map; 263 bus_dmamap_t pagelist_map;
262 bus_dmamap_t dmamap; 264 bus_dmamap_t dmamap;
263 struct proc *proc; 265 struct proc *proc;
264 void *buf; 266 void *buf;
265 int size; 267 int size;
266} BULKINFO_T; 268} BULKINFO_T;
267 269
268/* There is a potential problem with partial cache lines (pages?) 270/* There is a potential problem with partial cache lines (pages?)
269** at the ends of the block when reading. If the CPU accessed anything in 271** at the ends of the block when reading. If the CPU accessed anything in
270** the same line (page?) then it may have pulled old data into the cache, 272** the same line (page?) then it may have pulled old data into the cache,
271** obscuring the new data underneath. We can solve this by transferring the 273** obscuring the new data underneath. We can solve this by transferring the
272** partial cache lines separately, and allowing the ARM to copy into the 274** partial cache lines separately, and allowing the ARM to copy into the
273** cached area. 275** cached area.
274*/ 276*/
275VCHIQ_STATUS_T 277VCHIQ_STATUS_T
276vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle, 278vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
277 void *buf, int size, int dir) 279 void *buf, int size, int dir)
278{ 280{
279 PAGELIST_T *pagelist; 281 PAGELIST_T *pagelist;
280 BULKINFO_T *bi; 282 BULKINFO_T *bi;
281 int nsegs; 283 int nsegs;
282 int ret; 284 int ret;
283 285
284 vchiq_log_info(vchiq_arm_log_level, 286 vchiq_log_info(vchiq_arm_log_level,
285 "%s: buf %p size %08x dir %s", __func__, buf, size, 287 "%s: buf %p size %08x dir %s", __func__, buf, size,
286 dir == VCHIQ_BULK_RECEIVE ? "read" : "write"); 288 dir == VCHIQ_BULK_RECEIVE ? "read" : "write");
287 289
288 vaddr_t va = (vaddr_t)buf; 290 vaddr_t va = (vaddr_t)buf;
289 const size_t maxsegs = atop(round_page(va + size) - trunc_page(va)); 291 const size_t maxsegs = atop(round_page(va + size) - trunc_page(va));
290 const int uvmflags = (dir == VCHIQ_BULK_RECEIVE ? 292 const int uvmflags = (dir == VCHIQ_BULK_RECEIVE ?
291 VM_PROT_READ : VM_PROT_WRITE); 293 VM_PROT_READ : VM_PROT_WRITE);
292 const int dmaflags = (dir == VCHIQ_BULK_RECEIVE ? 294 const int dmaflags = (dir == VCHIQ_BULK_RECEIVE ?
293 BUS_DMA_READ : BUS_DMA_WRITE); 295 BUS_DMA_READ : BUS_DMA_WRITE);
294 296
295 WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID); 297 WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
296 298
297 bi = kmem_alloc(sizeof(*bi), KM_SLEEP); 299 bi = kmem_alloc(sizeof(*bi), KM_SLEEP);
298 bi->buf = buf; 300 bi->buf = buf;
299 bi->size = size; 301 bi->size = size;
300 bi->pagelist_size = sizeof(PAGELIST_T) + 302 bi->pagelist_size = sizeof(PAGELIST_T) +
301 (maxsegs * sizeof(unsigned long)); 303 (maxsegs * sizeof(unsigned long));
302 bi->proc = curproc; 304 bi->proc = curproc;
303 305
304 ret = bus_dmamem_alloc(dma_tag, bi->pagelist_size, 306 ret = bus_dmamem_alloc(dma_tag, bi->pagelist_size,
305 0 /*CACHE_LINE_SIZE*/, 0, bi->pagelist_sgs, 307 0 /*CACHE_LINE_SIZE*/, 0, bi->pagelist_sgs,
306 __arraycount(bi->pagelist_sgs), &nsegs, BUS_DMA_WAITOK); 308 __arraycount(bi->pagelist_sgs), &nsegs, BUS_DMA_WAITOK);
307 309
308 if (ret != 0) 310 if (ret != 0)
309 goto fail1; 311 goto fail1;
310 312
311 ret = bus_dmamem_map(dma_tag, bi->pagelist_sgs, nsegs, 313 ret = bus_dmamem_map(dma_tag, bi->pagelist_sgs, nsegs,
312 bi->pagelist_size, &bi->pagelist, BUS_DMA_COHERENT | BUS_DMA_WAITOK); 314 bi->pagelist_size, &bi->pagelist, BUS_DMA_COHERENT | BUS_DMA_WAITOK);
313 if (ret != 0) 315 if (ret != 0)
314 goto fail2; 316 goto fail2;
315 317
316 pagelist = bi->pagelist; 318 pagelist = bi->pagelist;
317 319
318 ret = bus_dmamap_create(dma_tag, bi->pagelist_size, 320 ret = bus_dmamap_create(dma_tag, bi->pagelist_size,
319 nsegs, bi->pagelist_size, 0, BUS_DMA_WAITOK, &bi->pagelist_map); 321 nsegs, bi->pagelist_size, 0, BUS_DMA_WAITOK, &bi->pagelist_map);
320 if (ret != 0) 322 if (ret != 0)
321 goto fail3; 323 goto fail3;
322 324
323 ret = bus_dmamap_load(dma_tag, bi->pagelist_map, pagelist, 325 ret = bus_dmamap_load(dma_tag, bi->pagelist_map, pagelist,
324 bi->pagelist_size, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE); 326 bi->pagelist_size, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
325 if (ret != 0) 327 if (ret != 0)
326 goto fail4; 328 goto fail4;
327 329
328 /* 330 /*
329 * Need to wire the buffer pages in. 331 * Need to wire the buffer pages in.
330 */ 332 */
331 if (IS_USER_ADDRESS(buf)) { 333 if (IS_USER_ADDRESS(buf)) {
332 ret = uvm_vslock(bi->proc->p_vmspace, buf, size, uvmflags); 334 ret = uvm_vslock(bi->proc->p_vmspace, buf, size, uvmflags);
333 if (ret != 0) { 335 if (ret != 0) {
334 printf("%s: uvm_vslock failed (%d)\n", __func__, ret); 336 printf("%s: uvm_vslock failed (%d)\n", __func__, ret);
335 goto fail5; 337 goto fail5;
336 } 338 }
337 } 339 }
338 340
339 ret = bus_dmamap_create(dma_tag, size, maxsegs, size, 0, 341 ret = bus_dmamap_create(dma_tag, size, maxsegs, size, 0,
340 BUS_DMA_WAITOK, &bi->dmamap); 342 BUS_DMA_WAITOK, &bi->dmamap);
341 343
342 if (ret != 0) 344 if (ret != 0)
343 goto fail6; 345 goto fail6;
344 346
345 ret = bus_dmamap_load(dma_tag, bi->dmamap, buf, size, 347 ret = bus_dmamap_load(dma_tag, bi->dmamap, buf, size,
346 curproc, BUS_DMA_WAITOK | dmaflags); 348 curproc, BUS_DMA_WAITOK | dmaflags);
347 349
348 if (ret != 0) 350 if (ret != 0)
349 goto fail7; 351 goto fail7;
350 352
351 bulk->handle = memhandle; 353 bulk->handle = memhandle;
352 /* 354 /*
353 * We've now got the bus_addr_t for the pagelist we want the transfer 355 * We've now got the bus_addr_t for the pagelist we want the transfer
354 * to use. 356 * to use.
355 */ 357 */
356 bulk->data = (void *)bi->pagelist_map->dm_segs[0].ds_addr; 358 bulk->data = (void *)bi->pagelist_map->dm_segs[0].ds_addr;
357 359
358 pagelist->type = (dir == VCHIQ_BULK_RECEIVE) ? 360 pagelist->type = (dir == VCHIQ_BULK_RECEIVE) ?
359 PAGELIST_READ : PAGELIST_WRITE; 361 PAGELIST_READ : PAGELIST_WRITE;
360 pagelist->length = size; 362 pagelist->length = size;
361 pagelist->offset = va & VCPAGE_OFFSET; 363 pagelist->offset = va & VCPAGE_OFFSET;
362 364
363 /* 365 /*
364 * busdma already coalesces contiguous pages for us 366 * busdma already coalesces contiguous pages for us
365 */ 367 */
366 for (int i = 0; i < bi->dmamap->dm_nsegs; i++) { 368 for (int i = 0; i < bi->dmamap->dm_nsegs; i++) {
367 bus_addr_t addr = bi->dmamap->dm_segs[i].ds_addr; 369 bus_addr_t addr = bi->dmamap->dm_segs[i].ds_addr;
368 bus_size_t len = bi->dmamap->dm_segs[i].ds_len; 370 bus_size_t len = bi->dmamap->dm_segs[i].ds_len;
369 bus_size_t off = addr & VCPAGE_OFFSET; 371 bus_size_t off = addr & VCPAGE_OFFSET;
370 int npgs = ((off + len + VCPAGE_OFFSET) >> VCPAGE_SHIFT); 372 int npgs = ((off + len + VCPAGE_OFFSET) >> VCPAGE_SHIFT);
371 373
372 pagelist->addrs[i] = addr & ~VCPAGE_OFFSET; 374 pagelist->addrs[i] = addr & ~VCPAGE_OFFSET;
373 pagelist->addrs[i] |= npgs - 1; 375 pagelist->addrs[i] |= npgs - 1;
374 } 376 }
375 377
376 /* Partial cache lines (fragments) require special measures */ 378 /* Partial cache lines (fragments) require special measures */
377 if ((pagelist->type == PAGELIST_READ) && 379 if ((pagelist->type == PAGELIST_READ) &&
378 ((pagelist->offset & (g_cache_line_size - 1)) || 380 ((pagelist->offset & (g_cache_line_size - 1)) ||
379 ((pagelist->offset + pagelist->length) & (g_cache_line_size - 1)))) { 381 ((pagelist->offset + pagelist->length) & (g_cache_line_size - 1)))) {
380 char *fragments; 382 char *fragments;
381 383
382 if (down_interruptible(&g_free_fragments_sema) != 0) { 384 if (down_interruptible(&g_free_fragments_sema) != 0) {
383 goto fail7; 385 goto fail7;
384 } 386 }
385 387
386 WARN_ON(g_free_fragments == NULL); 388 WARN_ON(g_free_fragments == NULL);
387 389
388 down(&g_free_fragments_mutex); 390 down(&g_free_fragments_mutex);
389 fragments = g_free_fragments; 391 fragments = g_free_fragments;
390 WARN_ON(fragments == NULL); 392 WARN_ON(fragments == NULL);
391 g_free_fragments = *(char **) g_free_fragments; 393 g_free_fragments = *(char **) g_free_fragments;
392 up(&g_free_fragments_mutex); 394 up(&g_free_fragments_mutex);
393 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS + 395 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
394 (fragments - g_fragments_base) / g_fragments_size; 396 (fragments - g_fragments_base) / g_fragments_size;
395 bus_dmamap_sync(dma_tag, dma_map, 397 bus_dmamap_sync(dma_tag, dma_map,
396 (char *)fragments - g_fragments_base, sizeof(*fragments), 398 (char *)fragments - g_fragments_base, sizeof(*fragments),
397 BUS_DMASYNC_PREREAD); 399 BUS_DMASYNC_PREREAD);
398 } 400 }
399 401
400 /* 402 /*
401 * Store the BULKINFO_T address in remote_data, which isn't used by the 403 * Store the BULKINFO_T address in remote_data, which isn't used by the
402 * slave. 404 * slave.
403 */ 405 */
404 bulk->remote_data = bi; 406 bulk->remote_data = bi;
405 407
406 bus_dmamap_sync(dma_tag, bi->pagelist_map, 0, 408 bus_dmamap_sync(dma_tag, bi->pagelist_map, 0,
407 bi->pagelist_size, BUS_DMASYNC_PREWRITE); 409 bi->pagelist_size, BUS_DMASYNC_PREWRITE);
408 410
409 bus_dmamap_sync(dma_tag, bi->dmamap, 0, bi->size, 411 bus_dmamap_sync(dma_tag, bi->dmamap, 0, bi->size,
410 pagelist->type == PAGELIST_WRITE ? 412 pagelist->type == PAGELIST_WRITE ?
411 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD); 413 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
412 414
413 return VCHIQ_SUCCESS; 415 return VCHIQ_SUCCESS;
414 416
415fail7: 417fail7:
416 bus_dmamap_destroy(dma_tag, bi->dmamap); 418 bus_dmamap_destroy(dma_tag, bi->dmamap);
417 419
418fail6: 420fail6:
419 if (IS_USER_ADDRESS(bi->buf)) 421 if (IS_USER_ADDRESS(bi->buf))
420 uvm_vsunlock(curproc->p_vmspace, bi->buf, bi->size); 422 uvm_vsunlock(curproc->p_vmspace, bi->buf, bi->size);
421 423
422fail5: 424fail5:
423 bus_dmamap_unload(dma_tag, bi->pagelist_map); 425 bus_dmamap_unload(dma_tag, bi->pagelist_map);
424 426
425fail4: 427fail4:
426 bus_dmamap_destroy(dma_tag, bi->pagelist_map); 428 bus_dmamap_destroy(dma_tag, bi->pagelist_map);
427 429
428fail3: 430fail3:
429 bus_dmamem_unmap(dma_tag, bi->pagelist, bi->pagelist_size); 431 bus_dmamem_unmap(dma_tag, bi->pagelist, bi->pagelist_size);
430 432
431fail2: 433fail2:
432 bus_dmamem_free(dma_tag, bi->pagelist_sgs, 434 bus_dmamem_free(dma_tag, bi->pagelist_sgs,
433 __arraycount(bi->pagelist_sgs)); 435 __arraycount(bi->pagelist_sgs));
434 436
435fail1: 437fail1:
436 kmem_free(bi, sizeof(*bi)); 438 kmem_free(bi, sizeof(*bi));
437 return VCHIQ_ERROR; 439 return VCHIQ_ERROR;
438} 440}
439 441
440void 442void
441vchiq_complete_bulk(VCHIQ_BULK_T *bulk) 443vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
442{ 444{
443 if (bulk && bulk->remote_data && bulk->actual) { 445 if (bulk && bulk->remote_data && bulk->actual) {
444 int actual = bulk->actual; 446 int actual = bulk->actual;
445 BULKINFO_T *bi = bulk->remote_data; 447 BULKINFO_T *bi = bulk->remote_data;
446 PAGELIST_T *pagelist = bi->pagelist; 448 PAGELIST_T *pagelist = bi->pagelist;
447 449
448 vchiq_log_trace(vchiq_arm_log_level, 450 vchiq_log_trace(vchiq_arm_log_level,
449 "free_pagelist - %p, %d", pagelist, actual); 451 "free_pagelist - %p, %d", pagelist, actual);
450 452
451 bus_dmamap_sync(dma_tag, bi->pagelist_map, 0, 453 bus_dmamap_sync(dma_tag, bi->pagelist_map, 0,
452 bi->pagelist_size, BUS_DMASYNC_POSTWRITE); 454 bi->pagelist_size, BUS_DMASYNC_POSTWRITE);
453 455
454 bus_dmamap_sync(dma_tag, bi->dmamap, 0, bi->size, 456 bus_dmamap_sync(dma_tag, bi->dmamap, 0, bi->size,
455 pagelist->type == PAGELIST_WRITE ? 457 pagelist->type == PAGELIST_WRITE ?
456 BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD); 458 BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
457 459
458 /* Deal with any partial cache lines (fragments) */ 460 /* Deal with any partial cache lines (fragments) */
459 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) { 461 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
460 char *fragments = g_fragments_base + 462 char *fragments = g_fragments_base +
461 (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) * 463 (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
462 g_fragments_size; 464 g_fragments_size;
463 int head_bytes, tail_bytes; 465 int head_bytes, tail_bytes;
464 466
465 bus_dmamap_sync(dma_tag, dma_map, 467 bus_dmamap_sync(dma_tag, dma_map,
466 (char *)fragments - g_fragments_base, g_fragments_size, 468 (char *)fragments - g_fragments_base, g_fragments_size,
467 BUS_DMASYNC_POSTREAD); 469 BUS_DMASYNC_POSTREAD);
468 470
469 head_bytes = (g_cache_line_size - pagelist->offset) & 471 head_bytes = (g_cache_line_size - pagelist->offset) &
470 (g_cache_line_size - 1); 472 (g_cache_line_size - 1);
471 tail_bytes = (pagelist->offset + actual) & 473 tail_bytes = (pagelist->offset + actual) &
472 (g_cache_line_size - 1); 474 (g_cache_line_size - 1);
473 475
474 if ((actual >= 0) && (head_bytes != 0)) { 476 if ((actual >= 0) && (head_bytes != 0)) {
475 if (head_bytes > actual) 477 if (head_bytes > actual)
476 head_bytes = actual; 478 head_bytes = actual;
477 479
478 if (IS_USER_ADDRESS(bi->buf)) { 480 if (IS_USER_ADDRESS(bi->buf)) {
479 copyout_proc(bi->proc, fragments, 481 copyout_proc(bi->proc, fragments,
480 bi->buf, head_bytes); 482 bi->buf, head_bytes);
481 } else { 483 } else {
482 kcopy(fragments, bi->buf, head_bytes); 484 kcopy(fragments, bi->buf, head_bytes);
483 } 485 }
484 } 486 }
485 if ((actual >= 0) && (head_bytes < actual) && 487 if ((actual >= 0) && (head_bytes < actual) &&
486 (tail_bytes != 0)) { 488 (tail_bytes != 0)) {
487 void *t = (char *)bi->buf + bi->size - 489 void *t = (char *)bi->buf + bi->size -
488 tail_bytes; 490 tail_bytes;
489 491
490 if (IS_USER_ADDRESS(bi->buf)) { 492 if (IS_USER_ADDRESS(bi->buf)) {
491 copyout_proc(bi->proc, 493 copyout_proc(bi->proc,
492 fragments + g_cache_line_size, t, 494 fragments + g_cache_line_size, t,
493 tail_bytes); 495 tail_bytes);
494 } else { 496 } else {
495 kcopy(fragments + g_cache_line_size, t, 497 kcopy(fragments + g_cache_line_size, t,
496 tail_bytes); 498 tail_bytes);
497 } 499 }
498 } 500 }
499 501
500 down(&g_free_fragments_mutex); 502 down(&g_free_fragments_mutex);
501 *(char **)fragments = g_free_fragments; 503 *(char **)fragments = g_free_fragments;
502 g_free_fragments = fragments; 504 g_free_fragments = fragments;
503 up(&g_free_fragments_mutex); 505 up(&g_free_fragments_mutex);
504 up(&g_free_fragments_sema); 506 up(&g_free_fragments_sema);
505 } 507 }
506 bus_dmamap_unload(dma_tag, bi->dmamap); 508 bus_dmamap_unload(dma_tag, bi->dmamap);
507 bus_dmamap_destroy(dma_tag, bi->dmamap); 509 bus_dmamap_destroy(dma_tag, bi->dmamap);
508 if (IS_USER_ADDRESS(bi->buf)) 510 if (IS_USER_ADDRESS(bi->buf))
509 uvm_vsunlock(bi->proc->p_vmspace, bi->buf, bi->size); 511 uvm_vsunlock(bi->proc->p_vmspace, bi->buf, bi->size);
510 512
511 bus_dmamap_unload(dma_tag, bi->pagelist_map); 513 bus_dmamap_unload(dma_tag, bi->pagelist_map);
512 bus_dmamap_destroy(dma_tag, bi->pagelist_map); 514 bus_dmamap_destroy(dma_tag, bi->pagelist_map);
513 bus_dmamem_unmap(dma_tag, bi->pagelist, 515 bus_dmamem_unmap(dma_tag, bi->pagelist,
514 bi->pagelist_size); 516 bi->pagelist_size);
515 bus_dmamem_free(dma_tag, bi->pagelist_sgs, 517 bus_dmamem_free(dma_tag, bi->pagelist_sgs,
516 __arraycount(bi->pagelist_sgs)); 518 __arraycount(bi->pagelist_sgs));
517 kmem_free(bi, sizeof(*bi)); 519 kmem_free(bi, sizeof(*bi));
518 } 520 }
519} 521}
520 522
521void 523void
522vchiq_transfer_bulk(VCHIQ_BULK_T *bulk) 524vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
523{ 525{
524 /* 526 /*
525 * This should only be called on the master (VideoCore) side, but 527 * This should only be called on the master (VideoCore) side, but
526 * provide an implementation to avoid the need for ifdefery. 528 * provide an implementation to avoid the need for ifdefery.
527 */ 529 */
528 BUG(); 530 BUG();
529} 531}
530 532
531void 533void
532vchiq_dump_platform_state(void *dump_context) 534vchiq_dump_platform_state(void *dump_context)
533{ 535{
534 char buf[80]; 536 char buf[80];
535 int len; 537 int len;
536 len = snprintf(buf, sizeof(buf), 538 len = snprintf(buf, sizeof(buf),
537 " Platform: 2835 (VC master)"); 539 " Platform: 2835 (VC master)");
538 vchiq_dump(dump_context, buf, len + 1); 540 vchiq_dump(dump_context, buf, len + 1);
539} 541}
540 542
541VCHIQ_STATUS_T 543VCHIQ_STATUS_T
542vchiq_platform_suspend(VCHIQ_STATE_T *state) 544vchiq_platform_suspend(VCHIQ_STATE_T *state)
543{ 545{
544 return VCHIQ_ERROR; 546 return VCHIQ_ERROR;
545} 547}
546 548
547VCHIQ_STATUS_T 549VCHIQ_STATUS_T
548vchiq_platform_resume(VCHIQ_STATE_T *state) 550vchiq_platform_resume(VCHIQ_STATE_T *state)
549{ 551{
550 return VCHIQ_SUCCESS; 552 return VCHIQ_SUCCESS;
551} 553}
552 554
553void 555void
554vchiq_platform_paused(VCHIQ_STATE_T *state) 556vchiq_platform_paused(VCHIQ_STATE_T *state)
555{ 557{
556} 558}
557 559
558void 560void
559vchiq_platform_resumed(VCHIQ_STATE_T *state) 561vchiq_platform_resumed(VCHIQ_STATE_T *state)
560{ 562{
561} 563}
562 564
563int 565int
564vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state) 566vchiq_platform_videocore_wanted(VCHIQ_STATE_T* state)
565{ 567{
566 return 1; // autosuspend not supported - videocore always wanted 568 return 1; // autosuspend not supported - videocore always wanted
567} 569}
568 570
569int 571int
570vchiq_platform_use_suspend_timer(void) 572vchiq_platform_use_suspend_timer(void)
571{ 573{
572 return 0; 574 return 0;
573} 575}
574void 576void
575vchiq_dump_platform_use_state(VCHIQ_STATE_T *state) 577vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
576{ 578{
577 vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use"); 579 vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
578} 580}
579void 581void
580vchiq_platform_handle_timeout(VCHIQ_STATE_T *state) 582vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
581{ 583{
582 (void)state; 584 (void)state;
583} 585}

cvs diff -r1.6 -r1.7 src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_core.h (switch to unified diff)

--- src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_core.h 2020/09/26 12:58:23 1.6
+++ src/sys/external/bsd/vchiq/dist/interface/vchiq_arm/vchiq_core.h 2020/11/03 08:41:30 1.7
@@ -1,717 +1,719 @@ @@ -1,717 +1,719 @@
1/** 1/**
2 * Copyright (c) 2010-2012 Broadcom. All rights reserved. 2 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions 5 * modification, are permitted provided that the following conditions
6 * are met: 6 * are met:
7 * 1. Redistributions of source code must retain the above copyright 7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions, and the following disclaimer, 8 * notice, this list of conditions, and the following disclaimer,
9 * without modification. 9 * without modification.
10 * 2. Redistributions in binary form must reproduce the above copyright 10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the 11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution. 12 * documentation and/or other materials provided with the distribution.
13 * 3. The names of the above-listed copyright holders may not be used 13 * 3. The names of the above-listed copyright holders may not be used
14 * to endorse or promote products derived from this software without 14 * to endorse or promote products derived from this software without
15 * specific prior written permission. 15 * specific prior written permission.
16 * 16 *
17 * ALTERNATIVELY, this software may be distributed under the terms of the 17 * ALTERNATIVELY, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2, as published by the Free 18 * GNU General Public License ("GPL") version 2, as published by the Free
19 * Software Foundation. 19 * Software Foundation.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 25 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 28 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#ifndef VCHIQ_CORE_H 34#ifndef VCHIQ_CORE_H
35#define VCHIQ_CORE_H 35#define VCHIQ_CORE_H
36 36
37#include <interface/compat/vchi_bsd.h> 37#include <interface/compat/vchi_bsd.h>
38#include <linux/list.h> 38#include <linux/list.h>
39 39
 40#include <arm/cpufunc.h>
 41
40#include "vchiq_cfg.h" 42#include "vchiq_cfg.h"
41 43
42#include "vchiq.h" 44#include "vchiq.h"
43 45
44/* Run time control of log level, based on KERN_XXX level. */ 46/* Run time control of log level, based on KERN_XXX level. */
45#ifndef VCHIQ_LOG_DEFAULT 47#ifndef VCHIQ_LOG_DEFAULT
46#define VCHIQ_LOG_DEFAULT 7 48#define VCHIQ_LOG_DEFAULT 7
47#endif 49#endif
48#define VCHIQ_LOG_ERROR 3 50#define VCHIQ_LOG_ERROR 3
49#define VCHIQ_LOG_WARNING 4 51#define VCHIQ_LOG_WARNING 4
50#define VCHIQ_LOG_INFO 6 52#define VCHIQ_LOG_INFO 6
51#define VCHIQ_LOG_TRACE 7 53#define VCHIQ_LOG_TRACE 7
52 54
53#define VCHIQ_LOG_PREFIX "vchiq: " 55#define VCHIQ_LOG_PREFIX "vchiq: "
54 56
55#ifndef vchiq_log_error 57#ifndef vchiq_log_error
56#define vchiq_log_error(cat, fmt, ...) \ 58#define vchiq_log_error(cat, fmt, ...) \
57 do { if (cat >= VCHIQ_LOG_ERROR) \ 59 do { if (cat >= VCHIQ_LOG_ERROR) \
58 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0) 60 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
59#endif 61#endif
60#ifndef vchiq_log_warning 62#ifndef vchiq_log_warning
61#define vchiq_log_warning(cat, fmt, ...) \ 63#define vchiq_log_warning(cat, fmt, ...) \
62 do { if (cat >= VCHIQ_LOG_WARNING) \ 64 do { if (cat >= VCHIQ_LOG_WARNING) \
63 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0) 65 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
64#endif 66#endif
65#ifndef vchiq_log_info 67#ifndef vchiq_log_info
66#define vchiq_log_info(cat, fmt, ...) \ 68#define vchiq_log_info(cat, fmt, ...) \
67 do { if (cat >= VCHIQ_LOG_INFO) \ 69 do { if (cat >= VCHIQ_LOG_INFO) \
68 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0) 70 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
69#endif 71#endif
70#ifndef vchiq_log_trace 72#ifndef vchiq_log_trace
71#define vchiq_log_trace(cat, fmt, ...) \ 73#define vchiq_log_trace(cat, fmt, ...) \
72 do { if (cat >= VCHIQ_LOG_TRACE) \ 74 do { if (cat >= VCHIQ_LOG_TRACE) \
73 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0) 75 printf_tolog(VCHIQ_LOG_PREFIX fmt "\n", ##__VA_ARGS__); } while (0)
74#endif 76#endif
75 77
76#define vchiq_loud_error(...) \ 78#define vchiq_loud_error(...) \
77 vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__) 79 vchiq_log_error(vchiq_core_log_level, "===== " __VA_ARGS__)
78 80
79#ifndef vchiq_static_assert 81#ifndef vchiq_static_assert
80#define vchiq_static_assert(cond) __attribute__((unused)) \ 82#define vchiq_static_assert(cond) __attribute__((unused)) \
81 extern int vchiq_static_assert[(cond) ? 1 : -1] 83 extern int vchiq_static_assert[(cond) ? 1 : -1]
82#endif 84#endif
83 85
84#define IS_POW2(x) (x && ((x & (x - 1)) == 0)) 86#define IS_POW2(x) (x && ((x & (x - 1)) == 0))
85 87
86/* Ensure that the slot size and maximum number of slots are powers of 2 */ 88/* Ensure that the slot size and maximum number of slots are powers of 2 */
87vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE)); 89vchiq_static_assert(IS_POW2(VCHIQ_SLOT_SIZE));
88vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS)); 90vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS));
89vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE)); 91vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
90 92
91#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1) 93#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
92#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1) 94#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
93#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \ 95#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(VCHIQ_SLOT_ZERO_T) + \
94 VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE) 96 VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
95 97
96#define VCHIQ_MSG_PADDING 0 /* - */ 98#define VCHIQ_MSG_PADDING 0 /* - */
97#define VCHIQ_MSG_CONNECT 1 /* - */ 99#define VCHIQ_MSG_CONNECT 1 /* - */
98#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */ 100#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
99#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */ 101#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
100#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */ 102#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
101#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */ 103#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
102#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */ 104#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
103#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */ 105#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
104#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */ 106#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
105#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */ 107#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
106#define VCHIQ_MSG_PAUSE 10 /* - */ 108#define VCHIQ_MSG_PAUSE 10 /* - */
107#define VCHIQ_MSG_RESUME 11 /* - */ 109#define VCHIQ_MSG_RESUME 11 /* - */
108#define VCHIQ_MSG_REMOTE_USE 12 /* - */ 110#define VCHIQ_MSG_REMOTE_USE 12 /* - */
109#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */ 111#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
110#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */ 112#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
111 113
112#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1) 114#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
113#define VCHIQ_PORT_FREE 0x1000 115#define VCHIQ_PORT_FREE 0x1000
114#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE) 116#define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
115#define VCHIQ_MAKE_MSG(type, srcport, dstport) \ 117#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
116 ((type<<24) | (srcport<<12) | (dstport<<0)) 118 ((type<<24) | (srcport<<12) | (dstport<<0))
117#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24) 119#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
118#define VCHIQ_MSG_SRCPORT(msgid) \ 120#define VCHIQ_MSG_SRCPORT(msgid) \
119 (unsigned short)(((unsigned int)msgid >> 12) & 0xfff) 121 (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
120#define VCHIQ_MSG_DSTPORT(msgid) \ 122#define VCHIQ_MSG_DSTPORT(msgid) \
121 ((unsigned short)msgid & 0xfff) 123 ((unsigned short)msgid & 0xfff)
122 124
123#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \ 125#define VCHIQ_FOURCC_AS_4CHARS(fourcc) \
124 ((fourcc) >> 24) & 0xff, \ 126 ((fourcc) >> 24) & 0xff, \
125 ((fourcc) >> 16) & 0xff, \ 127 ((fourcc) >> 16) & 0xff, \
126 ((fourcc) >> 8) & 0xff, \ 128 ((fourcc) >> 8) & 0xff, \
127 (fourcc) & 0xff 129 (fourcc) & 0xff
128 130
129/* Ensure the fields are wide enough */ 131/* Ensure the fields are wide enough */
130vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX)) 132vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
131 == 0); 133 == 0);
132vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0); 134vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
133vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX < 135vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
134 (unsigned int)VCHIQ_PORT_FREE); 136 (unsigned int)VCHIQ_PORT_FREE);
135 137
136#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0) 138#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
137#define VCHIQ_MSGID_CLAIMED 0x40000000 139#define VCHIQ_MSGID_CLAIMED 0x40000000
138 140
139#define VCHIQ_FOURCC_INVALID 0x00000000 141#define VCHIQ_FOURCC_INVALID 0x00000000
140#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID) 142#define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
141 143
142#define VCHIQ_BULK_ACTUAL_ABORTED -1 144#define VCHIQ_BULK_ACTUAL_ABORTED -1
143 145
144typedef uint32_t BITSET_T; 146typedef uint32_t BITSET_T;
145 147
146vchiq_static_assert((sizeof(BITSET_T) * 8) == 32); 148vchiq_static_assert((sizeof(BITSET_T) * 8) == 32);
147 149
148#define BITSET_SIZE(b) ((b + 31) >> 5) 150#define BITSET_SIZE(b) ((b + 31) >> 5)
149#define BITSET_WORD(b) (b >> 5) 151#define BITSET_WORD(b) (b >> 5)
150#define BITSET_BIT(b) (1 << (b & 31)) 152#define BITSET_BIT(b) (1 << (b & 31))
151#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs)) 153#define BITSET_ZERO(bs) memset(bs, 0, sizeof(bs))
152#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b)) 154#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
153#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b)) 155#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
154#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b)) 156#define BITSET_CLR(bs, b) (bs[BITSET_WORD(b)] &= ~BITSET_BIT(b))
155 157
156#if VCHIQ_ENABLE_STATS 158#if VCHIQ_ENABLE_STATS
157#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++) 159#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
158#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++) 160#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
159#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \ 161#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
160 (service->stats. stat += addend) 162 (service->stats. stat += addend)
161#else 163#else
162#define VCHIQ_STATS_INC(state, stat) ((void)0) 164#define VCHIQ_STATS_INC(state, stat) ((void)0)
163#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0) 165#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
164#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0) 166#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
165#endif 167#endif
166 168
167enum { 169enum {
168 DEBUG_ENTRIES, 170 DEBUG_ENTRIES,
169#if VCHIQ_ENABLE_DEBUG 171#if VCHIQ_ENABLE_DEBUG
170 DEBUG_SLOT_HANDLER_COUNT, 172 DEBUG_SLOT_HANDLER_COUNT,
171 DEBUG_SLOT_HANDLER_LINE, 173 DEBUG_SLOT_HANDLER_LINE,
172 DEBUG_PARSE_LINE, 174 DEBUG_PARSE_LINE,
173 DEBUG_PARSE_HEADER, 175 DEBUG_PARSE_HEADER,
174 DEBUG_PARSE_MSGID, 176 DEBUG_PARSE_MSGID,
175 DEBUG_AWAIT_COMPLETION_LINE, 177 DEBUG_AWAIT_COMPLETION_LINE,
176 DEBUG_DEQUEUE_MESSAGE_LINE, 178 DEBUG_DEQUEUE_MESSAGE_LINE,
177 DEBUG_SERVICE_CALLBACK_LINE, 179 DEBUG_SERVICE_CALLBACK_LINE,
178 DEBUG_MSG_QUEUE_FULL_COUNT, 180 DEBUG_MSG_QUEUE_FULL_COUNT,
179 DEBUG_COMPLETION_QUEUE_FULL_COUNT, 181 DEBUG_COMPLETION_QUEUE_FULL_COUNT,
180#endif 182#endif
181 DEBUG_MAX 183 DEBUG_MAX
182}; 184};
183 185
184#if VCHIQ_ENABLE_DEBUG 186#if VCHIQ_ENABLE_DEBUG
185 187
186#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug; 188#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
187#define DEBUG_TRACE(d) \ 189#define DEBUG_TRACE(d) \
188 do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0) 190 do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
189#define DEBUG_VALUE(d, v) \ 191#define DEBUG_VALUE(d, v) \
190 do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0) 192 do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
191#define DEBUG_COUNT(d) \ 193#define DEBUG_COUNT(d) \
192 do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0) 194 do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)
193 195
194#else /* VCHIQ_ENABLE_DEBUG */ 196#else /* VCHIQ_ENABLE_DEBUG */
195 197
196#define DEBUG_INITIALISE(local) 198#define DEBUG_INITIALISE(local)
197#define DEBUG_TRACE(d) 199#define DEBUG_TRACE(d)
198#define DEBUG_VALUE(d, v) 200#define DEBUG_VALUE(d, v)
199#define DEBUG_COUNT(d) 201#define DEBUG_COUNT(d)
200 202
201#endif /* VCHIQ_ENABLE_DEBUG */ 203#endif /* VCHIQ_ENABLE_DEBUG */
202 204
203typedef enum { 205typedef enum {
204 VCHIQ_CONNSTATE_DISCONNECTED, 206 VCHIQ_CONNSTATE_DISCONNECTED,
205 VCHIQ_CONNSTATE_CONNECTING, 207 VCHIQ_CONNSTATE_CONNECTING,
206 VCHIQ_CONNSTATE_CONNECTED, 208 VCHIQ_CONNSTATE_CONNECTED,
207 VCHIQ_CONNSTATE_PAUSING, 209 VCHIQ_CONNSTATE_PAUSING,
208 VCHIQ_CONNSTATE_PAUSE_SENT, 210 VCHIQ_CONNSTATE_PAUSE_SENT,
209 VCHIQ_CONNSTATE_PAUSED, 211 VCHIQ_CONNSTATE_PAUSED,
210 VCHIQ_CONNSTATE_RESUMING, 212 VCHIQ_CONNSTATE_RESUMING,
211 VCHIQ_CONNSTATE_PAUSE_TIMEOUT, 213 VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
212 VCHIQ_CONNSTATE_RESUME_TIMEOUT 214 VCHIQ_CONNSTATE_RESUME_TIMEOUT
213} VCHIQ_CONNSTATE_T; 215} VCHIQ_CONNSTATE_T;
214 216
215enum { 217enum {
216 VCHIQ_SRVSTATE_FREE, 218 VCHIQ_SRVSTATE_FREE,
217 VCHIQ_SRVSTATE_HIDDEN, 219 VCHIQ_SRVSTATE_HIDDEN,
218 VCHIQ_SRVSTATE_LISTENING, 220 VCHIQ_SRVSTATE_LISTENING,
219 VCHIQ_SRVSTATE_OPENING, 221 VCHIQ_SRVSTATE_OPENING,
220 VCHIQ_SRVSTATE_OPEN, 222 VCHIQ_SRVSTATE_OPEN,
221 VCHIQ_SRVSTATE_OPENSYNC, 223 VCHIQ_SRVSTATE_OPENSYNC,
222 VCHIQ_SRVSTATE_CLOSESENT, 224 VCHIQ_SRVSTATE_CLOSESENT,
223 VCHIQ_SRVSTATE_CLOSERECVD, 225 VCHIQ_SRVSTATE_CLOSERECVD,
224 VCHIQ_SRVSTATE_CLOSEWAIT, 226 VCHIQ_SRVSTATE_CLOSEWAIT,
225 VCHIQ_SRVSTATE_CLOSED 227 VCHIQ_SRVSTATE_CLOSED
226}; 228};
227 229
228enum { 230enum {
229 VCHIQ_POLL_TERMINATE, 231 VCHIQ_POLL_TERMINATE,
230 VCHIQ_POLL_REMOVE, 232 VCHIQ_POLL_REMOVE,
231 VCHIQ_POLL_TXNOTIFY, 233 VCHIQ_POLL_TXNOTIFY,
232 VCHIQ_POLL_RXNOTIFY, 234 VCHIQ_POLL_RXNOTIFY,
233 VCHIQ_POLL_COUNT 235 VCHIQ_POLL_COUNT
234}; 236};
235 237
236typedef enum { 238typedef enum {
237 VCHIQ_BULK_TRANSMIT, 239 VCHIQ_BULK_TRANSMIT,
238 VCHIQ_BULK_RECEIVE 240 VCHIQ_BULK_RECEIVE
239} VCHIQ_BULK_DIR_T; 241} VCHIQ_BULK_DIR_T;
240 242
241typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata); 243typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
242 244
243typedef struct vchiq_bulk_struct { 245typedef struct vchiq_bulk_struct {
244 short mode; 246 short mode;
245 short dir; 247 short dir;
246 void *userdata; 248 void *userdata;
247 VCHI_MEM_HANDLE_T handle; 249 VCHI_MEM_HANDLE_T handle;
248 void *data; 250 void *data;
249 int size; 251 int size;
250 void *remote_data; 252 void *remote_data;
251 int remote_size; 253 int remote_size;
252 int actual; 254 int actual;
253} VCHIQ_BULK_T; 255} VCHIQ_BULK_T;
254 256
255typedef struct vchiq_bulk_queue_struct { 257typedef struct vchiq_bulk_queue_struct {
256 int local_insert; /* Where to insert the next local bulk */ 258 int local_insert; /* Where to insert the next local bulk */
257 int remote_insert; /* Where to insert the next remote bulk (master) */ 259 int remote_insert; /* Where to insert the next remote bulk (master) */
258 int process; /* Bulk to transfer next */ 260 int process; /* Bulk to transfer next */
259 int remote_notify; /* Bulk to notify the remote client of next (mstr) */ 261 int remote_notify; /* Bulk to notify the remote client of next (mstr) */
260 int remove; /* Bulk to notify the local client of, and remove, 262 int remove; /* Bulk to notify the local client of, and remove,
261 ** next */ 263 ** next */
262 VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS]; 264 VCHIQ_BULK_T bulks[VCHIQ_NUM_SERVICE_BULKS];
263} VCHIQ_BULK_QUEUE_T; 265} VCHIQ_BULK_QUEUE_T;
264 266
265typedef struct remote_event_struct { 267typedef struct remote_event_struct {
266 int32_t armed; 268 int32_t armed;
267 int32_t fired; 269 int32_t fired;
268 uint32_t event; /* offset to VCHIQ_STATE_T */ 270 uint32_t event; /* offset to VCHIQ_STATE_T */
269#define REMOTE_EVENT_SEMA(s,e) ((struct semaphore *)((char *)(s) + (e)->event)) 271#define REMOTE_EVENT_SEMA(s,e) ((struct semaphore *)((char *)(s) + (e)->event))
270} REMOTE_EVENT_T; 272} REMOTE_EVENT_T;
271 273
272typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T; 274typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
273 275
274typedef struct vchiq_state_struct VCHIQ_STATE_T; 276typedef struct vchiq_state_struct VCHIQ_STATE_T;
275 277
276typedef struct vchiq_slot_struct { 278typedef struct vchiq_slot_struct {
277 char data[VCHIQ_SLOT_SIZE]; 279 char data[VCHIQ_SLOT_SIZE];
278} VCHIQ_SLOT_T; 280} VCHIQ_SLOT_T;
279 281
280typedef struct vchiq_slot_info_struct { 282typedef struct vchiq_slot_info_struct {
281 /* Use two counters rather than one to avoid the need for a mutex. */ 283 /* Use two counters rather than one to avoid the need for a mutex. */
282 int16_t use_count; 284 int16_t use_count;
283 int16_t release_count; 285 int16_t release_count;
284} VCHIQ_SLOT_INFO_T; 286} VCHIQ_SLOT_INFO_T;
285 287
286typedef struct vchiq_service_struct { 288typedef struct vchiq_service_struct {
287 VCHIQ_SERVICE_BASE_T base; 289 VCHIQ_SERVICE_BASE_T base;
288 VCHIQ_SERVICE_HANDLE_T handle; 290 VCHIQ_SERVICE_HANDLE_T handle;
289 unsigned int ref_count; 291 unsigned int ref_count;
290 int srvstate; 292 int srvstate;
291 VCHIQ_USERDATA_TERM_T userdata_term; 293 VCHIQ_USERDATA_TERM_T userdata_term;
292 unsigned int localport; 294 unsigned int localport;
293 unsigned int remoteport; 295 unsigned int remoteport;
294 int public_fourcc; 296 int public_fourcc;
295 int client_id; 297 int client_id;
296 char auto_close; 298 char auto_close;
297 char sync; 299 char sync;
298 char closing; 300 char closing;
299 char trace; 301 char trace;
300 atomic_t poll_flags; 302 atomic_t poll_flags;
301 short version; 303 short version;
302 short version_min; 304 short version_min;
303 short peer_version; 305 short peer_version;
304 306
305 VCHIQ_STATE_T *state; 307 VCHIQ_STATE_T *state;
306 VCHIQ_INSTANCE_T instance; 308 VCHIQ_INSTANCE_T instance;
307 309
308 int service_use_count; 310 int service_use_count;
309 311
310 VCHIQ_BULK_QUEUE_T bulk_tx; 312 VCHIQ_BULK_QUEUE_T bulk_tx;
311 VCHIQ_BULK_QUEUE_T bulk_rx; 313 VCHIQ_BULK_QUEUE_T bulk_rx;
312 314
313 struct semaphore remove_event; 315 struct semaphore remove_event;
314 struct semaphore bulk_remove_event; 316 struct semaphore bulk_remove_event;
315 struct mutex bulk_mutex; 317 struct mutex bulk_mutex;
316 318
317 struct service_stats_struct { 319 struct service_stats_struct {
318 int quota_stalls; 320 int quota_stalls;
319 int slot_stalls; 321 int slot_stalls;
320 int bulk_stalls; 322 int bulk_stalls;
321 int error_count; 323 int error_count;
322 int ctrl_tx_count; 324 int ctrl_tx_count;
323 int ctrl_rx_count; 325 int ctrl_rx_count;
324 int bulk_tx_count; 326 int bulk_tx_count;
325 int bulk_rx_count; 327 int bulk_rx_count;
326 int bulk_aborted_count; 328 int bulk_aborted_count;
327 uint64_t ctrl_tx_bytes; 329 uint64_t ctrl_tx_bytes;
328 uint64_t ctrl_rx_bytes; 330 uint64_t ctrl_rx_bytes;
329 uint64_t bulk_tx_bytes; 331 uint64_t bulk_tx_bytes;
330 uint64_t bulk_rx_bytes; 332 uint64_t bulk_rx_bytes;
331 } stats; 333 } stats;
332} VCHIQ_SERVICE_T; 334} VCHIQ_SERVICE_T;
333 335
334/* The quota information is outside VCHIQ_SERVICE_T so that it can be 336/* The quota information is outside VCHIQ_SERVICE_T so that it can be
335 statically allocated, since for accounting reasons a service's slot 337 statically allocated, since for accounting reasons a service's slot
336 usage is carried over between users of the same port number. 338 usage is carried over between users of the same port number.
337 */ 339 */
338typedef struct vchiq_service_quota_struct { 340typedef struct vchiq_service_quota_struct {
339 unsigned short slot_quota; 341 unsigned short slot_quota;
340 unsigned short slot_use_count; 342 unsigned short slot_use_count;
341 unsigned short message_quota; 343 unsigned short message_quota;
342 unsigned short message_use_count; 344 unsigned short message_use_count;
343 struct semaphore quota_event; 345 struct semaphore quota_event;
344 int previous_tx_index; 346 int previous_tx_index;
345} VCHIQ_SERVICE_QUOTA_T; 347} VCHIQ_SERVICE_QUOTA_T;
346 348
347typedef struct vchiq_shared_state_struct { 349typedef struct vchiq_shared_state_struct {
348 350
349 /* A non-zero value here indicates that the content is valid. */ 351 /* A non-zero value here indicates that the content is valid. */
350 int32_t initialised; 352 int32_t initialised;
351 353
352 /* The first and last (inclusive) slots allocated to the owner. */ 354 /* The first and last (inclusive) slots allocated to the owner. */
353 int32_t slot_first; 355 int32_t slot_first;
354 int32_t slot_last; 356 int32_t slot_last;
355 357
356 /* The slot allocated to synchronous messages from the owner. */ 358 /* The slot allocated to synchronous messages from the owner. */
357 int32_t slot_sync; 359 int32_t slot_sync;
358 360
359 /* Signalling this event indicates that owner's slot handler thread 361 /* Signalling this event indicates that owner's slot handler thread
360 ** should run. */ 362 ** should run. */
361 REMOTE_EVENT_T trigger; 363 REMOTE_EVENT_T trigger;
362 364
363 /* Indicates the byte position within the stream where the next message 365 /* Indicates the byte position within the stream where the next message
364 ** will be written. The least significant bits are an index into the 366 ** will be written. The least significant bits are an index into the
365 ** slot. The next bits are the index of the slot in slot_queue. */ 367 ** slot. The next bits are the index of the slot in slot_queue. */
366 int32_t tx_pos; 368 int32_t tx_pos;
367 369
368 /* This event should be signalled when a slot is recycled. */ 370 /* This event should be signalled when a slot is recycled. */
369 REMOTE_EVENT_T recycle; 371 REMOTE_EVENT_T recycle;
370 372
371 /* The slot_queue index where the next recycled slot will be written. */ 373 /* The slot_queue index where the next recycled slot will be written. */
372 int slot_queue_recycle; 374 int slot_queue_recycle;
373 375
374 /* This event should be signalled when a synchronous message is sent. */ 376 /* This event should be signalled when a synchronous message is sent. */
375 REMOTE_EVENT_T sync_trigger; 377 REMOTE_EVENT_T sync_trigger;
376 378
377 /* This event should be signalled when a synchronous message has been 379 /* This event should be signalled when a synchronous message has been
378 ** released. */ 380 ** released. */
379 REMOTE_EVENT_T sync_release; 381 REMOTE_EVENT_T sync_release;
380 382
381 /* A circular buffer of slot indexes. */ 383 /* A circular buffer of slot indexes. */
382 int32_t slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE]; 384 int32_t slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
383 385
384 /* Debugging state */ 386 /* Debugging state */
385 int32_t debug[DEBUG_MAX]; 387 int32_t debug[DEBUG_MAX];
386} VCHIQ_SHARED_STATE_T; 388} VCHIQ_SHARED_STATE_T;
387 389
388typedef struct vchiq_slot_zero_struct { 390typedef struct vchiq_slot_zero_struct {
389 int32_t magic; 391 int32_t magic;
390 int16_t version; 392 int16_t version;
391 int16_t version_min; 393 int16_t version_min;
392 int32_t slot_zero_size; 394 int32_t slot_zero_size;
393 int32_t slot_size; 395 int32_t slot_size;
394 int32_t max_slots; 396 int32_t max_slots;
395 int32_t max_slots_per_side; 397 int32_t max_slots_per_side;
396 int32_t platform_data[2]; 398 int32_t platform_data[2];
397 VCHIQ_SHARED_STATE_T master; 399 VCHIQ_SHARED_STATE_T master;
398 VCHIQ_SHARED_STATE_T slave; 400 VCHIQ_SHARED_STATE_T slave;
399 VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS]; 401 VCHIQ_SLOT_INFO_T slots[VCHIQ_MAX_SLOTS];
400} VCHIQ_SLOT_ZERO_T; 402} VCHIQ_SLOT_ZERO_T;
401 403
402struct vchiq_state_struct { 404struct vchiq_state_struct {
403 int id; 405 int id;
404 int initialised; 406 int initialised;
405 VCHIQ_CONNSTATE_T conn_state; 407 VCHIQ_CONNSTATE_T conn_state;
406 int is_master; 408 int is_master;
407 short version_common; 409 short version_common;
408 410
409 VCHIQ_SHARED_STATE_T *local; 411 VCHIQ_SHARED_STATE_T *local;
410 VCHIQ_SHARED_STATE_T *remote; 412 VCHIQ_SHARED_STATE_T *remote;
411 VCHIQ_SLOT_T *slot_data; 413 VCHIQ_SLOT_T *slot_data;
412 414
413 unsigned short default_slot_quota; 415 unsigned short default_slot_quota;
414 unsigned short default_message_quota; 416 unsigned short default_message_quota;
415 417
416 /* Event indicating connect message received */ 418 /* Event indicating connect message received */
417 struct semaphore connect; 419 struct semaphore connect;
418 420
419 /* Mutex protecting services */ 421 /* Mutex protecting services */
420 struct mutex mutex; 422 struct mutex mutex;
421 VCHIQ_INSTANCE_T *instance; 423 VCHIQ_INSTANCE_T *instance;
422 424
423 /* Processes incoming messages */ 425 /* Processes incoming messages */
424 VCHIQ_THREAD_T slot_handler_thread; 426 VCHIQ_THREAD_T slot_handler_thread;
425 427
426 /* Processes recycled slots */ 428 /* Processes recycled slots */
427 VCHIQ_THREAD_T recycle_thread; 429 VCHIQ_THREAD_T recycle_thread;
428 430
429 /* Processes synchronous messages */ 431 /* Processes synchronous messages */
430 VCHIQ_THREAD_T sync_thread; 432 VCHIQ_THREAD_T sync_thread;
431 433
432 /* Local implementation of the trigger remote event */ 434 /* Local implementation of the trigger remote event */
433 struct semaphore trigger_event; 435 struct semaphore trigger_event;
434 436
435 /* Local implementation of the recycle remote event */ 437 /* Local implementation of the recycle remote event */
436 struct semaphore recycle_event; 438 struct semaphore recycle_event;
437 439
438 /* Local implementation of the sync trigger remote event */ 440 /* Local implementation of the sync trigger remote event */
439 struct semaphore sync_trigger_event; 441 struct semaphore sync_trigger_event;
440 442
441 /* Local implementation of the sync release remote event */ 443 /* Local implementation of the sync release remote event */
442 struct semaphore sync_release_event; 444 struct semaphore sync_release_event;
443 445
444 char *tx_data; 446 char *tx_data;
445 char *rx_data; 447 char *rx_data;
446 VCHIQ_SLOT_INFO_T *rx_info; 448 VCHIQ_SLOT_INFO_T *rx_info;
447 449
448 struct mutex slot_mutex; 450 struct mutex slot_mutex;
449 451
450 struct mutex recycle_mutex; 452 struct mutex recycle_mutex;
451 453
452 struct mutex sync_mutex; 454 struct mutex sync_mutex;
453 455
454 struct mutex bulk_transfer_mutex; 456 struct mutex bulk_transfer_mutex;
455 457
456 /* Indicates the byte position within the stream from where the next 458 /* Indicates the byte position within the stream from where the next
457 ** message will be read. The least significant bits are an index into 459 ** message will be read. The least significant bits are an index into
458 ** the slot.The next bits are the index of the slot in 460 ** the slot.The next bits are the index of the slot in
459 ** remote->slot_queue. */ 461 ** remote->slot_queue. */
460 int rx_pos; 462 int rx_pos;
461 463
462 /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read 464 /* A cached copy of local->tx_pos. Only write to local->tx_pos, and read
463 from remote->tx_pos. */ 465 from remote->tx_pos. */
464 int local_tx_pos; 466 int local_tx_pos;
465 467
466 /* The slot_queue index of the slot to become available next. */ 468 /* The slot_queue index of the slot to become available next. */
467 int slot_queue_available; 469 int slot_queue_available;
468 470
469 /* A flag to indicate if any poll has been requested */ 471 /* A flag to indicate if any poll has been requested */
470 int poll_needed; 472 int poll_needed;
471 473
472 /* Ths index of the previous slot used for data messages. */ 474 /* Ths index of the previous slot used for data messages. */
473 int previous_data_index; 475 int previous_data_index;
474 476
475 /* The number of slots occupied by data messages. */ 477 /* The number of slots occupied by data messages. */
476 unsigned short data_use_count; 478 unsigned short data_use_count;
477 479
478 /* The maximum number of slots to be occupied by data messages. */ 480 /* The maximum number of slots to be occupied by data messages. */
479 unsigned short data_quota; 481 unsigned short data_quota;
480 482
481 /* An array of bit sets indicating which services must be polled. */ 483 /* An array of bit sets indicating which services must be polled. */
482 atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)]; 484 atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
483 485
484 /* The number of the first unused service */ 486 /* The number of the first unused service */
485 int unused_service; 487 int unused_service;
486 488
487 /* Signalled when a free slot becomes available. */ 489 /* Signalled when a free slot becomes available. */
488 struct semaphore slot_available_event; 490 struct semaphore slot_available_event;
489 491
490 struct semaphore slot_remove_event; 492 struct semaphore slot_remove_event;
491 493
492 /* Signalled when a free data slot becomes available. */ 494 /* Signalled when a free data slot becomes available. */
493 struct semaphore data_quota_event; 495 struct semaphore data_quota_event;
494 496
495 /* Incremented when there are bulk transfers which cannot be processed 497 /* Incremented when there are bulk transfers which cannot be processed
496 * whilst paused and must be processed on resume */ 498 * whilst paused and must be processed on resume */
497 int deferred_bulks; 499 int deferred_bulks;
498 500
499 struct state_stats_struct { 501 struct state_stats_struct {
500 int slot_stalls; 502 int slot_stalls;
501 int data_stalls; 503 int data_stalls;
502 int ctrl_tx_count; 504 int ctrl_tx_count;
503 int ctrl_rx_count; 505 int ctrl_rx_count;
504 int error_count; 506 int error_count;
505 } stats; 507 } stats;
506 508
507 VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES]; 509 VCHIQ_SERVICE_T * services[VCHIQ_MAX_SERVICES];
508 VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES]; 510 VCHIQ_SERVICE_QUOTA_T service_quotas[VCHIQ_MAX_SERVICES];
509 VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS]; 511 VCHIQ_SLOT_INFO_T slot_info[VCHIQ_MAX_SLOTS];
510 512
511 VCHIQ_PLATFORM_STATE_T platform_state; 513 VCHIQ_PLATFORM_STATE_T platform_state;
512}; 514};
513 515
/* Per-transfer context for a blocking bulk transfer: the initiating thread
 * sleeps on 'event' until the transfer completes. */
struct bulk_waiter {
	VCHIQ_BULK_T *bulk;	/* the bulk transfer being waited on */
	struct semaphore event;	/* signalled on completion */
	int actual;		/* presumably bytes actually transferred —
				 * confirm against vchiq_core.c */
};
519 521
/* Protects bulk_waiter bookkeeping shared between threads. */
extern spinlock_t bulk_waiter_spinlock;

/* Runtime-tunable log verbosity for the core, per-message and sync paths. */
extern int vchiq_core_log_level;
extern int vchiq_core_msg_log_level;
extern int vchiq_sync_log_level;

/* All VCHIQ states; indexed by (handle / VCHIQ_MAX_SERVICES) masked with
 * (VCHIQ_MAX_STATES - 1) — see handle_to_service() below. */
extern VCHIQ_STATE_T *vchiq_states[VCHIQ_MAX_STATES];
527 529
/* Human-readable name for a connection state (for logging/dumping). */
extern const char *
get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);

/* Carve the shared-memory region into slots; returns the slot-zero header. */
extern VCHIQ_SLOT_ZERO_T *
vchiq_init_slots(void *mem_base, int mem_size);

/* Initialise a state object around an already-initialised slot zero. */
extern VCHIQ_STATUS_T
vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
	int is_master);

extern VCHIQ_STATUS_T
vchiq_connect_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);

/* Create a service in the given initial srvstate for an instance. */
extern VCHIQ_SERVICE_T *
vchiq_add_service_internal(VCHIQ_STATE_T *state,
	const VCHIQ_SERVICE_PARAMS_T *params, int srvstate,
	VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term);

extern VCHIQ_STATUS_T
vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id);

/* close_recvd indicates whether the peer's CLOSE has already arrived. */
extern VCHIQ_STATUS_T
vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd);

extern void
vchiq_terminate_service_internal(VCHIQ_SERVICE_T *service);

extern void
vchiq_free_service_internal(VCHIQ_SERVICE_T *service);

extern VCHIQ_STATUS_T
vchiq_shutdown_internal(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance);

extern VCHIQ_STATUS_T
vchiq_pause_internal(VCHIQ_STATE_T *state);

extern VCHIQ_STATUS_T
vchiq_resume_internal(VCHIQ_STATE_T *state);

/* Service all outstanding poll requests on the state. */
extern void
remote_event_pollall(VCHIQ_STATE_T *state);

/* Queue a bulk transfer in the given direction/mode on a service handle. */
extern VCHIQ_STATUS_T
vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
	VCHI_MEM_HANDLE_T memhandle, void *offset, int size, void *userdata,
	VCHIQ_BULK_MODE_T mode, VCHIQ_BULK_DIR_T dir);

/* Debug-dump helpers; output goes through vchiq_dump() (declared below). */
extern void
vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state);

extern void
vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service);

/* Bracket a prominent error message in the log. */
extern void
vchiq_loud_error_header(void);

extern void
vchiq_loud_error_footer(void);

/* Mark a service (or whole state) as needing the given poll action. */
extern void
request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type);
590static inline VCHIQ_SERVICE_T * 592static inline VCHIQ_SERVICE_T *
591handle_to_service(VCHIQ_SERVICE_HANDLE_T handle) 593handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
592{ 594{
593 VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) & 595 VCHIQ_STATE_T *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
594 (VCHIQ_MAX_STATES - 1)]; 596 (VCHIQ_MAX_STATES - 1)];
595 if (!state) 597 if (!state)
596 return NULL; 598 return NULL;
597 599
598 return state->services[handle & (VCHIQ_MAX_SERVICES - 1)]; 600 return state->services[handle & (VCHIQ_MAX_SERVICES - 1)];
599} 601}
600 602
/* Service lookup helpers.  Presumably these take a reference on the returned
 * service (pair with unlock_service) — confirm in vchiq_core.c. */
extern VCHIQ_SERVICE_T *
find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);

extern VCHIQ_SERVICE_T *
find_service_by_port(VCHIQ_STATE_T *state, int localport);

extern VCHIQ_SERVICE_T *
find_service_for_instance(VCHIQ_INSTANCE_T instance,
	VCHIQ_SERVICE_HANDLE_T handle);

extern VCHIQ_SERVICE_T *
find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
	VCHIQ_SERVICE_HANDLE_T handle);

/* Iterate the services of an instance; *pidx is the cursor, updated on
 * each call. */
extern VCHIQ_SERVICE_T *
next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
	int *pidx);

/* Reference counting on a service. */
extern void
lock_service(VCHIQ_SERVICE_T *service);

extern void
unlock_service(VCHIQ_SERVICE_T *service);
624 626
/* The following functions are called from vchiq_core, and external
** implementations must be provided. */

/* Pin/map the memory behind a bulk transfer before it is handed to the
 * peer. */
extern VCHIQ_STATUS_T
vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk,
	VCHI_MEM_HANDLE_T memhandle, void *offset, int size, int dir);

extern void
vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);

/* Release resources and deliver completion for a finished bulk transfer. */
extern void
vchiq_complete_bulk(VCHIQ_BULK_T *bulk);

/* Copy message payload from the caller; 'src' may be a user-space pointer
 * depending on context — see platform implementation. */
extern VCHIQ_STATUS_T
vchiq_copy_from_user(void *dst, const void *src, int size);

/* Ring the doorbell for a remote event (wakes the peer). */
extern void
remote_event_signal(REMOTE_EVENT_T *event);

void
vchiq_platform_check_suspend(VCHIQ_STATE_T *state);

extern void
vchiq_platform_paused(VCHIQ_STATE_T *state);

extern VCHIQ_STATUS_T
vchiq_platform_resume(VCHIQ_STATE_T *state);

extern void
vchiq_platform_resumed(VCHIQ_STATE_T *state);

/* Emit 'len' bytes of 'str' into the dump being built in 'dump_context'. */
extern void
vchiq_dump(void *dump_context, const char *str, int len);

extern void
vchiq_dump_platform_state(void *dump_context);

extern void
vchiq_dump_platform_instances(void *dump_context);

extern void
vchiq_dump_platform_service_state(void *dump_context,
	VCHIQ_SERVICE_T *service);

/* Platform-side use/release accounting for a service. */
extern VCHIQ_STATUS_T
vchiq_use_service_internal(VCHIQ_SERVICE_T *service);

extern VCHIQ_STATUS_T
vchiq_release_service_internal(VCHIQ_SERVICE_T *service);

/* Notifications that the remote side has taken/dropped a use count. */
extern void
vchiq_on_remote_use(VCHIQ_STATE_T *state);

extern void
vchiq_on_remote_release(VCHIQ_STATE_T *state);

extern VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state);

extern VCHIQ_STATUS_T
vchiq_check_service(VCHIQ_SERVICE_T *service);

extern void
vchiq_on_remote_use_active(VCHIQ_STATE_T *state);

/* Send use/release/use-active messages to the peer. */
extern VCHIQ_STATUS_T
vchiq_send_remote_use(VCHIQ_STATE_T *state);

extern VCHIQ_STATUS_T
vchiq_send_remote_release(VCHIQ_STATE_T *state);

extern VCHIQ_STATUS_T
vchiq_send_remote_use_active(VCHIQ_STATE_T *state);

/* Hook invoked whenever the connection state machine transitions. */
extern void
vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
	VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate);

extern void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);

extern void
vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);


/* Hex-dump 'numBytes' of memory labelled with 'label' and base 'addr'. */
extern void
vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
	size_t numBytes);

/* One-time module initialisation for the core. */
extern void
vchiq_core_initialize(void);
716 718
717#endif 719#endif