Sat Feb 28 18:25:39 2015 UTC
New macro DRM_SPIN_WAIT_ON better reflects DRM_WAIT_ON.

We still need to adapt all waits from upstream to use an interlock,
so we can't implement DRM_WAIT_ON verbatim, but this reflects the API
of DRM_WAIT_ON more closely than the DRM_*WAIT*_UNTIL macros do.

The major difference is that this polls every tick, like DRM_WAIT_ON
and unlike DRM_*WAIT*_UNTIL.  So it will mask missing wakeups, but it
wouldn't surprise me if there were such things upstream.


(riastradh)
diff -r1.7 -r1.8 src/sys/external/bsd/drm2/dist/drm/drm_irq.c
diff -r1.14 -r1.15 src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c
diff -r1.3 -r1.4 src/sys/external/bsd/drm2/dist/drm/via/via_dmablit.c
diff -r1.3 -r1.4 src/sys/external/bsd/drm2/dist/drm/via/via_irq.c
diff -r1.3 -r1.4 src/sys/external/bsd/drm2/dist/drm/via/via_video.c
diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/via/via_drv.h
diff -r1.8 -r1.9 src/sys/external/bsd/drm2/include/drm/drm_wait_netbsd.h
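
For reference, a minimal sketch of the caller pattern the new macro
expects, drawn from the drm_irq.c and via hunks below; sc->lock,
sc->queue, and condition are placeholders for whatever the caller uses:

	int ret;
	unsigned long irqflags;

	/* The wait must be interlocked by the spin lock the wakeup side holds. */
	spin_lock_irqsave(&sc->lock, irqflags);
	DRM_SPIN_WAIT_ON(ret, &sc->queue, &sc->lock, 3 * DRM_HZ, condition);
	spin_unlock_irqrestore(&sc->lock, irqflags);

	/*
	 * Like DRM_WAIT_ON: ret is 0 on success, -EBUSY on timeout, and
	 * -EINTR/-ERESTART if interrupted by a signal.
	 */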

cvs diff -r1.7 -r1.8 src/sys/external/bsd/drm2/dist/drm/drm_irq.c

--- src/sys/external/bsd/drm2/dist/drm/drm_irq.c	2015/02/28 03:05:09	1.7
+++ src/sys/external/bsd/drm2/dist/drm/drm_irq.c	2015/02/28 18:25:39	1.8
@@ -1283,40 +1283,34 @@ int drm_wait_vblank(struct drm_device *d
 	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1<<23)) {
 		vblwait->request.sequence = seq + 1;
 	}
 
 	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
 		  vblwait->request.sequence, crtc);
 	dev->vblank[crtc].last_wait = vblwait->request.sequence;
 #ifdef __NetBSD__
 	{
 		unsigned long irqflags;
+
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &dev->vblank[crtc].queue,
-		    &dev->vbl_lock,
-		    (3 * HZ),
+		DRM_SPIN_WAIT_ON(ret, &dev->vblank[crtc].queue, &dev->vbl_lock,
+		    3 * HZ,
 		    (((drm_vblank_count(dev, crtc) -
 			    vblwait->request.sequence) <= (1 << 23)) ||
 			!dev->irq_enabled));
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-		if (ret < 0)	/* Failed: return negative error as is.  */
-			;
-		else if (ret == 0) /* Timed out: return -EBUSY like Linux.  */
-			ret = -EBUSY;
-		else		/* Succeeded (ret > 0): return 0.  */
-			ret = 0;
 	}
 #else
 	DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
 	    (((drm_vblank_count(dev, crtc) -
 		    vblwait->request.sequence) <= (1 << 23)) ||
 		!dev->irq_enabled));
 #endif
 
 	if (ret != -EINTR) {
 		struct timeval now;
 
 		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
 		vblwait->reply.tval_sec = now.tv_sec;

cvs diff -r1.14 -r1.15 src/sys/external/bsd/drm2/dist/drm/i915/Attic/i915_dma.c

--- src/sys/external/bsd/drm2/dist/drm/i915/Attic/i915_dma.c	2015/02/28 03:06:46	1.14
+++ src/sys/external/bsd/drm2/dist/drm/i915/Attic/i915_dma.c	2015/02/28 18:25:39	1.15
@@ -802,36 +802,29 @@ static int i915_wait_irq(struct drm_devi
 	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
 		if (master_priv->sarea_priv)
 			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 		return 0;
 	}
 
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
 	if (ring->irq_get(ring)) {
 #ifdef __NetBSD__
 		unsigned long flags;
 		spin_lock_irqsave(&dev_priv->irq_lock, flags);
-		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &ring->irq_queue,
-		    &dev_priv->irq_lock,
+		DRM_SPIN_WAIT_ON(ret, &ring->irq_queue, &dev_priv->irq_lock,
 		    3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-		if (ret < 0)	/* Failure: return negative error as is.  */
-			;
-		else if (ret == 0) /* Timed out: return -EBUSY like Linux.  */
-			ret = -EBUSY;
-		else		/* Succeeded (ret > 0): return 0.  */
-			ret = 0;
 		spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 #else
 		DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
 #endif
 		ring->irq_put(ring);
 	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
 		ret = -EBUSY;
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
 			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
 	}

cvs diff -r1.3 -r1.4 src/sys/external/bsd/drm2/dist/drm/via/via_dmablit.c

--- src/sys/external/bsd/drm2/dist/drm/via/via_dmablit.c	2015/02/28 03:23:32	1.3
+++ src/sys/external/bsd/drm2/dist/drm/via/via_dmablit.c	2015/02/28 18:25:39	1.4
@@ -587,35 +587,28 @@ via_dmablit_sync(struct drm_device *dev,
 
 	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
 	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
 #ifdef __NetBSD__
 	drm_waitqueue_t *queue;
 #else
 	wait_queue_head_t *queue;
 #endif
 	int ret = 0;
 
 #ifdef __NetBSD__
 	spin_lock(&blitq->blit_lock);
 	if (via_dmablit_active(blitq, engine, handle, &queue)) {
-		DRM_SPIN_TIMED_WAIT_UNTIL(ret, queue, &blitq->blit_lock,
-		    3*DRM_HZ,
+		DRM_SPIN_WAIT_ON(ret, queue, &blitq->blit_lock, 3*DRM_HZ,
 		    !via_dmablit_active(blitq, engine, handle, NULL));
-		if (ret < 0)	/* Failure: return negative error as is.  */
-			;
-		else if (ret == 0) /* Timed out: return -EBUSY like Linux.  */
-			ret = -EBUSY;
-		else		/* Succeeded (ret > 0): return 0.  */
-			ret = 0;
 	}
 	spin_unlock(&blitq->blit_lock);
 #else
 	if (via_dmablit_active(blitq, engine, handle, &queue)) {
 		DRM_WAIT_ON(ret, *queue, 3 * HZ,
 		    !via_dmablit_active(blitq, engine, handle, NULL));
 	}
 #endif
 	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
 	    handle, engine, ret);
 
 	return ret;
 }
@@ -871,35 +864,29 @@ via_build_sg_info(struct drm_device *dev
  * to become available. Otherwise -EBUSY is returned.
  */
 
 static int
 via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
 {
 	int ret = 0;
 	unsigned long irqsave;
 
 	DRM_DEBUG("Num free is %d\n", blitq->num_free);
 	spin_lock_irqsave(&blitq->blit_lock, irqsave);
 	while (blitq->num_free == 0) {
 #ifdef __NetBSD__
-		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &blitq->busy_queue,
-		    &blitq->blit_lock, DRM_HZ,
+		DRM_SPIN_WAIT_ON(ret, &blitq->busy_queue, &blitq->blit_lock,
+		    DRM_HZ,
 		    blitq->num_free > 0);
-		if (ret < 0)	/* Failure: return negative error as is.  */
-			;
-		else if (ret == 0) /* Timed out: return -EBUSY like Linux.  */
-			ret = -EBUSY;
-		else		/* Success (ret > 0): return 0.  */
-			ret = 0;
 		/* Map -EINTR to -EAGAIN.  */
 		if (ret == -EINTR)
 			ret = -EAGAIN;
 		/* Bail on failure.  */
 		if (ret) {
 			spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
 			return ret;
 		}
 #else
 		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
 
 		DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
 		if (ret)

cvs diff -r1.3 -r1.4 src/sys/external/bsd/drm2/dist/drm/via/via_irq.c

--- src/sys/external/bsd/drm2/dist/drm/via/via_irq.c	2015/02/28 03:23:32	1.3
+++ src/sys/external/bsd/drm2/dist/drm/via/via_irq.c	2015/02/28 18:25:39	1.4
@@ -239,43 +239,37 @@ via_driver_irq_wait(struct drm_device *d
 
 	if (real_irq < 0) {
 		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
 		    irq);
 		return -EINVAL;
 	}
 
 	masks = dev_priv->irq_masks;
 	cur_irq = dev_priv->via_irqs + real_irq;
 
 #ifdef __NetBSD__
 	spin_lock(&cur_irq->irq_lock);
 	if (masks[real_irq][2] && !force_sequence) {
-		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &cur_irq->irq_queue,
-		    &cur_irq->irq_lock, 3 * DRM_HZ,
+		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
+		    3 * DRM_HZ,
 		    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
 			masks[irq][4]));
 		cur_irq_sequence = cur_irq->irq_received;
 	} else {
-		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &cur_irq->irq_queue,
-		    &cur_irq->irq_lock, 3 * DRM_HZ,
+		DRM_SPIN_WAIT_ON(ret, &cur_irq->irq_queue, &cur_irq->irq_lock,
+		    3 * DRM_HZ,
 		    (((cur_irq_sequence = cur_irq->irq_received) -
 			*sequence) <= (1 << 23)));
 	}
-	if (ret < 0)	/* Failure: return negative error as is.  */
-		;
-	else if (ret == 0) /* Timed out: return -EBUSY like Linux.  */
-		ret = -EBUSY;
-	else		/* Success (ret > 0): return 0.  */
-		ret = 0;
 	spin_unlock(&cur_irq->irq_lock);
 #else
 	if (masks[real_irq][2] && !force_sequence) {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
 		    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
 			masks[irq][4]));
 		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
 	} else {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
 		    (((cur_irq_sequence =
 			atomic_read(&cur_irq->irq_received)) -
 			*sequence) <= (1 << 23)));
 	}

cvs diff -r1.3 -r1.4 src/sys/external/bsd/drm2/dist/drm/via/via_video.c

--- src/sys/external/bsd/drm2/dist/drm/via/via_video.c	2015/02/28 03:23:32	1.3
+++ src/sys/external/bsd/drm2/dist/drm/via/via_video.c	2015/02/28 18:25:39	1.4
@@ -27,65 +27,65 @@
 
 #include <drm/drmP.h>
 #include <drm/via_drm.h>
 #include "via_drv.h"
 
 void via_init_futex(drm_via_private_t *dev_priv)
 {
 	unsigned int i;
 
 	DRM_DEBUG("\n");
 
 	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
 #ifdef __NetBSD__
-		linux_mutex_init(&dev_priv->decoder_lock[i]);
+		spin_lock_init(&dev_priv->decoder_lock[i]);
 		DRM_INIT_WAITQUEUE(&dev_priv->decoder_queue[i], "viadec");
 #else
 		init_waitqueue_head(&(dev_priv->decoder_queue[i]));
 #endif
 		XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
 	}
 }
 
 void via_cleanup_futex(drm_via_private_t *dev_priv)
 {
 #ifdef __NetBSD__
 	unsigned i;
 
 	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
 		DRM_DESTROY_WAITQUEUE(&dev_priv->decoder_queue[i]);
-		linux_mutex_destroy(&dev_priv->decoder_lock[i]);
+		spin_lock_destroy(&dev_priv->decoder_lock[i]);
 	}
 #endif
 }
 
 void via_release_futex(drm_via_private_t *dev_priv, int context)
 {
 	unsigned int i;
 	volatile int *lock;
 
 	if (!dev_priv->sarea_priv)
 		return;
 
 	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
 		lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
 		if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
 			if (_DRM_LOCK_IS_HELD(*lock)
 			    && (*lock & _DRM_LOCK_CONT)) {
 #ifdef __NetBSD__
-				mutex_lock(&dev_priv->decoder_lock[i]);
-				DRM_WAKEUP_ALL(&dev_priv->decoder_queue[i],
+				spin_lock(&dev_priv->decoder_lock[i]);
+				DRM_SPIN_WAKEUP_ALL(&dev_priv->decoder_queue[i],
 				    &dev_priv->decoder_lock[i]);
-				mutex_unlock(&dev_priv->decoder_lock[i]);
+				spin_unlock(&dev_priv->decoder_lock[i]);
 #else
 				wake_up(&(dev_priv->decoder_queue[i]));
 #endif
 			}
 			*lock = 0;
 		}
 	}
 }
 
 int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	drm_via_futex_t *fx = data;
 	volatile int *lock;
@@ -93,38 +93,32 @@ int via_decoder_futex(struct drm_device
 	drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
 	int ret = 0;
 
 	DRM_DEBUG("\n");
 
 	if (fx->lock >= VIA_NR_XVMC_LOCKS)
 		return -EFAULT;
 
 	lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
 
 	switch (fx->func) {
 	case VIA_FUTEX_WAIT:
 #ifdef __NetBSD__
-		mutex_lock(&dev_priv->decoder_lock[fx->lock]);
-		DRM_TIMED_WAIT_UNTIL(ret, &dev_priv->decoder_queue[fx->lock],
+		spin_lock(&dev_priv->decoder_lock[fx->lock]);
+		DRM_SPIN_WAIT_ON(ret, &dev_priv->decoder_queue[fx->lock],
 		    &dev_priv->decoder_lock[fx->lock],
 		    (fx->ms / 10) * (DRM_HZ / 100),
 		    *lock != fx->val);
-		if (ret < 0)	/* Failure: return negative error as is.  */
-			;
-		else if (ret == 0) /* Timed out: return -EBUSY like Linux.  */
-			ret = -EBUSY;
-		else		/* Success (ret > 0): return 0.  */
-			ret = 0;
-		mutex_unlock(&dev_priv->decoder_lock[fx->lock]);
+		spin_unlock(&dev_priv->decoder_lock[fx->lock]);
 #else
 		DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
 		    (fx->ms / 10) * (HZ / 100), *lock != fx->val);
 #endif
 		return ret;
 	case VIA_FUTEX_WAKE:
 #ifdef __NetBSD__
 		mutex_lock(&dev_priv->decoder_lock[fx->lock]);
 		DRM_WAKEUP_ALL(&dev_priv->decoder_queue[fx->lock],
 		    &dev_priv->decoder_lock[fx->lock]);
 		mutex_unlock(&dev_priv->decoder_lock[fx->lock]);
 #else
 		wake_up(&(dev_priv->decoder_queue[fx->lock]));

cvs diff -r1.2 -r1.3 src/sys/external/bsd/drm2/dist/drm/via/via_drv.h

--- src/sys/external/bsd/drm2/dist/drm/via/via_drv.h	2014/08/26 17:28:14	1.2
+++ src/sys/external/bsd/drm2/dist/drm/via/via_drv.h	2015/02/28 18:25:39	1.3
@@ -63,27 +63,27 @@ typedef struct drm_via_irq {
 	drm_waitqueue_t irq_queue;
 #else
 	wait_queue_head_t irq_queue;
 #endif
 } drm_via_irq_t;
 
 typedef struct drm_via_private {
 	drm_via_sarea_t *sarea_priv;
 	drm_local_map_t *sarea;
 	drm_local_map_t *fb;
 	drm_local_map_t *mmio;
 	unsigned long agpAddr;
 #ifdef __NetBSD__
-	struct mutex decoder_lock[VIA_NR_XVMC_LOCKS];
+	spinlock_t decoder_lock[VIA_NR_XVMC_LOCKS];
 	drm_waitqueue_t decoder_queue[VIA_NR_XVMC_LOCKS];
 #else
 	wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
 #endif
 	char *dma_ptr;
 	unsigned int dma_low;
 	unsigned int dma_high;
 	unsigned int dma_offset;
 	uint32_t dma_wrap;
 	volatile uint32_t *last_pause_ptr;
 	volatile uint32_t *hw_addr_ptr;
 	drm_via_ring_buffer_t ring;
 	struct timeval last_vblank;

cvs diff -r1.8 -r1.9 src/sys/external/bsd/drm2/include/drm/drm_wait_netbsd.h

--- src/sys/external/bsd/drm2/include/drm/drm_wait_netbsd.h	2015/02/28 04:57:12	1.8
+++ src/sys/external/bsd/drm2/include/drm/drm_wait_netbsd.h	2015/02/28 18:25:39	1.9
@@ -1,14 +1,14 @@
-/*	$NetBSD: drm_wait_netbsd.h,v 1.8 2015/02/28 04:57:12 riastradh Exp $	*/
+/*	$NetBSD: drm_wait_netbsd.h,v 1.9 2015/02/28 18:25:39 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Taylor R. Campbell.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -95,51 +95,100 @@ DRM_SPIN_WAKEUP_ONE(drm_waitqueue_t *q,
 {
 	KASSERT(spin_is_locked(interlock));
 	cv_signal(q);
 }
 
 static inline void
 DRM_SPIN_WAKEUP_ALL(drm_waitqueue_t *q, spinlock_t *interlock)
 {
 	KASSERT(spin_is_locked(interlock));
 	cv_broadcast(q);
 }
 
 /*
- * WARNING: These DRM_*WAIT*_UNTIL macros are designed to replace the
- * Linux wait_event* macros.  They have a different return value
- * convention from the legacy portability DRM_WAIT_ON macro and a
+ * DRM_SPIN_WAIT_ON is a replacement for the legacy DRM_WAIT_ON
+ * portability macro.  It requires a spin interlock, which may require
+ * changes to the surrounding code so that the waits actually are
+ * interlocked by a spin lock.  It also polls the condition at every
+ * tick, which masks missing wakeups.  Since DRM_WAIT_ON is going away,
+ * in favour of Linux's native wait_event* API, waits in new code
+ * should be written to use the DRM_*WAIT*_UNTIL macros below.
+ *
+ * Like the legacy DRM_WAIT_ON, DRM_SPIN_WAIT_ON returns
+ *
+ * . -EBUSY if timed out (yes, -EBUSY, not -ETIMEDOUT or -EWOULDBLOCK),
+ * . -EINTR/-ERESTART if interrupted by a signal, or
+ * . 0 if the condition was true before or just after the timeout.
+ *
+ * Note that cv_timedwait* return -EWOULDBLOCK, not -EBUSY, on timeout.
+ */
+
+#define	DRM_SPIN_WAIT_ON(RET, Q, INTERLOCK, TICKS, CONDITION)	do	\
+{									\
+	extern int hardclock_ticks;					\
+	const int _dswo_start = hardclock_ticks;			\
+	const int _dswo_end = _dswo_start + (TICKS);			\
+									\
+	KASSERT(spin_is_locked((INTERLOCK)));				\
+	KASSERT(!cpu_intr_p());						\
+	KASSERT(!cpu_softintr_p());					\
+	KASSERT(!cold);							\
+									\
+	for (;;) {							\
+		if (CONDITION) {					\
+			(RET) = 0;					\
+			break;						\
+		}							\
+		const int _dswo_now = hardclock_ticks;			\
+		if (_dswo_end < _dswo_now) {				\
+			(RET) = -EBUSY;	/* Match Linux...  */		\
+			break;						\
+		}							\
+		/* XXX errno NetBSD->Linux */				\
+		(RET) = -cv_timedwait_sig((Q), &(INTERLOCK)->sl_lock,	\
+		    (_dswo_end - _dswo_now));				\
+		if (RET) {						\
+			if ((RET) == -EWOULDBLOCK)			\
+				(RET) = (CONDITION) ? 0 : -EBUSY;	\
+									\
+			break;						\
+		}							\
+	}								\
+} while (0)
+
+/*
+ * The DRM_*WAIT*_UNTIL macros are replacements for the Linux
+ * wait_event* macros.  Like DRM_SPIN_WAIT_ON, they add an interlock,
+ * and so may require some changes to the surrounding code.  They have
+ * a different return value convention from DRM_SPIN_WAIT_ON and a
  * different return value convention from cv_*wait*.
  *
- * Specifically, the untimed macros
+ * The untimed DRM_*WAIT*_UNTIL macros return
  *
- * - return negative error code on failure (interruption), and
- * - return zero on sucess.
+ * . -EINTR/-ERESTART if interrupted by a signal, or
+ * . zero if the condition evaluated to true.
  *
- * The timed macros
+ * The timed DRM_*TIMED_WAIT*_UNTIL macros return
  *
- * - return negative error code on failure (interruption),
- * - return zero on timeout, and
- * - return one on success.
+ * . -EINTR/-ERESTART if interrupted by a signal,
+ * . 0 if the condition was false after the timeout,
+ * . 1 if the condition was true just after the timeout, or
+ * . the number of ticks remaining if the condition was true before the
+ *   timeout.
  *
- * Contrast DRM_WAIT_ON which returns -EINTR/-ERESTART on interruption,
+ * Contrast DRM_SPIN_WAIT_ON which returns -EINTR/-ERESTART on signal,
  * -EBUSY on timeout, and zero on success; and cv_*wait*, which return
- * -EINTR/-ERESTART on interruption, -EWOULDBLOCK on timeout, and zero
- * on success.
- *
- * We don't simply implement DRM_WAIT_ON because, like Linux
- * wait_event*, it lacks an interlock, whereas we require an interlock
- * for any waits in order to avoid the standard race conditions
- * associated with non-interlocked waits that plague Linux drivers.
+ * -EINTR/-ERESTART on signal, -EWOULDBLOCK on timeout, and zero on
+ * success.
  *
  * XXX In retrospect, giving the timed and untimed macros a different
  * return convention from one another to match Linux may have been a
 * bad idea.  All of this inconsistent timeout return convention logic
 * has been a consistent source of bugs.
 */
 
 #define	_DRM_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, CONDITION) do	\
 {									\
 	KASSERT(mutex_is_locked((INTERLOCK)));				\
 	ASSERT_SLEEPABLE();						\
 	KASSERT(!cold);							\
 	for (;;) {							\
@@ -152,39 +201,26 @@ DRM_SPIN_WAKEUP_ALL(drm_waitqueue_t *q,
 		if (RET)						\
 			break;						\
 	}								\
 } while (0)
 
 #define	cv_wait_nointr(Q, I)	(cv_wait((Q), (I)), 0)
 
 #define	DRM_WAIT_NOINTR_UNTIL(RET, Q, I, C)				\
 	_DRM_WAIT_UNTIL(RET, cv_wait_nointr, Q, I, C)
 
 #define	DRM_WAIT_UNTIL(RET, Q, I, C)					\
 	_DRM_WAIT_UNTIL(RET, cv_wait_sig, Q, I, C)
 
-/*
- * Timed wait.  Return:
- *
- * - 0 if condition is false after timeout,
- * - 1 if condition is true after timeout or one tick before timeout,
- * - number of ticks left if condition evaluated to true before timeout, or
- * - negative error if failure (e.g., interrupted).
- *
- * XXX Comments in Linux say it returns -ERESTARTSYS if interrupted.
- * What if by a signal without SA_RESTART?  Shouldn't it be -EINTR
- * then?  I'm going to leave it as what cv_timedwait returned, which is
- * ERESTART for signals with SA_RESTART and EINTR otherwise.
- */
 #define	_DRM_TIMED_WAIT_UNTIL(RET, WAIT, Q, INTERLOCK, TICKS, CONDITION) do \
 {									\
 	extern int hardclock_ticks;					\
 	const int _dtwu_start = hardclock_ticks;			\
 	int _dtwu_ticks = (TICKS);					\
 	KASSERT(mutex_is_locked((INTERLOCK)));				\
 	ASSERT_SLEEPABLE();						\
 	KASSERT(!cold);							\
 	for (;;) {							\
 		if (CONDITION) {					\
 			(RET) = _dtwu_ticks;				\
 			break;						\
 		}							\
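
For comparison, the return-value translation that each
DRM_SPIN_TIMED_WAIT_UNTIL call site above had to carry, and which
DRM_SPIN_WAIT_ON's DRM_WAIT_ON-style return convention makes
unnecessary (as in the removed hunks; ret holds the timed wait's
result):

	if (ret < 0)		/* Failure: return negative error as is.  */
		;
	else if (ret == 0)	/* Timed out: return -EBUSY like Linux.  */
		ret = -EBUSY;
	else			/* Success (ret > 0): return 0.  */
		ret = 0;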