Tue Oct 25 23:33:44 2022 UTC
vmwgfx(4): Convert cmdbuf to drm_waitqueue_t.


(riastradh)
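
The Linux wait_queue_head_t API (wait_event*/wake_up*) carries no
interlock; NetBSD's drm_waitqueue_t pairs every wait and wakeup with a
spin lock, here man->lock, so the condition is always tested under that
lock.  Below is a minimal sketch of the pattern used throughout this
change, assuming the DRM_* macros from drm2's <drm/drm_wait_netbsd.h>;
struct example and the example_* functions are hypothetical stand-ins
for vmw_cmdbuf_man, its lock, and the idle condition, not code from
this driver:

/* Illustrative only; everything outside the DRM_* and spin_lock*
 * calls is made up for the example. */
#include <drm/drm_wait_netbsd.h>	/* drm_waitqueue_t, DRM_SPIN_* */

struct example {
	spinlock_t	lock;
	drm_waitqueue_t	wq;
	bool		done;
};

static void
example_init(struct example *e)
{
	spin_lock_init(&e->lock);
	DRM_INIT_WAITQUEUE(&e->wq, "example");
}

static int
example_wait(struct example *e, unsigned long timeout)
{
	int ret;

	spin_lock(&e->lock);
	/* Sleep until e->done, dropping the lock while asleep. */
	DRM_SPIN_TIMED_WAIT_UNTIL(ret, &e->wq, &e->lock, timeout, e->done);
	spin_unlock(&e->lock);

	return ret;	/* <0 signal/error, 0 timed out, >0 condition true */
}

static void
example_signal(struct example *e)
{
	spin_lock(&e->lock);
	e->done = true;
	DRM_SPIN_WAKEUP_ALL(&e->wq, &e->lock);	/* wake under the interlock */
	spin_unlock(&e->lock);
}

static void
example_fini(struct example *e)
{
	DRM_DESTROY_WAITQUEUE(&e->wq);
	spin_lock_destroy(&e->lock);
}
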
diff -r1.5 -r1.6 src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c

--- src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c 2022/10/25 23:32:04 1.5
+++ src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c 2022/10/25 23:33:44 1.6
@@ -1,14 +1,14 @@
-/*	$NetBSD: vmwgfx_cmdbuf.c,v 1.5 2022/10/25 23:32:04 riastradh Exp $	*/
+/*	$NetBSD: vmwgfx_cmdbuf.c,v 1.6 2022/10/25 23:33:44 riastradh Exp $	*/
 
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
  * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
@@ -18,27 +18,27 @@
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  **************************************************************************/
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vmwgfx_cmdbuf.c,v 1.5 2022/10/25 23:32:04 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vmwgfx_cmdbuf.c,v 1.6 2022/10/25 23:33:44 riastradh Exp $");
 
 #include <linux/dmapool.h>
 #include <linux/pci.h>
 
 #include <drm/ttm/ttm_bo_api.h>
 
 #include "vmwgfx_drv.h"
 
 #include <linux/nbsd-namespace.h>
 
 /*
  * Size of inline command buffers. Try to make sure that a page size is a
  * multiple of the DMA pool allocation size.
@@ -122,28 +122,28 @@ struct vmw_cmdbuf_man {
 	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 	struct list_head error;
 	struct drm_mm mm;
 	struct ttm_buffer_object *cmd_space;
 	struct ttm_bo_kmap_obj map_obj;
 	u8 *map;
 	struct vmw_cmdbuf_header *cur;
 	size_t cur_pos;
 	size_t default_size;
 	unsigned max_hw_submitted;
 	spinlock_t lock;
 	struct dma_pool *headers;
 	struct dma_pool *dheaders;
-	wait_queue_head_t alloc_queue;
-	wait_queue_head_t idle_queue;
+	drm_waitqueue_t alloc_queue;
+	drm_waitqueue_t idle_queue;
 	bool irq_on;
 	bool using_mob;
 	bool has_pool;
 #ifdef __NetBSD__
 	bus_dmamap_t dmamap;
 	bus_dma_segment_t dmaseg;
 #endif
 	dma_addr_t handle;
 	size_t size;
 	u32 num_contexts;
 };
 
 /**
@@ -266,27 +266,27 @@ static void vmw_cmdbuf_header_inline_fre
  */
 static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 {
 	struct vmw_cmdbuf_man *man = header->man;
 
 	lockdep_assert_held_once(&man->lock);
 
 	if (header->inline_space) {
 		vmw_cmdbuf_header_inline_free(header);
 		return;
 	}
 
 	drm_mm_remove_node(&header->node);
-	wake_up_all(&man->alloc_queue);
+	DRM_SPIN_WAKEUP_ALL(&man->alloc_queue, &man->lock); /* XXX */
 	if (header->cb_header)
 		dma_pool_free(man->headers, header->cb_header,
 			      header->handle);
 	kfree(header);
 }
 
 /**
  * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
  * associated structures.
  *
  * @header: Pointer to the header to free.
  */
 void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
@@ -381,36 +381,38 @@ static void vmw_cmdbuf_ctx_submit(struct
  * @man: The command buffer manager.
  * @ctx: The command buffer context.
  *
  * Submit command buffers to hardware if possible, and process finished
  * buffers. Typically freeing them, but on preemption or error take
  * appropriate action. Wake up waiters if appropriate.
  */
 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 				   struct vmw_cmdbuf_context *ctx,
 				   int *notempty)
 {
 	struct vmw_cmdbuf_header *entry, *next;
 
+	assert_spin_locked(&man->lock);
+
 	vmw_cmdbuf_ctx_submit(man, ctx);
 
 	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
 		SVGACBStatus status = entry->cb_header->status;
 
 		if (status == SVGA_CB_STATUS_NONE)
 			break;
 
 		list_del(&entry->list);
-		wake_up_all(&man->idle_queue);
+		DRM_SPIN_WAKEUP_ONE(&man->idle_queue, &man->lock);
 		ctx->num_hw_submitted--;
 		switch (status) {
 		case SVGA_CB_STATUS_COMPLETED:
 			__vmw_cmdbuf_header_free(entry);
 			break;
 		case SVGA_CB_STATUS_COMMAND_ERROR:
 			WARN_ONCE(true, "Command buffer error.\n");
 			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 			list_add_tail(&entry->list, &man->error);
 			schedule_work(&man->work);
 			break;
 		case SVGA_CB_STATUS_PREEMPTED:
 			entry->cb_header->status = SVGA_CB_STATUS_NONE;
@@ -438,26 +440,28 @@ static void vmw_cmdbuf_ctx_process(struc
  *
  * @man: The command buffer manager.
  *
  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  * command buffers left that are not submitted to hardware, Make sure
  * IRQ handling is turned on. Otherwise, make sure it's turned off.
  */
 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 {
 	int notempty;
 	struct vmw_cmdbuf_context *ctx;
 	int i;
 
+	assert_spin_locked(&man->lock);
+
 retry:
 	notempty = 0;
 	for_each_cmdbuf_ctx(man, i, ctx)
 		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 
 	if (man->irq_on && !notempty) {
 		vmw_generic_waiter_remove(man->dev_priv,
 					  SVGA_IRQFLAG_COMMAND_BUFFER,
 					  &man->dev_priv->cmdbuf_waiters);
 		man->irq_on = false;
 	} else if (!man->irq_on && notempty) {
 		vmw_generic_waiter_add(man->dev_priv,
 				       SVGA_IRQFLAG_COMMAND_BUFFER,
@@ -612,60 +616,61 @@ static void vmw_cmdbuf_work_func(struct
 		ctx->block_submission = false;
 		list_splice_init(&restart_head[i], &ctx->submitted);
 	}
 
 	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
 
 	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
 		DRM_ERROR("Failed restarting command buffer contexts\n");
 
 	/* Send a new fence in case one was removed */
 	if (send_fence) {
 		vmw_fifo_send_fence(man->dev_priv, &dummy);
-		wake_up_all(&man->idle_queue);
+		spin_lock(&man->lock);
+		DRM_SPIN_WAKEUP_ALL(&man->idle_queue, &man->lock);
+		spin_unlock(&man->lock);
 	}
 
 	mutex_unlock(&man->error_mutex);
 }
 
 /**
  * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle.
  *
  * @man: The command buffer manager.
  * @check_preempted: Check also the preempted queue for pending command buffers.
  *
  */
 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 				bool check_preempted)
 {
 	struct vmw_cmdbuf_context *ctx;
 	bool idle = false;
 	int i;
 
-	spin_lock(&man->lock);
+	assert_spin_locked(&man->lock);
+
 	vmw_cmdbuf_man_process(man);
 	for_each_cmdbuf_ctx(man, i, ctx) {
 		if (!list_empty(&ctx->submitted) ||
 		    !list_empty(&ctx->hw_submitted) ||
 		    (check_preempted && !list_empty(&ctx->preempted)))
-			goto out_unlock;
+			goto out;
 	}
 
 	idle = list_empty(&man->error);
 
-out_unlock:
-	spin_unlock(&man->lock);
-
+out:
 	return idle;
 }
 
 /**
  * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
  * command submissions
  *
  * @man: The command buffer manager.
  *
  * Flushes the current command buffer without allocating a new one. A new one
  * is automatically allocated when needed. Call with @man->cur_mutex held.
  */
 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
@@ -722,48 +727,48 @@ int vmw_cmdbuf_cur_flush(struct vmw_cmdb
  * @interruptible: Sleep interruptible while waiting.
  * @timeout: Time out after this many ticks.
  *
  * Wait until the command buffer manager has processed all command buffers,
  * or until a timeout occurs. If a timeout occurs, the function will return
  * -EBUSY.
  */
 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 		    unsigned long timeout)
 {
 	int ret;
 
 	ret = vmw_cmdbuf_cur_flush(man, interruptible);
+	spin_lock(&man->lock);
 	vmw_generic_waiter_add(man->dev_priv,
 			       SVGA_IRQFLAG_COMMAND_BUFFER,
 			       &man->dev_priv->cmdbuf_waiters);
-
 	if (interruptible) {
-		ret = wait_event_interruptible_timeout
-			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
-			 timeout);
+		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &man->idle_queue, &man->lock,
+		    timeout, vmw_cmdbuf_man_idle(man, true));
 	} else {
-		ret = wait_event_timeout
-			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
-			 timeout);
+		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &man->idle_queue,
+		    &man->lock,
+		    timeout, vmw_cmdbuf_man_idle(man, true));
 	}
 	vmw_generic_waiter_remove(man->dev_priv,
 				  SVGA_IRQFLAG_COMMAND_BUFFER,
 				  &man->dev_priv->cmdbuf_waiters);
 	if (ret == 0) {
 		if (!vmw_cmdbuf_man_idle(man, true))
 			ret = -EBUSY;
 		else
 			ret = 0;
 	}
+	spin_unlock(&man->lock);
 	if (ret > 0)
 		ret = 0;
 
 	return ret;
 }
 
 /**
  * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
  *
  * @man: The command buffer manager.
  * @info: Allocation info. Will hold the size on entry and allocated mm node
  * on successful return.
  *
@@ -815,55 +820,62 @@ static int vmw_cmdbuf_alloc_space(struct
 	info.node = node;
 	info.done = false;
 
 	/*
 	 * To prevent starvation of large requests, only one allocating call
 	 * at a time waiting for space.
 	 */
 	if (interruptible) {
 		if (mutex_lock_interruptible(&man->space_mutex))
 			return -ERESTARTSYS;
 	} else {
 		mutex_lock(&man->space_mutex);
 	}
+	spin_lock(&man->lock);
 
 	/* Try to allocate space without waiting. */
 	if (vmw_cmdbuf_try_alloc(man, &info))
 		goto out_unlock;
 
 	vmw_generic_waiter_add(man->dev_priv,
 			       SVGA_IRQFLAG_COMMAND_BUFFER,
 			       &man->dev_priv->cmdbuf_waiters);
 
 	if (interruptible) {
 		int ret;
 
-		ret = wait_event_interruptible
-			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+		DRM_SPIN_WAIT_UNTIL(ret, &man->alloc_queue, &man->lock,
+		    vmw_cmdbuf_try_alloc(man, &info));
 		if (ret) {
 			vmw_generic_waiter_remove
 				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 				 &man->dev_priv->cmdbuf_waiters);
+			spin_unlock(&man->lock);
 			mutex_unlock(&man->space_mutex);
 			return ret;
 		}
 	} else {
-		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+		int ret;
+
+		DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &man->alloc_queue, &man->lock,
+		    vmw_cmdbuf_try_alloc(man, &info));
+		BUG_ON(ret);
 	}
 	vmw_generic_waiter_remove(man->dev_priv,
 				  SVGA_IRQFLAG_COMMAND_BUFFER,
 				  &man->dev_priv->cmdbuf_waiters);
 
 out_unlock:
+	spin_unlock(&man->lock);
 	mutex_unlock(&man->space_mutex);
 
 	return 0;
 }
 
 /**
  * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
  * space from the main pool.
  *
  * @man: The command buffer manager.
  * @header: Pointer to the header to set up.
  * @size: The requested size of the buffer space.
  * @interruptible: Whether to sleep interruptible while waiting for space.
@@ -1377,28 +1389,28 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_cr
 		ret = -ENOMEM;
 		goto out_no_dpool;
 	}
 
 	for_each_cmdbuf_ctx(man, i, ctx)
 		vmw_cmdbuf_ctx_init(ctx);
 
 	INIT_LIST_HEAD(&man->error);
 	spin_lock_init(&man->lock);
 	mutex_init(&man->cur_mutex);
 	mutex_init(&man->space_mutex);
 	mutex_init(&man->error_mutex);
 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
-	init_waitqueue_head(&man->alloc_queue);
-	init_waitqueue_head(&man->idle_queue);
+	DRM_INIT_WAITQUEUE(&man->alloc_queue, "vmwgfxaq");
+	DRM_INIT_WAITQUEUE(&man->idle_queue, "vmwgfxiq");
 	man->dev_priv = dev_priv;
 	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
 	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
 	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
 			       &dev_priv->error_waiters);
 	ret = vmw_cmdbuf_startstop(man, 0, true);
 	if (ret) {
 		DRM_ERROR("Failed starting command buffer contexts\n");
 		vmw_cmdbuf_man_destroy(man);
 		return ERR_PTR(ret);
 	}
 
 	return man;
@@ -1458,18 +1470,21 @@ void vmw_cmdbuf_remove_pool(struct vmw_c
 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
 {
 	WARN_ON_ONCE(man->has_pool);
 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
 
 	if (vmw_cmdbuf_startstop(man, 0, false))
 		DRM_ERROR("Failed stopping command buffer contexts.\n");
 
 	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
 				  &man->dev_priv->error_waiters);
 	(void) cancel_work_sync(&man->work);
 	dma_pool_destroy(man->dheaders);
 	dma_pool_destroy(man->headers);
+	DRM_DESTROY_WAITQUEUE(&man->idle_queue);
+	DRM_DESTROY_WAITQUEUE(&man->alloc_queue);
 	mutex_destroy(&man->cur_mutex);
 	mutex_destroy(&man->space_mutex);
 	mutex_destroy(&man->error_mutex);
+	spin_lock_destroy(&man->lock);
 	kfree(man);
 }
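
The uninterruptible branch in vmw_cmdbuf_alloc_space() above uses the
NOINTR variant, which has no timeout and cannot be interrupted by a
signal; the new code asserts this with BUG_ON(ret).  A sketch of that
shape, reusing the hypothetical struct example from the sketch before
the diff:

static void
example_wait_nointr(struct example *e)
{
	int ret;

	spin_lock(&e->lock);
	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &e->wq, &e->lock, e->done);
	BUG_ON(ret);	/* no timeout, no signals: expected to be 0 */
	spin_unlock(&e->lock);
}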