Sun Mar 10 19:00:27 2024 UTC
Pull up following revision(s) (requested by riastradh in ticket #1942):

	sys/dev/usb/if_urtwn.c: revision 1.109 (patch)

urtwn(4): Ditch old queued commands on overflow.
Don't increment ring->queued past what the task will decrement.

This is a stop-gap measure; really, we should just have one task for
each operation that is deferred to the task thread.

PR kern/57965


(martin)
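The gist of revision 1.109 is that urtwn_do_async() no longer increments ring->queued unconditionally: when the ring is already full it reports an overflow and lets the new entry overwrite the oldest pending slot, and the task is scheduled only when the queue goes from empty to non-empty, so the task thread never decrements the counter below what was actually enqueued. The following is a minimal, self-contained sketch of that counter/scheduling pattern, not the driver code itself; the names cmd_ring, cmd_enqueue and CMD_RING_COUNT are made up for illustration, and the real driver does this on sc->cmdq while holding sc_task_mtx and also checks sc_dying.

#include <stdbool.h>
#include <stdio.h>

#define CMD_RING_COUNT	32	/* stands in for URTWN_HOST_CMD_RING_COUNT */

struct cmd_ring {
	int cur;	/* producer index */
	int next;	/* consumer index */
	int queued;	/* commands enqueued but not yet processed */
};

/*
 * Enqueue one command slot.  Returns true if the caller should schedule
 * the task, mirroring the "schedtask" flag in the patch.  On overflow the
 * slot of the oldest queued command is reused and queued is left alone,
 * so the consumer never decrements it past zero.
 */
static bool
cmd_enqueue(struct cmd_ring *ring)
{
	bool schedtask = false;

	/* The slot at ring->cur has been filled; advance the producer. */
	ring->cur = (ring->cur + 1) % CMD_RING_COUNT;

	if (ring->queued == CMD_RING_COUNT)
		printf("command queue overflow\n");	/* oldest command ditched */
	else if (ring->queued++ == 0)
		schedtask = true;			/* first pending command */

	return schedtask;
}

int
main(void)
{
	struct cmd_ring ring = { 0, 0, 0 };

	/* First command: the task must be scheduled. */
	if (cmd_enqueue(&ring))
		printf("schedule task\n");

	/* Fill the ring; the final enqueue just reports an overflow. */
	for (int i = 1; i <= CMD_RING_COUNT; i++)
		(void)cmd_enqueue(&ring);

	return 0;
}

In the sketch, as in the patch, the consumer side (urtwn_task in the driver) decrements queued once per processed command, so queued stays within [0, CMD_RING_COUNT] even when producers outrun the task thread.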
diff -r1.53.2.6 -r1.53.2.7 src/sys/dev/usb/if_urtwn.c

cvs diff -r1.53.2.6 -r1.53.2.7 src/sys/dev/usb/if_urtwn.c

--- src/sys/dev/usb/if_urtwn.c 2019/12/14 12:33:47 1.53.2.6
+++ src/sys/dev/usb/if_urtwn.c 2024/03/10 19:00:27 1.53.2.7
@@ -1,41 +1,41 @@
-/*	$NetBSD: if_urtwn.c,v 1.53.2.6 2019/12/14 12:33:47 martin Exp $	*/
+/*	$NetBSD: if_urtwn.c,v 1.53.2.7 2024/03/10 19:00:27 martin Exp $	*/
 /*	$OpenBSD: if_urtwn.c,v 1.42 2015/02/10 23:25:46 mpi Exp $	*/
 
 /*-
  * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
  * Copyright (c) 2014 Kevin Lo <kevlo@FreeBSD.org>
  * Copyright (c) 2016 Nathanial Sloss <nathanialsloss@yahoo.com.au>
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  * copyright notice and this permission notice appear in all copies.
  *
  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
 /*-
  * Driver for Realtek RTL8188CE-VAU/RTL8188CUS/RTL8188EU/RTL8188RU/RTL8192CU
  * RTL8192EU.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_urtwn.c,v 1.53.2.6 2019/12/14 12:33:47 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_urtwn.c,v 1.53.2.7 2024/03/10 19:00:27 martin Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_inet.h"
 #include "opt_usb.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/sockio.h>
 #include <sys/sysctl.h>
 #include <sys/mbuf.h>
 #include <sys/kernel.h>
 #include <sys/socket.h>
 #include <sys/systm.h>
@@ -798,80 +798,115 @@ urtwn_free_tx_list(struct urtwn_softc *s
 
 	/* NB: Caller must abort pipe first. */
 	for (size_t j = 0; j < sc->tx_npipe; j++) {
 		for (i = 0; i < URTWN_TX_LIST_COUNT; i++) {
 			CTASSERT(sizeof(xfer) == sizeof(void *));
 			xfer = atomic_swap_ptr(&sc->tx_data[j][i].xfer, NULL);
 			if (xfer != NULL)
 				usbd_destroy_xfer(xfer);
 		}
 	}
 }
 
 static void
+urtwn_cmdq_invariants(struct urtwn_softc *sc)
+{
+	struct urtwn_host_cmd_ring *const ring __diagused = &sc->cmdq;
+
+	KASSERT(mutex_owned(&sc->sc_task_mtx));
+	KASSERTMSG((ring->cur >= 0 && ring->cur < URTWN_HOST_CMD_RING_COUNT),
+	    "%s: cur=%d next=%d queued=%d",
+	    device_xname(sc->sc_dev), ring->cur, ring->next, ring->queued);
+	KASSERTMSG((ring->next >= 0 && ring->next < URTWN_HOST_CMD_RING_COUNT),
+	    "%s: cur=%d next=%d queued=%d",
+	    device_xname(sc->sc_dev), ring->cur, ring->next, ring->queued);
+	KASSERTMSG((ring->queued >= 0 &&
+	    ring->queued <= URTWN_HOST_CMD_RING_COUNT),
+	    "%s: %d commands queued",
+	    device_xname(sc->sc_dev), ring->queued);
+}
+
+static void
 urtwn_task(void *arg)
 {
 	struct urtwn_softc *sc = arg;
 	struct urtwn_host_cmd_ring *ring = &sc->cmdq;
 	struct urtwn_host_cmd *cmd;
 	int s;
 
 	DPRINTFN(DBG_FN, ("%s: %s\n", device_xname(sc->sc_dev), __func__));
 
 	/* Process host commands. */
 	s = splusb();
 	mutex_spin_enter(&sc->sc_task_mtx);
+	urtwn_cmdq_invariants(sc);
 	while (ring->next != ring->cur) {
+		KASSERTMSG(ring->queued > 0, "%s: cur=%d next=%d queued=%d",
+		    device_xname(sc->sc_dev),
+		    ring->cur, ring->next, ring->queued);
 		cmd = &ring->cmd[ring->next];
 		mutex_spin_exit(&sc->sc_task_mtx);
 		splx(s);
 		/* Invoke callback with kernel lock held. */
 		cmd->cb(sc, cmd->data);
 		s = splusb();
 		mutex_spin_enter(&sc->sc_task_mtx);
+		urtwn_cmdq_invariants(sc);
+		KASSERTMSG(ring->queued > 0, "%s: cur=%d next=%d queued=%d",
+		    device_xname(sc->sc_dev),
+		    ring->cur, ring->next, ring->queued);
 		ring->queued--;
 		ring->next = (ring->next + 1) % URTWN_HOST_CMD_RING_COUNT;
 	}
 	mutex_spin_exit(&sc->sc_task_mtx);
 	wakeup(&sc->cmdq);
 	splx(s);
 }
 
 static void
 urtwn_do_async(struct urtwn_softc *sc, void (*cb)(struct urtwn_softc *, void *),
     void *arg, int len)
 {
 	struct urtwn_host_cmd_ring *ring = &sc->cmdq;
 	struct urtwn_host_cmd *cmd;
+	bool schedtask = false;
 	int s;
 
 	DPRINTFN(DBG_FN, ("%s: %s: cb=%p, arg=%p, len=%d\n",
 	    device_xname(sc->sc_dev), __func__, cb, arg, len));
 
 	s = splusb();
 	mutex_spin_enter(&sc->sc_task_mtx);
+	urtwn_cmdq_invariants(sc);
 	cmd = &ring->cmd[ring->cur];
 	cmd->cb = cb;
 	KASSERT(len <= sizeof(cmd->data));
 	memcpy(cmd->data, arg, len);
 	ring->cur = (ring->cur + 1) % URTWN_HOST_CMD_RING_COUNT;
 
-	/* If there is no pending command already, schedule a task. */
-	if (!sc->sc_dying && ++ring->queued == 1) {
-		mutex_spin_exit(&sc->sc_task_mtx);
-		usb_add_task(sc->sc_udev, &sc->sc_task, USB_TASKQ_DRIVER);
-	} else
-		mutex_spin_exit(&sc->sc_task_mtx);
+	/*
+	 * Schedule a task to process the command if need be.
+	 */
+	if (!sc->sc_dying) {
+		if (ring->queued == URTWN_HOST_CMD_RING_COUNT)
+			device_printf(sc->sc_dev, "command queue overflow\n");
+		else if (ring->queued++ == 0)
+			schedtask = true;
+	}
+	mutex_spin_exit(&sc->sc_task_mtx);
 	splx(s);
+
+	if (schedtask)
+		usb_add_task(sc->sc_udev, &sc->sc_task, USB_TASKQ_DRIVER);
 }
 
 static void
 urtwn_wait_async(struct urtwn_softc *sc)
 {
 
 	DPRINTFN(DBG_FN, ("%s: %s\n", device_xname(sc->sc_dev), __func__));
 
 	/* Wait for all queued asynchronous commands to complete. */
 	while (sc->cmdq.queued > 0)
 		tsleep(&sc->cmdq, 0, "endtask", 0);
 }
 