Fri Mar 12 01:53:36 2021 UTC
Remove extra unlock/lock processing around if_percpuq_enqueue().

same as if_wm.c:r1.700


(knakahara)
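
For context: if_percpuq_enqueue() only appends the mbuf to a per-CPU queue and
schedules a softint to run the input path later, so it does not sleep and is
safe to call with the RX lock held. The unlock/lock pair around
ixgbe_rx_input(), which hands the packet to if_percpuq_enqueue(), was
therefore unnecessary. A minimal standalone sketch of the before/after
pattern follows; the userland names (rx_lock, deliver, rxeof_old/new) are
illustrative stand-ins, not driver code.

/*
 * Sketch of the locking pattern this commit removes.  rx_lock stands
 * in for IXGBE_RX_LOCK()/IXGBE_RX_UNLOCK(), and deliver() for the
 * ixgbe_rx_input() -> if_percpuq_enqueue() path.
 */
#include <pthread.h>

static pthread_mutex_t rx_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Models if_percpuq_enqueue(): it only appends the packet to a
 * per-CPU queue and schedules a softint, so it never sleeps.
 */
static void
deliver(void *pkt)
{
	(void)pkt;	/* enqueue only; no blocking here */
}

/* r1.67 pattern: drop and retake the RX lock around delivery. */
void
rxeof_old(void *pkt)
{
	pthread_mutex_lock(&rx_lock);
	/* ... process receive descriptors ... */
	pthread_mutex_unlock(&rx_lock);
	deliver(pkt);
	pthread_mutex_lock(&rx_lock);
	/* ... refresh mbufs ... */
	pthread_mutex_unlock(&rx_lock);
}

/* r1.68 pattern: delivery is non-blocking, so keep the lock held. */
void
rxeof_new(void *pkt)
{
	pthread_mutex_lock(&rx_lock);
	/* ... process receive descriptors ... */
	deliver(pkt);
	/* ... refresh mbufs ... */
	pthread_mutex_unlock(&rx_lock);
}

Holding the lock across a short, non-sleeping handoff also avoids the churn
of releasing and reacquiring it for every received packet.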
cvs diff -r1.67 -r1.68 src/sys/dev/pci/ixgbe/ix_txrx.c

--- src/sys/dev/pci/ixgbe/ix_txrx.c 2021/03/09 10:03:18 1.67
+++ src/sys/dev/pci/ixgbe/ix_txrx.c 2021/03/12 01:53:36 1.68
@@ -1,14 +1,14 @@
-/* $NetBSD: ix_txrx.c,v 1.67 2021/03/09 10:03:18 msaitoh Exp $ */
+/* $NetBSD: ix_txrx.c,v 1.68 2021/03/12 01:53:36 knakahara Exp $ */
 
 /******************************************************************************
 
   Copyright (c) 2001-2017, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
 
   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
 
   2. Redistributions in binary form must reproduce the above copyright
@@ -2070,29 +2070,27 @@ ixgbe_rxeof(struct ix_queue *que)
 #endif
 		}
 next_desc:
 		ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
 		/* Advance our pointers to the next descriptor. */
 		if (++i == rxr->num_desc)
 			i = 0;
 
 		/* Now send to the stack or do LRO */
 		if (sendmp != NULL) {
 			rxr->next_to_check = i;
-			IXGBE_RX_UNLOCK(rxr);
 			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
-			IXGBE_RX_LOCK(rxr);
 			i = rxr->next_to_check;
 		}
 
 		/* Every 8 descriptors we go to refresh mbufs */
 		if (processed == 8) {
 			ixgbe_refresh_mbufs(rxr, i);
 			processed = 0;
 		}
 	}
 
 	/* Refresh any remaining buf structs */
 	if (ixgbe_rx_unrefreshed(rxr))
 		ixgbe_refresh_mbufs(rxr, i);