Wed Mar 21 16:26:05 2018 UTC
Remove these global variables. They are unused, racy, and the only thing
they do is trigger cache synchronization latencies between CPUs.


(maxv)
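
The race and the cache effect the log message refers to can be seen in a
minimal userland sketch (illustrative only, not part of the commit; the
counter and thread names are made up): several threads bumping one plain
shared counter both lose updates and force the cache line holding it to
migrate between CPUs on every increment, which is all that MCFail, MPFail
and MSFail ever did, since nothing read them.

/*
 * Illustrative userland sketch, not part of the commit: an
 * unsynchronized shared counter incremented from several CPUs,
 * like the removed MCFail/MPFail/MSFail globals.
 */
#include <pthread.h>
#include <stdio.h>

static int fail_counter;		/* shared, unsynchronized: racy */

static void *
worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		/*
		 * Plain read-modify-write with no atomicity or locking:
		 * increments can be lost, and the cache line holding
		 * fail_counter is pulled to the writing CPU in exclusive
		 * state on every iteration.
		 */
		fail_counter++;
	}
	return NULL;
}

int
main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* Typically prints less than 4000000 because of lost updates. */
	printf("%d\n", fail_counter);
	return 0;
}
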
diff -r1.182 -r1.183 src/sys/kern/uipc_mbuf.c

cvs diff -r1.182 -r1.183 src/sys/kern/uipc_mbuf.c

--- src/sys/kern/uipc_mbuf.c 2018/03/09 11:57:38 1.182
+++ src/sys/kern/uipc_mbuf.c 2018/03/21 16:26:04 1.183
@@ -1,14 +1,14 @@
-/*	$NetBSD: uipc_mbuf.c,v 1.182 2018/03/09 11:57:38 maxv Exp $	*/
+/*	$NetBSD: uipc_mbuf.c,v 1.183 2018/03/21 16:26:04 maxv Exp $	*/
 
 /*
  * Copyright (c) 1999, 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -52,27 +52,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)uipc_mbuf.c	8.4 (Berkeley) 2/14/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.182 2018/03/09 11:57:38 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uipc_mbuf.c,v 1.183 2018/03/21 16:26:04 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_mbuftrace.h"
 #include "opt_nmbclusters.h"
 #include "opt_ddb.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/atomic.h>
 #include <sys/cpu.h>
 #include <sys/proc.h>
 #include <sys/mbuf.h>
@@ -697,28 +697,26 @@ m_prepend(struct mbuf *m, int len, int h
 		if (len < MLEN)
 			M_ALIGN(m, len);
 	}
 
 	m->m_len = len;
 	return m;
 }
 
 /*
  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
  * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
  * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
  */
-int MCFail;
-
 struct mbuf *
 m_copym(struct mbuf *m, int off0, int len, int wait)
 {
 
 	return m_copym0(m, off0, len, wait, false); /* shallow copy on M_EXT */
 }
 
 struct mbuf *
 m_dup(struct mbuf *m, int off0, int len, int wait)
 {
 
 	return m_copym0(m, off0, len, wait, true); /* deep copy */
 }
@@ -802,34 +800,30 @@ m_copym0(struct mbuf *m, int off0, int l
 		len -= n->m_len;
 		off += n->m_len;
 #ifdef DIAGNOSTIC
 		if (off > m->m_len)
 			panic("m_copym0 overrun %d %d", off, m->m_len);
 #endif
 		if (off == m->m_len) {
 			m = m->m_next;
 			off = 0;
 		}
 		np = &n->m_next;
 	}
 
-	if (top == NULL)
-		MCFail++;
-
 	return top;
 
 nospace:
 	m_freem(top);
-	MCFail++;
 	return NULL;
 }
 
 /*
  * Copy an entire packet, including header (which must be present).
  * An optimization of the common case 'm_copym(m, 0, M_COPYALL, how)'.
  */
 struct mbuf *
 m_copypacket(struct mbuf *m, int how)
 {
 	struct mbuf *top, *n, *o;
 
 	n = m_get(how, m->m_type);
@@ -861,27 +855,26 @@ m_copypacket(struct mbuf *m, int how)
 		if (m->m_flags & M_EXT) {
 			n->m_data = m->m_data;
 			MCLADDREFERENCE(m, n);
 		} else {
 			memcpy(mtod(n, char *), mtod(m, char *), n->m_len);
 		}
 
 		m = m->m_next;
 	}
 	return top;
 
 nospace:
 	m_freem(top);
-	MCFail++;
 	return NULL;
 }
 
 /*
  * Copy data from an mbuf chain starting "off" bytes from the beginning,
  * continuing for "len" bytes, into the indicated buffer.
  */
 void
 m_copydata(struct mbuf *m, int off, int len, void *vp)
 {
 	unsigned count;
 	void *cp = vp;
 	struct mbuf *m0 = m;
@@ -1072,50 +1065,45 @@ m_ensure_contig(struct mbuf **m0, int le
 		else
 			n = m_free(n);
 	} while (len > 0 && n);
 
 	m->m_next = n;
 	*m0 = m;
 
 	return len <= 0;
 }
 
 /*
  * m_pullup: same as m_ensure_contig(), but destroys mbuf chain on error.
  */
-int MPFail;
-
 struct mbuf *
 m_pullup(struct mbuf *n, int len)
 {
 	struct mbuf *m = n;
 
 	KASSERT(len != M_COPYALL);
 	if (!m_ensure_contig(&m, len)) {
 		KASSERT(m != NULL);
 		m_freem(m);
-		MPFail++;
 		m = NULL;
 	}
 	return m;
 }
 
 /*
  * Like m_pullup(), except a new mbuf is always allocated, and we allow
  * the amount of empty space before the data in the new mbuf to be specified
  * (in the event that the caller expects to prepend later).
  */
-int MSFail;
-
 struct mbuf *
 m_copyup(struct mbuf *n, int len, int dstoff)
 {
 	struct mbuf *m;
 	int count, space;
 
 	KASSERT(len != M_COPYALL);
 	if (len > (MHLEN - dstoff))
 		goto bad;
 	m = m_get(M_DONTWAIT, n->m_type);
 	if (m == NULL)
 		goto bad;
 	MCLAIM(m, n->m_owner);
@@ -1135,27 +1123,26 @@ m_copyup(struct mbuf *n, int len, int ds
 		if (n->m_len)
 			n->m_data += count;
 		else
 			n = m_free(n);
 	} while (len > 0 && n);
 	if (len > 0) {
 		(void) m_free(m);
 		goto bad;
 	}
 	m->m_next = n;
 	return (m);
  bad:
 	m_freem(n);
-	MSFail++;
 	return (NULL);
 }
 
 /*
  * Partition an mbuf chain in two pieces, returning the tail --
  * all but the first len0 bytes. In case of failure, it returns NULL and
  * attempts to restore the chain to its original state.
  */
 struct mbuf *
 m_split(struct mbuf *m0, int len0, int wait)
 {
 
 	return m_split0(m0, len0, wait, true);
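
For completeness, the removed counters were also redundant as error
indicators: callers already learn about allocation failure from the NULL
return of these routines, and m_pullup() frees the chain itself on error
(as its comment above states). A hypothetical caller sketch, assuming a
made-up function name; the headers and the m_pullup()/mtod() usage follow
the normal mbuf API:

/*
 * Hypothetical caller sketch, not from this commit: failure of
 * m_pullup() is visible through its NULL return value, so no global
 * failure counter is needed to detect it.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

static struct ip *
example_get_ip_header(struct mbuf **mp)
{
	struct mbuf *m = *mp;

	if (m->m_len < (int)sizeof(struct ip)) {
		/* m_pullup() frees the chain and returns NULL on failure. */
		if ((m = m_pullup(m, sizeof(struct ip))) == NULL) {
			*mp = NULL;
			return NULL;
		}
		*mp = m;
	}
	return mtod(m, struct ip *);
}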