Fri Jul 29 20:29:38 2016 UTC
Don't trigger BJ_ASSERT(false) on an invalid BPF_Jxxx opcode in jmp_to_cond().

This change helps bpfjit survive AFL fuzzing of filter programs that haven't been passed through bpf_validate() first.
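(For context, a rough sketch of the kind of standalone harness this enables; the header paths and the bpfjit_generate_code()/bpfjit_free_code() calls follow my understanding of the userland bpfjit library and are not part of this change:)

	#include <stdio.h>
	#include <net/bpf.h>	/* struct bpf_insn */
	#include <net/bpfjit.h>	/* userland bpfjit API (assumed header path) */

	int
	main(void)
	{
		struct bpf_insn insns[256];
		size_t n;

		/* Read raw, unvalidated instructions, as AFL would mutate them. */
		n = fread(insns, sizeof(insns[0]), 256, stdin);
		if (n > 0) {
			/*
			 * Deliberately no bpf_validate(): compilation must
			 * fail gracefully instead of hitting BJ_ASSERT(false).
			 */
			bpfjit_func_t code = bpfjit_generate_code(NULL, insns, n);
			if (code != NULL)
				bpfjit_free_code(code);
		}
		return 0;
	}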

Also change the alu_to_op() function to have a similar interface: report failure with a bool return and pass the resulting sljit operation out via a pointer argument. A before/after sketch of the convention follows.
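(A minimal before/after sketch of the calling convention, mirroring the BPF_ALU call site in the diff below:)

	/* before (rev 1.45): sentinel return value */
	const int op2 = bpf_alu_to_sljit_op(pc);
	if (op2 == SLJIT_UNUSED)
		goto fail;

	/* after (rev 1.46): bool status, operation returned via pointer */
	int op2;
	if (!alu_to_op(pc, &op2))
		goto fail;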


(alnsn)
cvs diff -r1.45 -r1.46 src/sys/net/bpfjit.c

--- src/sys/net/bpfjit.c 2016/05/29 17:20:22 1.45
+++ src/sys/net/bpfjit.c 2016/07/29 20:29:38 1.46
@@ -1,14 +1,14 @@
-/*	$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $	*/
+/*	$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $	*/
 
 /*-
  * Copyright (c) 2011-2015 Alexander Nasonov.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in
@@ -21,29 +21,29 @@
  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 #ifdef _KERNEL
-__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $");
 #else
-__RCSID("$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $");
+__RCSID("$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $");
 #endif
 
 #include <sys/types.h>
 #include <sys/queue.h>
 
 #ifndef _KERNEL
 #include <assert.h>
 #define BJ_ASSERT(c) assert(c)
 #else
 #define BJ_ASSERT(c) KASSERT(c)
 #endif
 
 #ifndef _KERNEL
@@ -1584,80 +1584,94 @@ optimize(const bpf_ctx_t *bc, const stru
 
 	if (!optimize_pass1(bc, insns, insn_dat, insn_count, initmask, hints))
 		return false;
 
 	optimize_pass2(bc, insns, insn_dat, insn_count);
 	optimize_pass3(insns, insn_dat, insn_count);
 
 	return true;
 }
 
 /*
  * Convert BPF_ALU operations except BPF_NEG and BPF_DIV to sljit operation.
  */
-static int
-bpf_alu_to_sljit_op(const struct bpf_insn *pc)
+static bool
+alu_to_op(const struct bpf_insn *pc, int *res)
 {
-	const int bad = SLJIT_UNUSED;
 	const uint32_t k = pc->k;
 
 	/*
 	 * Note: all supported 64bit arches have 32bit multiply
 	 * instruction so SLJIT_I32_OP doesn't have any overhead.
 	 */
 	switch (BPF_OP(pc->code)) {
-	case BPF_ADD: return SLJIT_ADD;
-	case BPF_SUB: return SLJIT_SUB;
-	case BPF_MUL: return SLJIT_MUL|SLJIT_I32_OP;
-	case BPF_OR:  return SLJIT_OR;
-	case BPF_XOR: return SLJIT_XOR;
-	case BPF_AND: return SLJIT_AND;
-	case BPF_LSH: return (k > 31) ? bad : SLJIT_SHL;
-	case BPF_RSH: return (k > 31) ? bad : SLJIT_LSHR|SLJIT_I32_OP;
+	case BPF_ADD:
+		*res = SLJIT_ADD;
+		return true;
+	case BPF_SUB:
+		*res = SLJIT_SUB;
+		return true;
+	case BPF_MUL:
+		*res = SLJIT_MUL|SLJIT_I32_OP;
+		return true;
+	case BPF_OR:
+		*res = SLJIT_OR;
+		return true;
+	case BPF_XOR:
+		*res = SLJIT_XOR;
+		return true;
+	case BPF_AND:
+		*res = SLJIT_AND;
+		return true;
+	case BPF_LSH:
+		*res = SLJIT_SHL;
+		return k < 32;
+	case BPF_RSH:
+		*res = SLJIT_LSHR|SLJIT_I32_OP;
+		return k < 32;
 	default:
-		return bad;
+		return false;
 	}
 }
 
 /*
  * Convert BPF_JMP operations except BPF_JA to sljit condition.
  */
-static int
-bpf_jmp_to_sljit_cond(const struct bpf_insn *pc, bool negate)
+static bool
+jmp_to_cond(const struct bpf_insn *pc, bool negate, int *res)
 {
+
 	/*
 	 * Note: all supported 64bit arches have 32bit comparison
 	 * instructions so SLJIT_I32_OP doesn't have any overhead.
 	 */
-	int rv = SLJIT_I32_OP;
+	*res = SLJIT_I32_OP;
 
 	switch (BPF_OP(pc->code)) {
 	case BPF_JGT:
-		rv |= negate ? SLJIT_LESS_EQUAL : SLJIT_GREATER;
-		break;
+		*res |= negate ? SLJIT_LESS_EQUAL : SLJIT_GREATER;
+		return true;
 	case BPF_JGE:
-		rv |= negate ? SLJIT_LESS : SLJIT_GREATER_EQUAL;
-		break;
+		*res |= negate ? SLJIT_LESS : SLJIT_GREATER_EQUAL;
+		return true;
 	case BPF_JEQ:
-		rv |= negate ? SLJIT_NOT_EQUAL : SLJIT_EQUAL;
-		break;
+		*res |= negate ? SLJIT_NOT_EQUAL : SLJIT_EQUAL;
+		return true;
 	case BPF_JSET:
-		rv |= negate ? SLJIT_EQUAL : SLJIT_NOT_EQUAL;
-		break;
+		*res |= negate ? SLJIT_EQUAL : SLJIT_NOT_EQUAL;
+		return true;
 	default:
-		BJ_ASSERT(false);
+		return false;
 	}
-
-	return rv;
 }
 
 /*
  * Convert BPF_K and BPF_X to sljit register.
  */
 static int
 kx_to_reg(const struct bpf_insn *pc)
 {
 
 	switch (BPF_SRC(pc->code)) {
 	case BPF_K: return SLJIT_IMM;
 	case BPF_X: return BJ_XREG;
 	default:
@@ -1685,29 +1699,29 @@ generate_insn_code(struct sljit_compiler
 	struct bpfjit_insn_data *insn_dat, size_t insn_count)
 {
 	/* a list of jumps to out-of-bound return from a generated function */
 	struct sljit_jump **ret0;
 	size_t ret0_size, ret0_maxsize;
 
 	struct sljit_jump *jump;
 	struct sljit_label *label;
 	const struct bpf_insn *pc;
 	struct bpfjit_jump *bjump, *jtf;
 	struct sljit_jump *to_mchain_jump;
 
 	size_t i;
-	int status;
-	int branching, negate;
 	unsigned int rval, mode, src, op;
+	int branching, negate;
+	int status, cond, op2;
 	uint32_t jt, jf;
 
 	bool unconditional_ret;
 	bool rv;
 
 	const size_t extwords = GET_EXTWORDS(bc);
 	const size_t memwords = GET_MEMWORDS(bc);
 
 	ret0 = NULL;
 	rv = false;
 
 	ret0_size = 0;
 	ret0_maxsize = 64;
@@ -1925,30 +1939,29 @@ generate_insn_code(struct sljit_compiler
 		if (pc->code == (BPF_ALU|BPF_NEG)) {
 			status = sljit_emit_op1(compiler,
 			    SLJIT_NEG,
 			    BJ_AREG, 0,
 			    BJ_AREG, 0);
 			if (status != SLJIT_SUCCESS)
 				goto fail;
 
 			continue;
 		}
 
 		op = BPF_OP(pc->code);
 		if (op != BPF_DIV && op != BPF_MOD) {
-			const int op2 = bpf_alu_to_sljit_op(pc);
-
-			if (op2 == SLJIT_UNUSED)
+			if (!alu_to_op(pc, &op2))
 				goto fail;
+
 			status = sljit_emit_op2(compiler,
 			    op2, BJ_AREG, 0, BJ_AREG, 0,
 			    kx_to_reg(pc), kx_to_reg_arg(pc));
 			if (status != SLJIT_SUCCESS)
 				goto fail;
 
 			continue;
 		}
 
 		/* BPF_DIV/BPF_MOD */
 
 		src = BPF_SRC(pc->code);
 		if (src != BPF_X && src != BPF_K)
@@ -1995,43 +2008,44 @@ generate_insn_code(struct sljit_compiler
 		if (op == BPF_JA) {
 			jt = jf = pc->k;
 		} else {
 			jt = pc->jt;
 			jf = pc->jf;
 		}
 
 		negate = (jt == 0) ? 1 : 0;
 		branching = (jt == jf) ? 0 : 1;
 		jtf = insn_dat[i].u.jdata.jtf;
 
 		if (branching) {
 			if (op != BPF_JSET) {
+				if (!jmp_to_cond(pc, negate, &cond))
+					goto fail;
 				jump = sljit_emit_cmp(compiler,
-				    bpf_jmp_to_sljit_cond(pc, negate),
-				    BJ_AREG, 0,
+				    cond, BJ_AREG, 0,
 				    kx_to_reg(pc), kx_to_reg_arg(pc));
 			} else {
 				status = sljit_emit_op2(compiler,
 				    SLJIT_AND,
 				    BJ_TMP1REG, 0,
 				    BJ_AREG, 0,
 				    kx_to_reg(pc), kx_to_reg_arg(pc));
 				if (status != SLJIT_SUCCESS)
 					goto fail;
 
+				if (!jmp_to_cond(pc, negate, &cond))
+					goto fail;
 				jump = sljit_emit_cmp(compiler,
-				    bpf_jmp_to_sljit_cond(pc, negate),
-				    BJ_TMP1REG, 0,
-				    SLJIT_IMM, 0);
+				    cond, BJ_TMP1REG, 0, SLJIT_IMM, 0);
 			}
 
 			if (jump == NULL)
 				goto fail;
 
 			BJ_ASSERT(jtf[negate].sjump == NULL);
 			jtf[negate].sjump = jump;
 		}
 
 		if (!branching || (jt != 0 && jf != 0)) {
 			jump = sljit_emit_jump(compiler, SLJIT_JUMP);
 			if (jump == NULL)
 				goto fail;