| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: xennetback_xenbus.c,v 1.52 2013/10/20 11:37:53 bouyer Exp $ */ | | 1 | /* $NetBSD: xennetback_xenbus.c,v 1.52.4.1 2016/01/08 21:05:14 snj Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2006 Manuel Bouyer. | | 4 | * Copyright (c) 2006 Manuel Bouyer. |
5 | * | | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | | 7 | * modification, are permitted provided that the following conditions |
8 | * are met: | | 8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright | | 9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. | | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright | | 11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the | | 12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. | | 13 | * documentation and/or other materials provided with the distribution. |
14 | * | | 14 | * |
| @@ -16,27 +16,27 @@ | | | @@ -16,27 +16,27 @@ |
16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | * | | 25 | * |
26 | */ | | 26 | */ |
27 | | | 27 | |
28 | #include <sys/cdefs.h> | | 28 | #include <sys/cdefs.h> |
29 | __KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.52 2013/10/20 11:37:53 bouyer Exp $"); | | 29 | __KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.52.4.1 2016/01/08 21:05:14 snj Exp $"); |
30 | | | 30 | |
31 | #include "opt_xen.h" | | 31 | #include "opt_xen.h" |
32 | | | 32 | |
33 | #include <sys/types.h> | | 33 | #include <sys/types.h> |
34 | #include <sys/param.h> | | 34 | #include <sys/param.h> |
35 | #include <sys/systm.h> | | 35 | #include <sys/systm.h> |
36 | #include <sys/malloc.h> | | 36 | #include <sys/malloc.h> |
37 | #include <sys/queue.h> | | 37 | #include <sys/queue.h> |
38 | #include <sys/kernel.h> | | 38 | #include <sys/kernel.h> |
39 | #include <sys/mbuf.h> | | 39 | #include <sys/mbuf.h> |
40 | #include <sys/protosw.h> | | 40 | #include <sys/protosw.h> |
41 | #include <sys/socket.h> | | 41 | #include <sys/socket.h> |
42 | #include <sys/ioctl.h> | | 42 | #include <sys/ioctl.h> |
| @@ -705,181 +705,181 @@ xennetback_tx_response(struct xnetback_i | | | @@ -705,181 +705,181 @@ xennetback_tx_response(struct xnetback_i |
705 | xneti->xni_txring.rsp_prod_pvt++; | | 705 | xneti->xni_txring.rsp_prod_pvt++; |
706 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_txring, do_event); | | 706 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_txring, do_event); |
707 | if (do_event) { | | 707 | if (do_event) { |
708 | XENPRINTF(("%s send event\n", xneti->xni_if.if_xname)); | | 708 | XENPRINTF(("%s send event\n", xneti->xni_if.if_xname)); |
709 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); | | 709 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); |
710 | } | | 710 | } |
711 | } | | 711 | } |
712 | | | 712 | |
713 | static int | | 713 | static int |
714 | xennetback_evthandler(void *arg) | | 714 | xennetback_evthandler(void *arg) |
715 | { | | 715 | { |
716 | struct xnetback_instance *xneti = arg; | | 716 | struct xnetback_instance *xneti = arg; |
717 | struct ifnet *ifp = &xneti->xni_if; | | 717 | struct ifnet *ifp = &xneti->xni_if; |
718 | netif_tx_request_t *txreq; | | 718 | netif_tx_request_t txreq; |
719 | struct xni_pkt *pkt; | | 719 | struct xni_pkt *pkt; |
720 | vaddr_t pkt_va; | | 720 | vaddr_t pkt_va; |
721 | struct mbuf *m; | | 721 | struct mbuf *m; |
722 | int receive_pending, err; | | 722 | int receive_pending, err; |
723 | RING_IDX req_cons; | | 723 | RING_IDX req_cons; |
724 | | | 724 | |
725 | XENPRINTF(("xennetback_evthandler ")); | | 725 | XENPRINTF(("xennetback_evthandler ")); |
726 | req_cons = xneti->xni_txring.req_cons; | | 726 | req_cons = xneti->xni_txring.req_cons; |
727 | xen_rmb(); | | 727 | xen_rmb(); |
728 | while (1) { | | 728 | while (1) { |
729 | xen_rmb(); /* be sure to read the request before updating */ | | 729 | xen_rmb(); /* be sure to read the request before updating */ |
730 | xneti->xni_txring.req_cons = req_cons; | | 730 | xneti->xni_txring.req_cons = req_cons; |
731 | xen_wmb(); | | 731 | xen_wmb(); |
732 | RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring, | | 732 | RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring, |
733 | receive_pending); | | 733 | receive_pending); |
734 | if (receive_pending == 0) | | 734 | if (receive_pending == 0) |
735 | break; | | 735 | break; |
736 | txreq = RING_GET_REQUEST(&xneti->xni_txring, req_cons); | | 736 | RING_COPY_REQUEST(&xneti->xni_txring, req_cons, &txreq); |
737 | xen_rmb(); | | 737 | xen_rmb(); |
738 | XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname, | | 738 | XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname, |
739 | txreq->size)); | | 739 | txreq.size)); |
740 | req_cons++; | | 740 | req_cons++; |
741 | if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != | | 741 | if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != |
742 | (IFF_UP | IFF_RUNNING))) { | | 742 | (IFF_UP | IFF_RUNNING))) { |
743 | /* interface not up, drop */ | | 743 | /* interface not up, drop */ |
744 | xennetback_tx_response(xneti, txreq->id, | | 744 | xennetback_tx_response(xneti, txreq.id, |
745 | NETIF_RSP_DROPPED); | | 745 | NETIF_RSP_DROPPED); |
746 | continue; | | 746 | continue; |
747 | } | | 747 | } |
748 | /* | | 748 | /* |
749 | * Do some sanity checks, and map the packet's page. | | 749 | * Do some sanity checks, and map the packet's page. |
750 | */ | | 750 | */ |
751 | if (__predict_false(txreq->size < ETHER_HDR_LEN || | | 751 | if (__predict_false(txreq.size < ETHER_HDR_LEN || |
752 | txreq->size > (ETHER_MAX_LEN - ETHER_CRC_LEN))) { | | 752 | txreq.size > (ETHER_MAX_LEN - ETHER_CRC_LEN))) { |
753 | printf("%s: packet size %d too big\n", | | 753 | printf("%s: packet size %d too big\n", |
754 | ifp->if_xname, txreq->size); | | 754 | ifp->if_xname, txreq.size); |
755 | xennetback_tx_response(xneti, txreq->id, | | 755 | xennetback_tx_response(xneti, txreq.id, |
756 | NETIF_RSP_ERROR); | | 756 | NETIF_RSP_ERROR); |
757 | ifp->if_ierrors++; | | 757 | ifp->if_ierrors++; |
758 | continue; | | 758 | continue; |
759 | } | | 759 | } |
760 | /* don't cross page boundaries */ | | 760 | /* don't cross page boundaries */ |
761 | if (__predict_false( | | 761 | if (__predict_false( |
762 | txreq->offset + txreq->size > PAGE_SIZE)) { | | 762 | txreq.offset + txreq.size > PAGE_SIZE)) { |
763 | printf("%s: packet cross page boundary\n", | | 763 | printf("%s: packet cross page boundary\n", |
764 | ifp->if_xname); | | 764 | ifp->if_xname); |
765 | xennetback_tx_response(xneti, txreq->id, | | 765 | xennetback_tx_response(xneti, txreq.id, |
766 | NETIF_RSP_ERROR); | | 766 | NETIF_RSP_ERROR); |
767 | ifp->if_ierrors++; | | 767 | ifp->if_ierrors++; |
768 | continue; | | 768 | continue; |
769 | } | | 769 | } |
770 | /* get a mbuf for this packet */ | | 770 | /* get a mbuf for this packet */ |
771 | MGETHDR(m, M_DONTWAIT, MT_DATA); | | 771 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
772 | if (__predict_false(m == NULL)) { | | 772 | if (__predict_false(m == NULL)) { |
773 | static struct timeval lasttime; | | 773 | static struct timeval lasttime; |
774 | if (ratecheck(&lasttime, &xni_pool_errintvl)) | | 774 | if (ratecheck(&lasttime, &xni_pool_errintvl)) |
775 | printf("%s: mbuf alloc failed\n", | | 775 | printf("%s: mbuf alloc failed\n", |
776 | ifp->if_xname); | | 776 | ifp->if_xname); |
777 | xennetback_tx_response(xneti, txreq->id, | | 777 | xennetback_tx_response(xneti, txreq.id, |
778 | NETIF_RSP_DROPPED); | | 778 | NETIF_RSP_DROPPED); |
779 | ifp->if_ierrors++; | | 779 | ifp->if_ierrors++; |
780 | continue; | | 780 | continue; |
781 | } | | 781 | } |
782 | | | 782 | |
783 | XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n", | | 783 | XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n", |
784 | xneti->xni_if.if_xname, txreq->offset, | | 784 | xneti->xni_if.if_xname, txreq.offset, |
785 | txreq->size, txreq->id, MASK_NETIF_TX_IDX(req_cons))); | | 785 | txreq.size, txreq.id, MASK_NETIF_TX_IDX(req_cons))); |
786 | | | 786 | |
787 | pkt = pool_get(&xni_pkt_pool, PR_NOWAIT); | | 787 | pkt = pool_get(&xni_pkt_pool, PR_NOWAIT); |
788 | if (__predict_false(pkt == NULL)) { | | 788 | if (__predict_false(pkt == NULL)) { |
789 | static struct timeval lasttime; | | 789 | static struct timeval lasttime; |
790 | if (ratecheck(&lasttime, &xni_pool_errintvl)) | | 790 | if (ratecheck(&lasttime, &xni_pool_errintvl)) |
791 | printf("%s: xnbpkt alloc failed\n", | | 791 | printf("%s: xnbpkt alloc failed\n", |
792 | ifp->if_xname); | | 792 | ifp->if_xname); |
793 | xennetback_tx_response(xneti, txreq->id, | | 793 | xennetback_tx_response(xneti, txreq.id, |
794 | NETIF_RSP_DROPPED); | | 794 | NETIF_RSP_DROPPED); |
795 | ifp->if_ierrors++; | | 795 | ifp->if_ierrors++; |
796 | m_freem(m); | | 796 | m_freem(m); |
797 | continue; | | 797 | continue; |
798 | } | | 798 | } |
799 | err = xen_shm_map(1, xneti->xni_domid, &txreq->gref, &pkt_va, | | 799 | err = xen_shm_map(1, xneti->xni_domid, &txreq.gref, &pkt_va, |
800 | &pkt->pkt_handle, XSHM_RO); | | 800 | &pkt->pkt_handle, XSHM_RO); |
801 | if (__predict_false(err == ENOMEM)) { | | 801 | if (__predict_false(err == ENOMEM)) { |
802 | xennetback_tx_response(xneti, txreq->id, | | 802 | xennetback_tx_response(xneti, txreq.id, |
803 | NETIF_RSP_DROPPED); | | 803 | NETIF_RSP_DROPPED); |
804 | ifp->if_ierrors++; | | 804 | ifp->if_ierrors++; |
805 | pool_put(&xni_pkt_pool, pkt); | | 805 | pool_put(&xni_pkt_pool, pkt); |
806 | m_freem(m); | | 806 | m_freem(m); |
807 | continue; | | 807 | continue; |
808 | } | | 808 | } |
809 | | | 809 | |
810 | if (__predict_false(err)) { | | 810 | if (__predict_false(err)) { |
811 | printf("%s: mapping foreing page failed: %d\n", | | 811 | printf("%s: mapping foreing page failed: %d\n", |
812 | xneti->xni_if.if_xname, err); | | 812 | xneti->xni_if.if_xname, err); |
813 | xennetback_tx_response(xneti, txreq->id, | | 813 | xennetback_tx_response(xneti, txreq.id, |
814 | NETIF_RSP_ERROR); | | 814 | NETIF_RSP_ERROR); |
815 | ifp->if_ierrors++; | | 815 | ifp->if_ierrors++; |
816 | pool_put(&xni_pkt_pool, pkt); | | 816 | pool_put(&xni_pkt_pool, pkt); |
817 | m_freem(m); | | 817 | m_freem(m); |
818 | continue; | | 818 | continue; |
819 | } | | 819 | } |
820 | | | 820 | |
821 | if ((ifp->if_flags & IFF_PROMISC) == 0) { | | 821 | if ((ifp->if_flags & IFF_PROMISC) == 0) { |
822 | struct ether_header *eh = | | 822 | struct ether_header *eh = |
823 | (void*)(pkt_va + txreq->offset); | | 823 | (void*)(pkt_va + txreq.offset); |
824 | if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 && | | 824 | if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 && |
825 | memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost, | | 825 | memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost, |
826 | ETHER_ADDR_LEN) != 0) { | | 826 | ETHER_ADDR_LEN) != 0) { |
827 | xni_pkt_unmap(pkt, pkt_va); | | 827 | xni_pkt_unmap(pkt, pkt_va); |
828 | m_freem(m); | | 828 | m_freem(m); |
829 | xennetback_tx_response(xneti, txreq->id, | | 829 | xennetback_tx_response(xneti, txreq.id, |
830 | NETIF_RSP_OKAY); | | 830 | NETIF_RSP_OKAY); |
831 | continue; /* packet is not for us */ | | 831 | continue; /* packet is not for us */ |
832 | } | | 832 | } |
833 | } | | 833 | } |
834 | #ifdef notyet | | 834 | #ifdef notyet |
835 | a lot of work is needed in the tcp stack to handle read-only ext storage | | 835 | a lot of work is needed in the tcp stack to handle read-only ext storage |
836 | so always copy for now. | | 836 | so always copy for now. |
837 | if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) == | | 837 | if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) == |
838 | (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1))) | | 838 | (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1))) |
839 | #else | | 839 | #else |
840 | if (1) | | 840 | if (1) |
841 | #endif /* notyet */ | | 841 | #endif /* notyet */ |
842 | { | | 842 | { |
843 | /* | | 843 | /* |
844 | * This is the last TX buffer. Copy the data and | | 844 | * This is the last TX buffer. Copy the data and |
845 | * ack it. Delaying it until the mbuf is | | 845 | * ack it. Delaying it until the mbuf is |
846 | * freed will stall transmit. | | 846 | * freed will stall transmit. |
847 | */ | | 847 | */ |
848 | m->m_len = min(MHLEN, txreq->size); | | 848 | m->m_len = min(MHLEN, txreq.size); |
849 | m->m_pkthdr.len = 0; | | 849 | m->m_pkthdr.len = 0; |
850 | m_copyback(m, 0, txreq->size, | | 850 | m_copyback(m, 0, txreq.size, |
851 | (void *)(pkt_va + txreq->offset)); | | 851 | (void *)(pkt_va + txreq.offset)); |
852 | xni_pkt_unmap(pkt, pkt_va); | | 852 | xni_pkt_unmap(pkt, pkt_va); |
853 | if (m->m_pkthdr.len < txreq->size) { | | 853 | if (m->m_pkthdr.len < txreq.size) { |
854 | ifp->if_ierrors++; | | 854 | ifp->if_ierrors++; |
855 | m_freem(m); | | 855 | m_freem(m); |
856 | xennetback_tx_response(xneti, txreq->id, | | 856 | xennetback_tx_response(xneti, txreq.id, |
857 | NETIF_RSP_DROPPED); | | 857 | NETIF_RSP_DROPPED); |
858 | continue; | | 858 | continue; |
859 | } | | 859 | } |
860 | xennetback_tx_response(xneti, txreq->id, | | 860 | xennetback_tx_response(xneti, txreq.id, |
861 | NETIF_RSP_OKAY); | | 861 | NETIF_RSP_OKAY); |
862 | } else { | | 862 | } else { |
863 | | | 863 | |
864 | pkt->pkt_id = txreq->id; | | 864 | pkt->pkt_id = txreq.id; |
865 | pkt->pkt_xneti = xneti; | | 865 | pkt->pkt_xneti = xneti; |
866 | | | 866 | |
867 | MEXTADD(m, pkt_va + txreq->offset, | | 867 | MEXTADD(m, pkt_va + txreq.offset, |
868 | txreq->size, M_DEVBUF, xennetback_tx_free, pkt); | | 868 | txreq.size, M_DEVBUF, xennetback_tx_free, pkt); |
869 | m->m_pkthdr.len = m->m_len = txreq->size; | | 869 | m->m_pkthdr.len = m->m_len = txreq.size; |
870 | m->m_flags |= M_EXT_ROMAP; | | 870 | m->m_flags |= M_EXT_ROMAP; |
871 | } | | 871 | } |
872 | if ((txreq->flags & NETTXF_csum_blank) != 0) { | | 872 | if ((txreq.flags & NETTXF_csum_blank) != 0) { |
873 | xennet_checksum_fill(&m); | | 873 | xennet_checksum_fill(&m); |
874 | if (m == NULL) { | | 874 | if (m == NULL) { |
875 | ifp->if_ierrors++; | | 875 | ifp->if_ierrors++; |
876 | continue; | | 876 | continue; |
877 | } | | 877 | } |
878 | } | | 878 | } |
879 | m->m_pkthdr.rcvif = ifp; | | 879 | m->m_pkthdr.rcvif = ifp; |
880 | ifp->if_ipackets++; | | 880 | ifp->if_ipackets++; |
881 | | | 881 | |
882 | bpf_mtap(ifp, m); | | 882 | bpf_mtap(ifp, m); |
883 | (*ifp->if_input)(ifp, m); | | 883 | (*ifp->if_input)(ifp, m); |
884 | } | | 884 | } |
885 | xen_rmb(); /* be sure to read the request before updating pointer */ | | 885 | xen_rmb(); /* be sure to read the request before updating pointer */ |
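
The change running through this hunk is a double-fetch (time-of-check/time-of-use) fix, matching the hardening Xen shipped for XSA-155. The old code kept txreq as a pointer into the ring page that is shared with, and writable by, the frontend, so a malicious guest could rewrite size, offset, gref or id between the backend's sanity checks and their use. RING_COPY_REQUEST() snapshots the whole request into the backend-local netif_tx_request_t first, which is why every txreq-> on the old side becomes txreq. on the new side. A minimal sketch of the macro, modelled on the definition added to Xen's public io/ring.h for this class of fix (the exact NetBSD rendering may differ):

	/* Copy a request out of the shared ring into caller-local storage.
	 * The volatile cast forces a real copy, so the compiler cannot
	 * re-read fields through the shared-ring pointer afterwards. */
	#define RING_COPY_REQUEST(_r, _idx, _req) do {                        \
		*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
	} while (0)

With the snapshot in place, the bounds checks on txreq.size and txreq.offset and the later xen_shm_map() of txreq.gref all operate on values the frontend can no longer change underneath the backend.
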
| @@ -943,26 +943,27 @@ static void | | | @@ -943,26 +943,27 @@ static void |
943 | xennetback_ifsoftstart_transfer(void *arg) | | 943 | xennetback_ifsoftstart_transfer(void *arg) |
944 | { | | 944 | { |
945 | struct xnetback_instance *xneti = arg; | | 945 | struct xnetback_instance *xneti = arg; |
946 | struct ifnet *ifp = &xneti->xni_if; | | 946 | struct ifnet *ifp = &xneti->xni_if; |
947 | struct mbuf *m; | | 947 | struct mbuf *m; |
948 | vaddr_t xmit_va; | | 948 | vaddr_t xmit_va; |
949 | paddr_t xmit_pa; | | 949 | paddr_t xmit_pa; |
950 | paddr_t xmit_ma; | | 950 | paddr_t xmit_ma; |
951 | paddr_t newp_ma = 0; /* XXX gcc */ | | 951 | paddr_t newp_ma = 0; /* XXX gcc */ |
952 | int i, j, nppitems; | | 952 | int i, j, nppitems; |
953 | mmu_update_t *mmup; | | 953 | mmu_update_t *mmup; |
954 | multicall_entry_t *mclp; | | 954 | multicall_entry_t *mclp; |
955 | netif_rx_response_t *rxresp; | | 955 | netif_rx_response_t *rxresp; |
| | | 956 | netif_rx_request_t rxreq; |
956 | RING_IDX req_prod, resp_prod; | | 957 | RING_IDX req_prod, resp_prod; |
957 | int do_event = 0; | | 958 | int do_event = 0; |
958 | gnttab_transfer_t *gop; | | 959 | gnttab_transfer_t *gop; |
959 | int id, offset; | | 960 | int id, offset; |
960 | | | 961 | |
961 | XENPRINTF(("xennetback_ifsoftstart_transfer ")); | | 962 | XENPRINTF(("xennetback_ifsoftstart_transfer ")); |
962 | int s = splnet(); | | 963 | int s = splnet(); |
963 | if (__predict_false( | | 964 | if (__predict_false( |
964 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { | | 965 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { |
965 | splx(s); | | 966 | splx(s); |
966 | return; | | 967 | return; |
967 | } | | 968 | } |
968 | | | 969 | |
| @@ -1018,30 +1019,30 @@ xennetback_ifsoftstart_transfer(void *ar | | | @@ -1018,30 +1019,30 @@ xennetback_ifsoftstart_transfer(void *ar |
1018 | KASSERT(xmit_pa != POOL_PADDR_INVALID); | | 1019 | KASSERT(xmit_pa != POOL_PADDR_INVALID); |
1019 | xmit_ma = xpmap_ptom(xmit_pa); | | 1020 | xmit_ma = xpmap_ptom(xmit_pa); |
1020 | XENPRINTF(("xennetback_get_xmit_page: got va " | | 1021 | XENPRINTF(("xennetback_get_xmit_page: got va " |
1021 | "0x%x ma 0x%x\n", (u_int)xmit_va, | | 1022 | "0x%x ma 0x%x\n", (u_int)xmit_va, |
1022 | (u_int)xmit_ma)); | | 1023 | (u_int)xmit_ma)); |
1023 | m_copydata(m, 0, m->m_pkthdr.len, | | 1024 | m_copydata(m, 0, m->m_pkthdr.len, |
1024 | (char *)xmit_va + LINUX_REQUESTED_OFFSET); | | 1025 | (char *)xmit_va + LINUX_REQUESTED_OFFSET); |
1025 | offset = LINUX_REQUESTED_OFFSET; | | 1026 | offset = LINUX_REQUESTED_OFFSET; |
1026 | pages_pool_free[nppitems].va = xmit_va; | | 1027 | pages_pool_free[nppitems].va = xmit_va; |
1027 | pages_pool_free[nppitems].pa = xmit_pa; | | 1028 | pages_pool_free[nppitems].pa = xmit_pa; |
1028 | nppitems++; | | 1029 | nppitems++; |
1029 | } | | 1030 | } |
1030 | /* start filling ring */ | | 1031 | /* start filling ring */ |
1031 | gop->ref = RING_GET_REQUEST(&xneti->xni_rxring, | | 1032 | RING_COPY_REQUEST(&xneti->xni_rxring, |
1032 | xneti->xni_rxring.req_cons)->gref; | | 1033 | xneti->xni_rxring.req_cons, &rxreq); |
1033 | id = RING_GET_REQUEST(&xneti->xni_rxring, | | 1034 | gop->ref = rxreq.gref; |
1034 | xneti->xni_rxring.req_cons)->id; | | 1035 | id = rxreq.id; |
1035 | xen_rmb(); | | 1036 | xen_rmb(); |
1036 | xneti->xni_rxring.req_cons++; | | 1037 | xneti->xni_rxring.req_cons++; |
1037 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, | | 1038 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, |
1038 | resp_prod); | | 1039 | resp_prod); |
1039 | rxresp->id = id; | | 1040 | rxresp->id = id; |
1040 | rxresp->offset = offset; | | 1041 | rxresp->offset = offset; |
1041 | rxresp->status = m->m_pkthdr.len; | | 1042 | rxresp->status = m->m_pkthdr.len; |
1042 | if ((m->m_pkthdr.csum_flags & | | 1043 | if ((m->m_pkthdr.csum_flags & |
1043 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { | | 1044 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { |
1044 | rxresp->flags = NETRXF_csum_blank; | | 1045 | rxresp->flags = NETRXF_csum_blank; |
1045 | } else { | | 1046 | } else { |
1046 | rxresp->flags = 0; | | 1047 | rxresp->flags = 0; |
1047 | } | | 1048 | } |
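
This receive path had a second-order problem on top of the shared-ring pointer: it called RING_GET_REQUEST() twice on the same slot, once for gref and once for id, giving the frontend a window to change the slot between the two reads and desynchronize the grant reference from the request id. The new code takes a single RING_COPY_REQUEST() snapshot into the local rxreq and reads both fields from that. Schematically (simplified from the hunk above, not a literal quote of the driver):

	netif_rx_request_t rxreq;

	/* Before: two independent fetches from guest-writable memory. */
	gop->ref = RING_GET_REQUEST(&xneti->xni_rxring, cons)->gref;
	id       = RING_GET_REQUEST(&xneti->xni_rxring, cons)->id;

	/* After: one forced copy; gref and id come from the same snapshot. */
	RING_COPY_REQUEST(&xneti->xni_rxring, cons, &rxreq);
	gop->ref = rxreq.gref;
	id       = rxreq.id;

The hypervisor still validates the grant reference itself when the transfer is performed; what the snapshot buys is that the id echoed back in the rx response necessarily belongs to the same request the grant came from. The xennetback_ifsoftstart_copy() hunks below apply the identical pattern to the grant-copy path.
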
| @@ -1188,26 +1189,27 @@ xennetback_ifsoftstart_transfer(void *ar | | | @@ -1188,26 +1189,27 @@ xennetback_ifsoftstart_transfer(void *ar |
1188 | splx(s); | | 1189 | splx(s); |
1189 | } | | 1190 | } |
1190 | | | 1191 | |
1191 | static void | | 1192 | static void |
1192 | xennetback_ifsoftstart_copy(void *arg) | | 1193 | xennetback_ifsoftstart_copy(void *arg) |
1193 | { | | 1194 | { |
1194 | struct xnetback_instance *xneti = arg; | | 1195 | struct xnetback_instance *xneti = arg; |
1195 | struct ifnet *ifp = &xneti->xni_if; | | 1196 | struct ifnet *ifp = &xneti->xni_if; |
1196 | struct mbuf *m, *new_m; | | 1197 | struct mbuf *m, *new_m; |
1197 | paddr_t xmit_pa; | | 1198 | paddr_t xmit_pa; |
1198 | paddr_t xmit_ma; | | 1199 | paddr_t xmit_ma; |
1199 | int i, j; | | 1200 | int i, j; |
1200 | netif_rx_response_t *rxresp; | | 1201 | netif_rx_response_t *rxresp; |
| | | 1202 | netif_rx_request_t rxreq; |
1201 | RING_IDX req_prod, resp_prod; | | 1203 | RING_IDX req_prod, resp_prod; |
1202 | int do_event = 0; | | 1204 | int do_event = 0; |
1203 | gnttab_copy_t *gop; | | 1205 | gnttab_copy_t *gop; |
1204 | int id, offset; | | 1206 | int id, offset; |
1205 | | | 1207 | |
1206 | XENPRINTF(("xennetback_ifsoftstart_copy ")); | | 1208 | XENPRINTF(("xennetback_ifsoftstart_copy ")); |
1207 | int s = splnet(); | | 1209 | int s = splnet(); |
1208 | if (__predict_false( | | 1210 | if (__predict_false( |
1209 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { | | 1211 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { |
1210 | splx(s); | | 1212 | splx(s); |
1211 | return; | | 1213 | return; |
1212 | } | | 1214 | } |
1213 | | | 1215 | |
| @@ -1299,36 +1301,36 @@ xennetback_ifsoftstart_copy(void *arg) | | | @@ -1299,36 +1301,36 @@ xennetback_ifsoftstart_copy(void *arg) |
1299 | } else { | | 1301 | } else { |
1300 | IFQ_DEQUEUE(&ifp->if_snd, m); | | 1302 | IFQ_DEQUEUE(&ifp->if_snd, m); |
1301 | } | | 1303 | } |
1302 | | | 1304 | |
1303 | KASSERT(xmit_pa != POOL_PADDR_INVALID); | | 1305 | KASSERT(xmit_pa != POOL_PADDR_INVALID); |
1304 | KASSERT((offset + m->m_pkthdr.len) <= PAGE_SIZE); | | 1306 | KASSERT((offset + m->m_pkthdr.len) <= PAGE_SIZE); |
1305 | xmit_ma = xpmap_ptom(xmit_pa); | | 1307 | xmit_ma = xpmap_ptom(xmit_pa); |
1306 | /* start filling ring */ | | 1308 | /* start filling ring */ |
1307 | gop->flags = GNTCOPY_dest_gref; | | 1309 | gop->flags = GNTCOPY_dest_gref; |
1308 | gop->source.offset = offset; | | 1310 | gop->source.offset = offset; |
1309 | gop->source.domid = DOMID_SELF; | | 1311 | gop->source.domid = DOMID_SELF; |
1310 | gop->source.u.gmfn = xmit_ma >> PAGE_SHIFT; | | 1312 | gop->source.u.gmfn = xmit_ma >> PAGE_SHIFT; |
1311 | | | 1313 | |
1312 | gop->dest.u.ref = RING_GET_REQUEST(&xneti->xni_rxring, | | 1314 | RING_COPY_REQUEST(&xneti->xni_rxring, |
1313 | xneti->xni_rxring.req_cons)->gref; | | 1315 | xneti->xni_rxring.req_cons, &rxreq); |
| | | 1316 | gop->dest.u.ref = rxreq.gref; |
1314 | gop->dest.offset = 0; | | 1317 | gop->dest.offset = 0; |
1315 | gop->dest.domid = xneti->xni_domid; | | 1318 | gop->dest.domid = xneti->xni_domid; |
1316 | | | 1319 | |
1317 | gop->len = m->m_pkthdr.len; | | 1320 | gop->len = m->m_pkthdr.len; |
1318 | gop++; | | 1321 | gop++; |
1319 | | | 1322 | |
1320 | id = RING_GET_REQUEST(&xneti->xni_rxring, | | 1323 | id = rxreq.id; |
1321 | xneti->xni_rxring.req_cons)->id; | | | |
1322 | xen_rmb(); | | 1324 | xen_rmb(); |
1323 | xneti->xni_rxring.req_cons++; | | 1325 | xneti->xni_rxring.req_cons++; |
1324 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, | | 1326 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, |
1325 | resp_prod); | | 1327 | resp_prod); |
1326 | rxresp->id = id; | | 1328 | rxresp->id = id; |
1327 | rxresp->offset = 0; | | 1329 | rxresp->offset = 0; |
1328 | rxresp->status = m->m_pkthdr.len; | | 1330 | rxresp->status = m->m_pkthdr.len; |
1329 | if ((m->m_pkthdr.csum_flags & | | 1331 | if ((m->m_pkthdr.csum_flags & |
1330 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { | | 1332 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { |
1331 | rxresp->flags = NETRXF_csum_blank; | | 1333 | rxresp->flags = NETRXF_csum_blank; |
1332 | } else { | | 1334 | } else { |
1333 | rxresp->flags = 0; | | 1335 | rxresp->flags = 0; |
1334 | } | | 1336 | } |