Thu Jul 16 01:20:38 2020 UTC ()
Set PCI_COMMAND_MASTER_ENABLE and PCI_COMMAND_MEM_ENABLE
to activate the PCI device.

This configuration is needed when the BIOS or UEFI firmware
does not set these bits.


(yamaguchi)
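For reference, the change boils down to the usual read-modify-write of the
PCI command/status register. A minimal sketch of that pattern, assuming a
struct pci_attach_args from autoconf and using a hypothetical helper name,
would look like:

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/*
 * Sketch only: enable bus mastering and memory-space decoding for the
 * device described by "pa".  The helper name is hypothetical; the
 * committed change below does the same thing in ixl_pci_csr_setup().
 */
static void
example_pci_csr_enable(const struct pci_attach_args *pa)
{
	pcireg_t csr;

	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
}

The actual commit adds ixl_pci_csr_setup() with this body and calls it from
ixl_attach() before the memory BAR is mapped.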
cvs diff -r1.67 -r1.68 src/sys/dev/pci/if_ixl.c

--- src/sys/dev/pci/if_ixl.c 2020/06/11 02:39:30 1.67
+++ src/sys/dev/pci/if_ixl.c 2020/07/16 01:20:38 1.68
@@ -1,14 +1,14 @@
-/*	$NetBSD: if_ixl.c,v 1.67 2020/06/11 02:39:30 thorpej Exp $	*/
+/*	$NetBSD: if_ixl.c,v 1.68 2020/07/16 01:20:38 yamaguchi Exp $	*/
 
 /*
  * Copyright (c) 2013-2015, Intel Corporation
  * All rights reserved.
 
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  *
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
@@ -64,27 +64,27 @@
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.67 2020/06/11 02:39:30 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.68 2020/07/16 01:20:38 yamaguchi Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
 #include "opt_if_ixl.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/types.h>
 
 #include <sys/cpu.h>
 #include <sys/device.h>
 #include <sys/evcnt.h>
 #include <sys/interrupt.h>
@@ -754,27 +754,28 @@ do { \
 #define IXL_STATS_INTERVAL_MSEC	10000
 #endif
 #ifndef IXL_QUEUE_NUM
 #define IXL_QUEUE_NUM		0
 #endif
 
 static bool	ixl_param_nomsix = false;
 static int	ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
 static int	ixl_param_nqps_limit = IXL_QUEUE_NUM;
 static unsigned int	ixl_param_tx_ndescs = 1024;
 static unsigned int	ixl_param_rx_ndescs = 1024;
 
 static enum i40e_mac_type
 	    ixl_mactype(pci_product_id_t);
+static void	ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
 static void	ixl_clear_hw(struct ixl_softc *);
 static int	ixl_pf_reset(struct ixl_softc *);
 
 static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
 		    bus_size_t, bus_size_t);
 static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
 
 static int	ixl_arq_fill(struct ixl_softc *);
 static void	ixl_arq_unfill(struct ixl_softc *);
 
 static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
 		    unsigned int);
 static void	ixl_atq_set(struct ixl_atq *,
@@ -1097,26 +1098,28 @@ ixl_attach(device_t parent, device_t sel
 	int tries, rv, link;
 
 	sc = device_private(self);
 	sc->sc_dev = self;
 	ifp = &sc->sc_ec.ec_if;
 
 	sc->sc_pa = *pa;
 	sc->sc_dmat = (pci_dma64_available(pa)) ?
 	    pa->pa_dmat64 : pa->pa_dmat;
 	sc->sc_aq_regs = &ixl_pf_aq_regs;
 
 	sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
 
+	ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);
+
 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
 	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
 	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
 		aprint_error(": unable to map registers\n");
 		return;
 	}
 
 	mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
 
 	firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
 	firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
 	firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
 	sc->sc_base_queue = firstq;
@@ -1933,26 +1936,37 @@ ixl_mactype(pci_product_id_t id)
 
 	case PCI_PRODUCT_INTEL_X722_KX:
 	case PCI_PRODUCT_INTEL_X722_QSFP:
 	case PCI_PRODUCT_INTEL_X722_SFP:
 	case PCI_PRODUCT_INTEL_X722_1G_BASET:
 	case PCI_PRODUCT_INTEL_X722_10G_BASET:
 	case PCI_PRODUCT_INTEL_X722_I_SFP:
 		return I40E_MAC_X722;
 	}
 
 	return I40E_MAC_GENERIC;
 }
 
+static void
+ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
+{
+	pcireg_t csr;
+
+	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
+	csr |= (PCI_COMMAND_MASTER_ENABLE |
+	    PCI_COMMAND_MEM_ENABLE);
+	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
+}
+
 static inline void *
 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
 {
 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
 
 	if (i >= e->hmc_count)
 		return NULL;
 
 	kva += e->hmc_base;
 	kva += i * e->hmc_size;
 
 	return kva;