drivers/pcie: Add support for MSI-X
It's disabled by default. When enabled, and if the device exposes both MSI and MSI-X capabilities: MSI-X will be selected and MSI disabled on the device. Signed-off-by: Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>
This commit is contained in:
parent
bfbe9b6df5
commit
a2491b321e
@ -18,9 +18,10 @@ config PCIE_MSI
|
||||
runtime) to use them. This is typically required for PCIe devices
|
||||
to generate interrupts at all.
|
||||
|
||||
if PCIE_MSI
|
||||
|
||||
config PCIE_MSI_MULTI_VECTOR
|
||||
bool "Enable MSI multi-vector support"
|
||||
depends on PCIE_MSI
|
||||
help
|
||||
MSI can support up to 32 different messages. This will enable the
|
||||
support of such capability so each message can get a vector
|
||||
@ -29,6 +30,16 @@ config PCIE_MSI_MULTI_VECTOR
|
||||
the vectors cannot be managed by the hardware or if none of the
|
||||
peripherals require this.
|
||||
|
||||
config PCIE_MSI_X
|
||||
bool "Enable MSI-X support"
|
||||
help
|
||||
If one or more devices support MSI-X, you'll need to enable this.
|
||||
If a device exposes support for both MSI-X and MSI, MSI-X will be
|
||||
used and MSI disabled on that device. Enable PCIE_MSI_MULTI_VECTOR
|
||||
if you want to support multi-vector on MSI-X as well.
|
||||
|
||||
endif # PCIE_MSI
|
||||
|
||||
config PCIE_SHELL
|
||||
bool "Enable PCIe/new PCI Shell"
|
||||
default y
|
||||
|
||||
@ -36,27 +36,118 @@ __weak bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PCIE_MSI_X
|
||||
|
||||
/* Return the number of MSI-X table entries a device provides.
 *
 * The TSIZE field of the MSI-X Message Control Register encodes the
 * table size as N - 1, hence the final +1.
 *
 * @param bdf  Device BDF the MSI-X capability belongs to
 * @param base Config-space offset of the MSI-X capability
 *
 * @return Number of MSI-X table entries
 */
static uint32_t get_msix_table_size(pcie_bdf_t bdf,
				    uint32_t base)
{
	uint32_t mcr = pcie_conf_read(bdf, base + PCIE_MSIX_MCR);
	uint32_t encoded = (mcr & PCIE_MSIX_MCR_TSIZE) >> PCIE_MSIX_MCR_TSIZE_SHIFT;

	return encoded + 1U;
}
||||
|
||||
/* Locate a device's MSI-X table, map it into virtual memory, and make
 * each vector point at its own table entry.
 *
 * The Table Offset/BIR register supplies both the BAR index holding
 * the table (low 3 bits) and the byte offset of the table inside that
 * BAR (remaining bits, 8-byte aligned).
 *
 * @param bdf      Device BDF the MSI-X capability belongs to
 * @param base     Config-space offset of the MSI-X capability
 * @param vectors  Vectors to attach to successive table entries
 * @param n_vector Number of vectors to attach
 *
 * @return true on success, false if the BAR holding the table could
 *         not be retrieved
 */
static bool map_msix_table_entries(pcie_bdf_t bdf,
				   uint32_t base,
				   msi_vector_t *vectors,
				   uint8_t n_vector)
{
	uint32_t table_offset;
	uint8_t table_bir;
	struct pcie_mbar bar;
	uintptr_t mapped_table;
	int i;

	/* Split the Table Offset/BIR register into its two fields */
	table_offset = pcie_conf_read(bdf, base + PCIE_MSIX_TR);
	table_bir = table_offset & PCIE_MSIX_TR_BIR;
	table_offset &= PCIE_MSIX_TR_OFFSET;

	if (!pcie_get_mbar(bdf, table_bir, &bar)) {
		return false;
	}

	/* NOTE(review): this maps bar.size bytes starting at
	 * phys_addr + table_offset, which extends past the end of the
	 * BAR whenever table_offset != 0 — confirm this is intended.
	 */
	z_mem_map((uint8_t **)&mapped_table,
		  bar.phys_addr + table_offset,
		  bar.size, K_MEM_PERM_RW);

	/* Each table entry is PCIE_MSIR_TABLE_ENTRY_SIZE (16) bytes */
	for (i = 0; i < n_vector; i++) {
		vectors[i].msix_vector = (struct msix_vector *)
			(mapped_table + (i * PCIE_MSIR_TABLE_ENTRY_SIZE));
	}

	return true;
}
|
||||
|
||||
static void set_msix(msi_vector_t *vectors,
|
||||
uint8_t n_vector,
|
||||
bool msix)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < n_vector; i++) {
|
||||
vectors[i].msix = msix;
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
#define get_msix_table_size(...) 0
|
||||
#define map_msix_table_entries(...) true
|
||||
#define set_msix(...)
|
||||
#endif /* CONFIG_PCIE_MSI_X */
|
||||
|
||||
/* Return the number of messages the device's MSI capability advertises.
 *
 * The MMC field of the Message Control Register stores log2 of the
 * message count, so the true count is 2^(MMC field).
 *
 * @param bdf  Device BDF the MSI capability belongs to
 * @param base Config-space offset of the MSI capability
 *
 * @return Number of MSI messages the device supports
 */
static uint32_t get_msi_mmc(pcie_bdf_t bdf,
			    uint32_t base)
{
	uint32_t mmc_field;

	mmc_field = (pcie_conf_read(bdf, base + PCIE_MSI_MCR) & PCIE_MSI_MCR_MMC)
		    >> PCIE_MSI_MCR_MMC_SHIFT;

	return 1 << mmc_field;
}
|
||||
|
||||
uint8_t pcie_msi_vectors_allocate(pcie_bdf_t bdf,
|
||||
unsigned int priority,
|
||||
msi_vector_t *vectors,
|
||||
uint8_t n_vector)
|
||||
{
|
||||
bool msi = true;
|
||||
uint32_t base;
|
||||
uint32_t mcr;
|
||||
uint8_t mmc;
|
||||
uint32_t req_vectors;
|
||||
|
||||
base = pcie_get_cap(bdf, PCIE_MSI_CAP_ID);
|
||||
if (base == 0U) {
|
||||
return 0;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
|
||||
uint32_t base_msix;
|
||||
|
||||
base_msix = pcie_get_cap(bdf, PCIE_MSIX_CAP_ID);
|
||||
if (base_msix != 0U) {
|
||||
base = base_msix;
|
||||
}
|
||||
|
||||
msi = false;
|
||||
base = base_msix;
|
||||
}
|
||||
|
||||
mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR);
|
||||
mmc = (uint8_t)((mcr & PCIE_MSI_MCR_MMC) >> PCIE_MSI_MCR_MMC_SHIFT);
|
||||
/* Getting MMC true count: 2^(MMC field) */
|
||||
mmc = 1 << mmc;
|
||||
if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
|
||||
set_msix(vectors, n_vector, !msi);
|
||||
|
||||
if (n_vector > mmc) {
|
||||
n_vector = mmc;
|
||||
if (!msi) {
|
||||
req_vectors = get_msix_table_size(bdf, base);
|
||||
if (!map_msix_table_entries(bdf, base,
|
||||
vectors, n_vector)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (msi) {
|
||||
req_vectors = get_msi_mmc(bdf, base);
|
||||
}
|
||||
|
||||
if (n_vector > req_vectors) {
|
||||
n_vector = req_vectors;
|
||||
}
|
||||
|
||||
return arch_pcie_msi_vectors_allocate(priority, vectors, n_vector);
|
||||
@ -71,6 +162,16 @@ bool pcie_msi_vector_connect(pcie_bdf_t bdf,
|
||||
uint32_t base;
|
||||
|
||||
base = pcie_get_cap(bdf, PCIE_MSI_CAP_ID);
|
||||
|
||||
if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
|
||||
uint32_t base_msix;
|
||||
|
||||
base_msix = pcie_get_cap(bdf, PCIE_MSIX_CAP_ID);
|
||||
if (base_msix != 0U) {
|
||||
base = base_msix;
|
||||
}
|
||||
}
|
||||
|
||||
if (base == 0U) {
|
||||
return false;
|
||||
}
|
||||
@ -82,22 +183,59 @@ bool pcie_msi_vector_connect(pcie_bdf_t bdf,
|
||||
|
||||
#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */
|
||||
|
||||
bool pcie_msi_enable(pcie_bdf_t bdf,
|
||||
msi_vector_t *vectors,
|
||||
uint8_t n_vector)
|
||||
#ifdef CONFIG_PCIE_MSI_X
|
||||
|
||||
/* Program the mapped MSI-X table entries for each allocated vector,
 * then set the enable bit in the MSI-X Message Control Register.
 *
 * The entries are fully written before MSI-X is enabled, so the device
 * never sees a partially-programmed table.
 *
 * @param bdf      Device BDF the MSI-X capability belongs to
 * @param vectors  Vectors whose msix_vector pointers were set up by
 *                 map_msix_table_entries()
 * @param n_vector Number of vectors to program
 * @param base     Config-space offset of the MSI-X capability
 */
static void enable_msix(pcie_bdf_t bdf,
			msi_vector_t *vectors,
			uint8_t n_vector,
			uint32_t base)
{
	unsigned int irq;
	uint32_t mcr;
	int i;

	irq = pcie_get_irq(bdf);

	for (i = 0; i < n_vector; i++) {
		uint32_t map = pcie_msi_map(irq, &vectors[i]);
		uint32_t mdr = pcie_msi_mdr(irq, &vectors[i]);

		/* These stores target the device's mapped MSI-X table */
		vectors[i].msix_vector->msg_addr = map;
		vectors[i].msix_vector->msg_up_addr = 0;
		vectors[i].msix_vector->msg_data = mdr;
		/* vector_ctrl = 0 leaves the entry unmasked */
		vectors[i].msix_vector->vector_ctrl = 0;
	}

	mcr = pcie_conf_read(bdf, base + PCIE_MSIX_MCR);
	mcr |= PCIE_MSIX_MCR_EN;
	pcie_conf_write(bdf, base + PCIE_MSIX_MCR, mcr);
}
|
||||
|
||||
#else
|
||||
#define enable_msix(...)
|
||||
#endif /* CONFIG_PCIE_MSI_X */
|
||||
|
||||
static void disable_msi(pcie_bdf_t bdf,
|
||||
uint32_t base)
|
||||
{
|
||||
uint32_t mcr;
|
||||
|
||||
mcr = pcie_conf_read(bdf, base + PCIE_MSI_MCR);
|
||||
mcr &= ~PCIE_MSI_MCR_EN;
|
||||
pcie_conf_write(bdf, base + PCIE_MSI_MCR, mcr);
|
||||
}
|
||||
|
||||
static void enable_msi(pcie_bdf_t bdf,
|
||||
msi_vector_t *vectors,
|
||||
uint8_t n_vector,
|
||||
uint32_t base)
|
||||
{
|
||||
unsigned int irq;
|
||||
uint32_t base;
|
||||
uint32_t mcr;
|
||||
uint32_t map;
|
||||
uint32_t mdr;
|
||||
uint32_t mme;
|
||||
|
||||
base = pcie_get_cap(bdf, PCIE_MSI_CAP_ID);
|
||||
if (base == 0U) {
|
||||
return false;
|
||||
}
|
||||
|
||||
irq = pcie_get_irq(bdf);
|
||||
|
||||
map = pcie_msi_map(irq, vectors);
|
||||
@ -121,6 +259,39 @@ bool pcie_msi_enable(pcie_bdf_t bdf,
|
||||
|
||||
mcr |= PCIE_MSI_MCR_EN;
|
||||
pcie_conf_write(bdf, base + PCIE_MSI_MCR, mcr);
|
||||
}
|
||||
|
||||
bool pcie_msi_enable(pcie_bdf_t bdf,
|
||||
msi_vector_t *vectors,
|
||||
uint8_t n_vector)
|
||||
{
|
||||
bool msi = true;
|
||||
uint32_t base;
|
||||
|
||||
base = pcie_get_cap(bdf, PCIE_MSI_CAP_ID);
|
||||
|
||||
if (IS_ENABLED(CONFIG_PCIE_MSI_X)) {
|
||||
uint32_t base_msix;
|
||||
|
||||
base_msix = pcie_get_cap(bdf, PCIE_MSIX_CAP_ID);
|
||||
if ((base_msix != 0U) && (base != 0U)) {
|
||||
disable_msi(bdf, base);
|
||||
}
|
||||
|
||||
msi = false;
|
||||
base = base_msix;
|
||||
}
|
||||
|
||||
if (base == 0U) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!msi && IS_ENABLED(CONFIG_PCIE_MSI_X)) {
|
||||
enable_msix(bdf, vectors, n_vector, base);
|
||||
} else {
|
||||
enable_msi(bdf, vectors, n_vector, base);
|
||||
}
|
||||
|
||||
pcie_set_cmd(bdf, PCIE_CONF_CMDSTAT_MASTER, true);
|
||||
|
||||
return true;
|
||||
|
||||
@ -17,9 +17,20 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* One entry of a device's MSI-X table, as laid out in the memory
 * region located via the capability's Table Offset/BIR register.
 * Field offsets match the PCIE_VTBL_* offsets (0/4/8/12), 16 bytes
 * per entry in total.
 */
struct msix_vector {
	uint32_t msg_addr;	/* Message Address (lower 32 bits) */
	uint32_t msg_up_addr;	/* Message Upper Address */
	uint32_t msg_data;	/* Message Data */
	uint32_t vector_ctrl;	/* Vector Control */
};
|
||||
|
||||
/* Per-vector MSI/MSI-X state handed out by the MSI vector allocation
 * API and consumed when enabling interrupts on a device.
 */
struct msi_vector {
	pcie_bdf_t bdf;			/* Device this vector belongs to */
	arch_msi_vector_t arch;		/* Arch-specific vector data */
#ifdef CONFIG_PCIE_MSI_X
	struct msix_vector *msix_vector; /* Mapped MSI-X table entry */
	bool msix;			/* true when the vector is MSI-X based */
#endif /* CONFIG_PCIE_MSI_X */
};
|
||||
|
||||
typedef struct msi_vector msi_vector_t;
|
||||
@ -127,6 +138,28 @@ extern bool pcie_msi_enable(pcie_bdf_t bdf,
|
||||
#define PCIE_MSI_MDR_32 2U
|
||||
#define PCIE_MSI_MDR_64 3U
|
||||
|
||||
/*
 * As with MSI, the first word of the MSI-X capability is shared
 * with the capability ID and list link. The high 16 bits are the MCR.
 */

#define PCIE_MSIX_MCR			0U

#define PCIE_MSIX_MCR_EN		0x80000000U /* Enable MSI-X */
#define PCIE_MSIX_MCR_FMASK		0x40000000U /* Function Mask */
#define PCIE_MSIX_MCR_TSIZE		0x07FF0000U /* Table size mask */
#define PCIE_MSIX_MCR_TSIZE_SHIFT	16
/* Size in bytes of one MSI-X table entry.
 * NOTE(review): "MSIR" looks like a typo for "MSIX" — confirm before renaming.
 */
#define PCIE_MSIR_TABLE_ENTRY_SIZE	16

#define PCIE_MSIX_TR			1U
#define PCIE_MSIX_TR_BIR		0x00000007U /* BIR mask */
#define PCIE_MSIX_TR_OFFSET		0xFFFFFFF8U /* Offset mask */

#define PCIE_VTBL_MA			0U /* Msg Address offset */
#define PCIE_VTBL_MUA			4U /* Msg Upper Address offset */
#define PCIE_VTBL_MD			8U /* Msg Data offset */
#define PCIE_VTBL_VCTRL			12U /* Vector control offset */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
Loading…
Reference in New Issue
Block a user