drivers: ethernet: Add Xilinx AXI Enet driver
The Xilinx AXI Ethernet subsystem is commonly found in FPGA designs. This patch adds a driver and device tree bindings for the Ethernet MAC core and its MDIO controller.

The driver was tested on a RISC-V softcore in an FPGA design, with an RGMII PHY and Ethernet subsystem version 7.2 Rev. 14. The device tree bindings match the device tree generated by Vitis hsi; note that Vitis generates one of the two included compatible strings, depending on its version.

Signed-off-by: Eric Ackermann <eric.ackermann@cispa.de>
commit 4342d7108b (parent c05cfbf15e)
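For context, a minimal sketch (not part of the patch itself) of how an application might bring up networking over the interface this driver registers, assuming the standard Zephyr net_if API, a typical single-interface configuration, and that CONFIG_NET_DHCPV4 is enabled:

/* Illustrative only, not part of this patch. */
#include <zephyr/kernel.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/dhcpv4.h>

int main(void)
{
	/* Assumes the xlnx,axi-ethernet-1.00.a node provides the default interface. */
	struct net_if *iface = net_if_get_default();

	if (iface == NULL) {
		printk("No network interface found\n");
		return 0;
	}

	/* Carrier comes up once the PHY reports link; the DHCP client
	 * then negotiates an address in the background.
	 */
	net_dhcpv4_start(iface);

	return 0;
}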
drivers/ethernet/CMakeLists.txt (modified)
@@ -44,6 +44,7 @@ zephyr_library_sources_ifdef(CONFIG_ETH_RENESAS_RA eth_renesas_ra.c)
zephyr_library_sources_ifdef(CONFIG_ETH_LAN9250 eth_lan9250.c)
zephyr_library_sources_ifdef(CONFIG_ETH_SY1XX eth_sensry_sy1xx_mac.c)
zephyr_library_sources_ifdef(CONFIG_ETH_NXP_ENET eth_nxp_enet.c)
zephyr_library_sources_ifdef(CONFIG_ETH_XILINX_AXIENET eth_xilinx_axienet.c)

if(CONFIG_ETH_NXP_S32_NETC)
  zephyr_library_sources(eth_nxp_s32_netc.c)
drivers/ethernet/Kconfig (modified)
@@ -76,6 +76,7 @@ source "drivers/ethernet/Kconfig.xmc4xxx"
source "drivers/ethernet/Kconfig.test"
source "drivers/ethernet/Kconfig.lan9250"
source "drivers/ethernet/Kconfig.sy1xx_mac"
source "drivers/ethernet/Kconfig.xilinx_axienet"

source "drivers/ethernet/eth_nxp_enet_qos/Kconfig"
drivers/ethernet/Kconfig.xilinx_axienet (new file, 41 lines)
@@ -0,0 +1,41 @@
#
# Xilinx AXI 1G / 2.5G Ethernet Subsystem
#
# Copyright (c) 2024, CISPA Helmholtz Center for Information Security
# SPDX-License-Identifier: Apache-2.0
#

config ETH_XILINX_AXIENET
	bool "Xilinx AXI Ethernet Driver"
	default y
	depends on DT_HAS_XLNX_AXI_ETHERNET_1_00_A_ENABLED
	depends on DMA_XILINX_AXI_DMA
	help
	  Enable Xilinx 1G / 2.5G AXI Ethernet driver,
	  commonly found on FPGAs.

config ETH_XILINX_AXIENET_BUFFER_NUM_TX
	int "Number of buffers for concurrent transmission (TX)."
	depends on ETH_XILINX_AXIENET
	default DMA_XILINX_AXI_DMA_SG_DESCRIPTOR_NUM_TX
	range 2 DMA_XILINX_AXI_DMA_SG_DESCRIPTOR_NUM_TX
	help
	  Number of buffers for transmission allocated in the Xilinx AXI Ethernet driver.
	  This limits how many transmissions can be in-flight at the same time.
	  This cannot exceed the number of available TX buffers in the AXI DMA. However,
	  in scenarios with multiple AXI DMAs for different purposes in the system, it
	  may be desirable to reduce the number of concurrent transmissions to conserve
	  resources.

config ETH_XILINX_AXIENET_BUFFER_NUM_RX
	int "Number of buffers for concurrent reception (RX)."
	depends on ETH_XILINX_AXIENET
	default DMA_XILINX_AXI_DMA_SG_DESCRIPTOR_NUM_RX
	range 2 DMA_XILINX_AXI_DMA_SG_DESCRIPTOR_NUM_RX
	help
	  Number of buffers for reception allocated in the Xilinx AXI Ethernet driver.
	  This limits how many receptions can be in-flight at the same time.
	  This cannot exceed the number of available RX buffers in the AXI DMA. However,
	  in scenarios with multiple AXI DMAs for different purposes in the system, it
	  may be desirable to reduce the number of concurrent receptions to conserve
	  resources.
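A note on the "range 2 ..." lower bound above: the driver (added below) manages these buffers as a ring and refuses to queue a new transfer when the populated index would catch up with the completed index, so with N buffers at most N - 1 transfers can be outstanding. A minimal sketch of that check, using a hypothetical count of 4 buffers:

/* Sketch of the ring-full condition used by setup_dma_tx_transfer() and
 * setup_dma_rx_transfer(); NUM stands in for the Kconfig buffer count.
 */
#include <stdbool.h>
#include <stddef.h>

#define NUM 4 /* hypothetical CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_TX */

static bool ring_has_space(size_t populated, size_t completed)
{
	/* with NUM == 2, exactly one transfer can be in flight */
	return ((populated + 1) % NUM) != completed;
}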
drivers/ethernet/eth_xilinx_axienet.c (new file, 607 lines)
@@ -0,0 +1,607 @@
/*
|
||||
* Xilinx AXI 1G / 2.5G Ethernet Subsystem
|
||||
*
|
||||
* Copyright(c) 2024, CISPA Helmholtz Center for Information Security
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include <zephyr/logging/log.h>
|
||||
LOG_MODULE_REGISTER(eth_xilinx_axienet, CONFIG_ETHERNET_LOG_LEVEL);
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <zephyr/kernel.h>
|
||||
#include <zephyr/net/ethernet.h>
|
||||
#include <ethernet/eth_stats.h>
|
||||
#include <zephyr/drivers/dma.h>
|
||||
#include <zephyr/net/phy.h>
|
||||
#include <zephyr/irq.h>
|
||||
#include <zephyr/sys/barrier.h>
|
||||
|
||||
#include "../dma/dma_xilinx_axi_dma.h"
|
||||
|
||||
/* register offsets and masks */
|
||||
#define XILINX_AXIENET_INTERRUPT_STATUS_OFFSET 0x0000000C
|
||||
#define XILINX_AXIENET_INTERRUPT_STATUS_RXREJ_MASK 0x00000008
|
||||
#define XILINX_AXIENET_INTERRUPT_STATUS_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_OFFSET 0x00000010
|
||||
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_RXCMPIT_MASK 0x00000004 /* Rx complete */
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_RXRJECT_MASK 0x00000008 /* Rx frame rejected */
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_TXCMPIT_MASK 0x00000020 /* Tx complete */
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_RXDCMLOCK_MASK 0x00000040 /* Rx Dcm Lock */
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_MGTRDY_MASK 0x00000080 /* MGT clock Lock */
|
||||
#define XILINX_AXIENET_INTERRUPT_PENDING_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */
|
||||
|
||||
#define XILINX_AXIENET_INTERRUPT_ENABLE_OFFSET 0x00000014
|
||||
#define XILINX_AXIENET_INTERRUPT_ENABLE_RXREJ_MASK 0x00000008
|
||||
#define XILINX_AXIENET_INTERRUPT_ENABLE_OVR_MASK 0x00000010 /* FIFO overrun */
|
||||
|
||||
#define XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_0_REG_OFFSET 0x00000400
|
||||
#define XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_OFFSET 0x00000404
|
||||
#define XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_RX_EN_MASK 0x10000000
|
||||
#define XILINX_AXIENET_RECEIVER_CONFIGURATION_FLOW_CONTROL_OFFSET 0x0000040C
|
||||
#define XILINX_AXIENET_RECEIVER_CONFIGURATION_FLOW_CONTROL_EN_MASK 0x20000000
|
||||
#define XILINX_AXIENET_TX_CONTROL_REG_OFFSET 0x00000408
|
||||
#define XILINX_AXIENET_TX_CONTROL_TX_EN_MASK (1 << 11)
|
||||
|
||||
#define XILINX_AXIENET_UNICAST_ADDRESS_WORD_0_OFFSET 0x00000700
|
||||
#define XILINX_AXIENET_UNICAST_ADDRESS_WORD_1_OFFSET 0x00000704
|
||||
|
||||
#if (CONFIG_DCACHE_LINE_SIZE > 0)
|
||||
/* cache-line aligned to allow selective cache-line invalidation on the buffer */
|
||||
#define XILINX_AXIENET_ETH_ALIGN CONFIG_DCACHE_LINE_SIZE
|
||||
#else
|
||||
/* pointer-aligned to reduce padding in the struct */
|
||||
#define XILINX_AXIENET_ETH_ALIGN sizeof(void *)
|
||||
#endif
|
||||
|
||||
#define XILINX_AXIENET_ETH_BUFFER_SIZE \
|
||||
((NET_ETH_MAX_FRAME_SIZE + XILINX_AXIENET_ETH_ALIGN - 1) & ~(XILINX_AXIENET_ETH_ALIGN - 1))
|
||||
|
||||
struct xilinx_axienet_buffer {
|
||||
uint8_t buffer[XILINX_AXIENET_ETH_BUFFER_SIZE];
|
||||
} __aligned(XILINX_AXIENET_ETH_ALIGN);
|
||||
|
||||
/* device state */
|
||||
struct xilinx_axienet_data {
|
||||
struct xilinx_axienet_buffer tx_buffer[CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_TX];
|
||||
struct xilinx_axienet_buffer rx_buffer[CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_RX];
|
||||
|
||||
size_t rx_populated_buffer_index;
|
||||
size_t rx_completed_buffer_index;
|
||||
size_t tx_populated_buffer_index;
|
||||
size_t tx_completed_buffer_index;
|
||||
|
||||
struct net_if *interface;
|
||||
|
||||
/* device mac address */
|
||||
uint8_t mac_addr[NET_ETH_ADDR_LEN];
|
||||
bool dma_is_configured_rx;
|
||||
bool dma_is_configured_tx;
|
||||
};
|
||||
|
||||
/* global configuration per Ethernet device */
|
||||
struct xilinx_axienet_config {
|
||||
void (*config_func)(const struct xilinx_axienet_data *dev);
|
||||
const struct device *dma;
|
||||
|
||||
const struct device *phy;
|
||||
|
||||
mem_addr_t reg;
|
||||
|
||||
int irq_num;
|
||||
bool have_irq;
|
||||
|
||||
bool have_rx_csum_offload;
|
||||
bool have_tx_csum_offload;
|
||||
};
|
||||
|
||||
static void xilinx_axienet_write_register(const struct xilinx_axienet_config *config,
|
||||
mem_addr_t reg_offset, uint32_t value)
|
||||
{
|
||||
sys_write32(value, config->reg + reg_offset);
|
||||
}
|
||||
|
||||
static uint32_t xilinx_axienet_read_register(const struct xilinx_axienet_config *config,
|
||||
mem_addr_t reg_offset)
|
||||
{
|
||||
return sys_read32(config->reg + reg_offset);
|
||||
}
|
||||
static int setup_dma_rx_transfer(const struct device *dev,
|
||||
const struct xilinx_axienet_config *config,
|
||||
struct xilinx_axienet_data *data);
|
||||
|
||||
/* called by DMA when a packet is available */
|
||||
static void xilinx_axienet_rx_callback(const struct device *dma, void *user_data, uint32_t channel,
|
||||
int status)
|
||||
{
|
||||
struct device *ethdev = (struct device *)user_data;
|
||||
struct xilinx_axienet_data *data = ethdev->data;
|
||||
unsigned int packet_size;
|
||||
struct net_pkt *pkt;
|
||||
|
||||
size_t next_descriptor =
|
||||
(data->rx_completed_buffer_index + 1) % CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_RX;
|
||||
size_t current_descriptor = data->rx_completed_buffer_index;
|
||||
|
||||
if (!net_if_is_up(data->interface)) {
|
||||
/*
|
||||
* cannot receive data now, so discard silently
|
||||
* setup new transfer for when the interface is back up
|
||||
*/
|
||||
goto setup_new_transfer;
|
||||
}
|
||||
|
||||
if (status < 0) {
|
||||
LOG_ERR("DMA RX error: %d", status);
|
||||
eth_stats_update_errors_rx(data->interface);
|
||||
goto setup_new_transfer;
|
||||
}
|
||||
|
||||
data->rx_completed_buffer_index = next_descriptor;
|
||||
|
||||
packet_size = dma_xilinx_axi_dma_last_received_frame_length(dma);
|
||||
pkt = net_pkt_rx_alloc_with_buffer(data->interface, packet_size, AF_UNSPEC, 0, K_NO_WAIT);
|
||||
|
||||
if (!pkt) {
|
||||
LOG_ERR("Could not allocate a packet!");
|
||||
goto setup_new_transfer;
|
||||
}
|
||||
if (net_pkt_write(pkt, data->rx_buffer[current_descriptor].buffer, packet_size)) {
|
||||
LOG_ERR("Could not write RX buffer into packet!");
|
||||
net_pkt_unref(pkt);
|
||||
goto setup_new_transfer;
|
||||
}
|
||||
if (net_recv_data(data->interface, pkt) < 0) {
|
||||
LOG_ERR("Coult not receive packet data!");
|
||||
net_pkt_unref(pkt);
|
||||
goto setup_new_transfer;
|
||||
}
|
||||
|
||||
LOG_DBG("Packet with %u bytes received!\n", packet_size);
|
||||
|
||||
/* we need to start a new DMA transfer regardless of whether the DMA reported an error */
/* otherwise, the ethernet subsystem would just stop receiving */
|
||||
setup_new_transfer:
|
||||
if (setup_dma_rx_transfer(ethdev, ethdev->config, ethdev->data)) {
|
||||
LOG_ERR("Could not set up next RX DMA transfer!");
|
||||
}
|
||||
}
|
||||
|
||||
static void xilinx_axienet_tx_callback(const struct device *dev, void *user_data, uint32_t channel,
|
||||
int status)
|
||||
{
|
||||
struct device *ethdev = (struct device *)user_data;
|
||||
struct xilinx_axienet_data *data = ethdev->data;
|
||||
size_t next_descriptor =
|
||||
(data->tx_completed_buffer_index + 1) % CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_TX;
|
||||
|
||||
data->tx_completed_buffer_index = next_descriptor;
|
||||
|
||||
if (status < 0) {
|
||||
LOG_ERR("DMA TX error: %d", status);
|
||||
eth_stats_update_errors_tx(data->interface);
|
||||
}
|
||||
}
|
||||
|
||||
static int setup_dma_rx_transfer(const struct device *dev,
|
||||
const struct xilinx_axienet_config *config,
|
||||
struct xilinx_axienet_data *data)
|
||||
{
|
||||
int err;
|
||||
size_t next_descriptor =
|
||||
(data->rx_populated_buffer_index + 1) % CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_RX;
|
||||
size_t current_descriptor = data->rx_populated_buffer_index;
|
||||
|
||||
if (next_descriptor == data->rx_completed_buffer_index) {
|
||||
LOG_ERR("Cannot start RX via DMA - populated buffer %zu will run into completed"
|
||||
" buffer %zu!",
|
||||
data->rx_populated_buffer_index, data->rx_completed_buffer_index);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
if (!data->dma_is_configured_rx) {
|
||||
struct dma_block_config head_block = {
|
||||
.source_address = 0x0,
|
||||
.dest_address = (uintptr_t)data->rx_buffer[current_descriptor].buffer,
|
||||
.block_size = sizeof(data->rx_buffer[current_descriptor].buffer),
|
||||
.next_block = NULL,
|
||||
.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
|
||||
.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT};
|
||||
struct dma_config dma_conf = {.dma_slot = 0,
|
||||
.channel_direction = PERIPHERAL_TO_MEMORY,
|
||||
.complete_callback_en = 1,
|
||||
.error_callback_dis = 0,
|
||||
.block_count = 1,
|
||||
.head_block = &head_block,
|
||||
.user_data = (void *)dev,
|
||||
.dma_callback = xilinx_axienet_rx_callback};
|
||||
|
||||
if (config->have_rx_csum_offload) {
|
||||
dma_conf.linked_channel = XILINX_AXI_DMA_LINKED_CHANNEL_FULL_CSUM_OFFLOAD;
|
||||
} else {
|
||||
dma_conf.linked_channel = XILINX_AXI_DMA_LINKED_CHANNEL_NO_CSUM_OFFLOAD;
|
||||
}
|
||||
|
||||
err = dma_config(config->dma, XILINX_AXI_DMA_RX_CHANNEL_NUM, &dma_conf);
|
||||
if (err) {
|
||||
LOG_ERR("DMA config failed: %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
data->dma_is_configured_rx = true;
|
||||
} else {
|
||||
/* can use faster "reload" API, as everything else stays the same */
|
||||
err = dma_reload(config->dma, XILINX_AXI_DMA_RX_CHANNEL_NUM, 0x0,
|
||||
(uintptr_t)data->rx_buffer[current_descriptor].buffer,
|
||||
sizeof(data->rx_buffer[current_descriptor].buffer));
|
||||
if (err) {
|
||||
LOG_ERR("DMA reconfigure failed: %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
LOG_DBG("Receiving one packet with DMA!");
|
||||
|
||||
/* prevent concurrent modification */
|
||||
data->rx_populated_buffer_index = next_descriptor;
|
||||
|
||||
err = dma_start(config->dma, XILINX_AXI_DMA_RX_CHANNEL_NUM);
|
||||
|
||||
if (err) {
|
||||
/* buffer has not been accepted by DMA */
|
||||
data->rx_populated_buffer_index = current_descriptor;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* assumes that the caller has set up data->tx_buffer */
|
||||
static int setup_dma_tx_transfer(const struct device *dev,
|
||||
const struct xilinx_axienet_config *config,
|
||||
struct xilinx_axienet_data *data, uint32_t buffer_len)
|
||||
{
|
||||
int err;
|
||||
size_t next_descriptor =
|
||||
(data->tx_populated_buffer_index + 1) % CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_TX;
|
||||
size_t current_descriptor = data->tx_populated_buffer_index;
|
||||
|
||||
if (next_descriptor == data->tx_completed_buffer_index) {
|
||||
LOG_ERR("Cannot start TX via DMA - populated buffer %zu will run into completed"
|
||||
" buffer %zu!",
|
||||
data->tx_populated_buffer_index, data->tx_completed_buffer_index);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
if (!data->dma_is_configured_tx) {
|
||||
struct dma_block_config head_block = {
|
||||
.source_address = (uintptr_t)data->tx_buffer[current_descriptor].buffer,
|
||||
.dest_address = 0x0,
|
||||
.block_size = buffer_len,
|
||||
.next_block = NULL,
|
||||
.source_addr_adj = DMA_ADDR_ADJ_INCREMENT,
|
||||
.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT};
|
||||
struct dma_config dma_conf = {.dma_slot = 0,
|
||||
.channel_direction = MEMORY_TO_PERIPHERAL,
|
||||
.complete_callback_en = 1,
|
||||
.error_callback_dis = 0,
|
||||
.block_count = 1,
|
||||
.head_block = &head_block,
|
||||
.user_data = (void *)dev,
|
||||
.dma_callback = xilinx_axienet_tx_callback};
|
||||
|
||||
if (config->have_tx_csum_offload) {
|
||||
dma_conf.linked_channel = XILINX_AXI_DMA_LINKED_CHANNEL_FULL_CSUM_OFFLOAD;
|
||||
} else {
|
||||
dma_conf.linked_channel = XILINX_AXI_DMA_LINKED_CHANNEL_NO_CSUM_OFFLOAD;
|
||||
}
|
||||
|
||||
err = dma_config(config->dma, XILINX_AXI_DMA_TX_CHANNEL_NUM, &dma_conf);
|
||||
if (err) {
|
||||
LOG_ERR("DMA config failed: %d", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
data->dma_is_configured_tx = true;
|
||||
} else {
|
||||
/* can use faster "reload" API, as everything else stays the same */
|
||||
err = dma_reload(config->dma, XILINX_AXI_DMA_TX_CHANNEL_NUM,
|
||||
(uintptr_t)data->tx_buffer[current_descriptor].buffer, 0x0,
|
||||
buffer_len);
|
||||
if (err) {
|
||||
LOG_ERR("DMA reconfigure failed: %d", err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
/* prevent concurrent modification */
|
||||
data->tx_populated_buffer_index = next_descriptor;
|
||||
|
||||
err = dma_start(config->dma, XILINX_AXI_DMA_TX_CHANNEL_NUM);
|
||||
|
||||
if (err) {
|
||||
/* buffer has not been accepted by DMA */
|
||||
data->tx_populated_buffer_index = current_descriptor;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void xilinx_axienet_isr(const struct device *dev)
|
||||
{
|
||||
const struct xilinx_axienet_config *config = dev->config;
|
||||
struct xilinx_axienet_data *data = dev->data;
|
||||
uint32_t status =
|
||||
xilinx_axienet_read_register(config, XILINX_AXIENET_INTERRUPT_PENDING_OFFSET);
|
||||
|
||||
(void)data;
|
||||
|
||||
if (status & XILINX_AXIENET_INTERRUPT_PENDING_RXFIFOOVR_MASK) {
|
||||
LOG_WRN("FIFO was overrun - probably lost packets!");
|
||||
eth_stats_update_errors_rx(data->interface);
|
||||
} else if (status & XILINX_AXIENET_INTERRUPT_PENDING_RXRJECT_MASK) {
|
||||
/* this is extremely rare on Ethernet */
|
||||
/* most likely cause is mistake in FPGA configuration */
|
||||
LOG_WRN("Erroneous frame received!");
|
||||
eth_stats_update_errors_rx(data->interface);
|
||||
}
|
||||
|
||||
if (status != 0) {
|
||||
/* clear IRQ by writing the same value back */
|
||||
xilinx_axienet_write_register(config, XILINX_AXIENET_INTERRUPT_STATUS_OFFSET,
|
||||
status);
|
||||
}
|
||||
}
|
||||
|
||||
static enum ethernet_hw_caps xilinx_axienet_caps(const struct device *dev)
|
||||
{
|
||||
const struct xilinx_axienet_config *config = dev->config;
|
||||
enum ethernet_hw_caps ret = ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T |
|
||||
ETHERNET_LINK_1000BASE_T;
|
||||
|
||||
if (config->have_rx_csum_offload) {
|
||||
ret |= ETHERNET_HW_RX_CHKSUM_OFFLOAD;
|
||||
}
|
||||
if (config->have_tx_csum_offload) {
|
||||
ret |= ETHERNET_HW_TX_CHKSUM_OFFLOAD;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct device *xilinx_axienet_get_phy(const struct device *dev)
|
||||
{
|
||||
const struct xilinx_axienet_config *config = dev->config;
|
||||
|
||||
return config->phy;
|
||||
}
|
||||
|
||||
static int xilinx_axienet_get_config(const struct device *dev, enum ethernet_config_type type,
|
||||
struct ethernet_config *config)
|
||||
{
|
||||
const struct xilinx_axienet_config *dev_config = dev->config;
|
||||
const struct xilinx_axienet_data *data = dev->data;
|
||||
struct phy_link_state link_state;
|
||||
int err;
|
||||
|
||||
switch (type) {
|
||||
case ETHERNET_CONFIG_TYPE_RX_CHECKSUM_SUPPORT:
|
||||
if (dev_config->have_rx_csum_offload) {
|
||||
config->chksum_support =
|
||||
ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER |
|
||||
ETHERNET_CHECKSUM_SUPPORT_TCP | ETHERNET_CHECKSUM_SUPPORT_UDP |
|
||||
ETHERNET_CHECKSUM_SUPPORT_IPV6_HEADER |
|
||||
ETHERNET_CHECKSUM_SUPPORT_TCP | ETHERNET_CHECKSUM_SUPPORT_UDP;
|
||||
} else {
|
||||
config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_NONE;
|
||||
}
|
||||
return 0;
|
||||
case ETHERNET_CONFIG_TYPE_TX_CHECKSUM_SUPPORT:
|
||||
if (dev_config->have_tx_csum_offload) {
|
||||
config->chksum_support =
|
||||
ETHERNET_CHECKSUM_SUPPORT_IPV4_HEADER |
|
||||
ETHERNET_CHECKSUM_SUPPORT_TCP | ETHERNET_CHECKSUM_SUPPORT_UDP |
|
||||
ETHERNET_CHECKSUM_SUPPORT_IPV6_HEADER |
|
||||
ETHERNET_CHECKSUM_SUPPORT_TCP | ETHERNET_CHECKSUM_SUPPORT_UDP;
|
||||
} else {
|
||||
config->chksum_support = ETHERNET_CHECKSUM_SUPPORT_NONE;
|
||||
}
|
||||
return 0;
|
||||
default:
|
||||
LOG_ERR("Unsupported configuration queried: %u", type);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static void xilinx_axienet_set_mac_address(const struct xilinx_axienet_config *config,
|
||||
const struct xilinx_axienet_data *data)
|
||||
{
|
||||
xilinx_axienet_write_register(config, XILINX_AXIENET_UNICAST_ADDRESS_WORD_0_OFFSET,
|
||||
(data->mac_addr[0]) | (data->mac_addr[1] << 8) |
|
||||
(data->mac_addr[2] << 16) |
|
||||
(data->mac_addr[3] << 24));
|
||||
xilinx_axienet_write_register(config, XILINX_AXIENET_UNICAST_ADDRESS_WORD_1_OFFSET,
|
||||
(data->mac_addr[4]) | (data->mac_addr[5] << 8));
|
||||
}
|
||||
|
||||
static int xilinx_axienet_set_config(const struct device *dev, enum ethernet_config_type type,
|
||||
const struct ethernet_config *config)
|
||||
{
|
||||
const struct xilinx_axienet_config *dev_config = dev->config;
|
||||
struct xilinx_axienet_data *data = dev->data;
|
||||
|
||||
switch (type) {
|
||||
case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
|
||||
memcpy(data->mac_addr, config->mac_address.addr, sizeof(data->mac_addr));
|
||||
xilinx_axienet_set_mac_address(dev_config, data);
|
||||
return net_if_set_link_addr(data->interface, data->mac_addr,
|
||||
sizeof(data->mac_addr), NET_LINK_ETHERNET);
|
||||
default:
|
||||
LOG_ERR("Unsupported configuration set: %u", type);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static void phy_link_state_changed(const struct device *dev, struct phy_link_state *state,
|
||||
void *user_data)
|
||||
{
|
||||
struct xilinx_axienet_data *data = user_data;
|
||||
|
||||
ARG_UNUSED(dev);
|
||||
|
||||
LOG_INF("Link state changed to: %s (speed %x)", state->is_up ? "up" : "down", state->speed);
|
||||
|
||||
/* inform the L2 driver about link event */
|
||||
if (state->is_up) {
|
||||
net_eth_carrier_on(data->interface);
|
||||
} else {
|
||||
net_eth_carrier_off(data->interface);
|
||||
}
|
||||
}
|
||||
|
||||
static void xilinx_axienet_iface_init(struct net_if *iface)
|
||||
{
|
||||
struct xilinx_axienet_data *data = net_if_get_device(iface)->data;
|
||||
const struct xilinx_axienet_config *config = net_if_get_device(iface)->config;
|
||||
int err;
|
||||
|
||||
data->interface = iface;
|
||||
|
||||
ethernet_init(iface);
|
||||
|
||||
net_if_set_link_addr(iface, data->mac_addr, sizeof(data->mac_addr), NET_LINK_ETHERNET);
|
||||
|
||||
/* carrier is initially off */
|
||||
net_eth_carrier_off(iface);
|
||||
|
||||
err = phy_link_callback_set(config->phy, phy_link_state_changed, data);
|
||||
|
||||
if (err) {
|
||||
LOG_ERR("Could not set PHY link state changed handler : %d",
|
||||
config->phy ? err : -1);
|
||||
}
|
||||
|
||||
LOG_INF("Interface initialized!");
|
||||
}
|
||||
|
||||
static int xilinx_axienet_send(const struct device *dev, struct net_pkt *pkt)
|
||||
{
|
||||
struct xilinx_axienet_data *data = dev->data;
|
||||
const struct xilinx_axienet_config *config = dev->config;
|
||||
size_t pkt_len = net_pkt_get_len(pkt);
|
||||
size_t current_descriptor = data->tx_populated_buffer_index;
|
||||
|
||||
if (net_pkt_read(pkt, data->tx_buffer[current_descriptor].buffer, pkt_len)) {
|
||||
LOG_ERR("Failed to read packet into TX buffer!");
|
||||
return -EIO;
|
||||
}
|
||||
return setup_dma_tx_transfer(dev, config, data, pkt_len);
|
||||
}
|
||||
|
||||
static int xilinx_axienet_probe(const struct device *dev)
|
||||
{
|
||||
const struct xilinx_axienet_config *config = dev->config;
|
||||
struct xilinx_axienet_data *data = dev->data;
|
||||
uint32_t status;
|
||||
int err;
|
||||
|
||||
status = xilinx_axienet_read_register(
|
||||
config, XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_OFFSET);
|
||||
status = status & ~XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_RX_EN_MASK;
|
||||
xilinx_axienet_write_register(
|
||||
config, XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_OFFSET, status);
|
||||
|
||||
/* RX disabled - it is safe to modify settings */
|
||||
|
||||
/* clear any RX rejected interrupts from when the core was not configured */
|
||||
xilinx_axienet_write_register(config, XILINX_AXIENET_INTERRUPT_STATUS_OFFSET,
|
||||
XILINX_AXIENET_INTERRUPT_STATUS_RXREJ_MASK |
|
||||
XILINX_AXIENET_INTERRUPT_STATUS_RXFIFOOVR_MASK);
|
||||
|
||||
xilinx_axienet_write_register(config, XILINX_AXIENET_INTERRUPT_ENABLE_OFFSET,
|
||||
config->have_irq
|
||||
? XILINX_AXIENET_INTERRUPT_ENABLE_RXREJ_MASK |
|
||||
XILINX_AXIENET_INTERRUPT_ENABLE_OVR_MASK
|
||||
: 0);
|
||||
|
||||
xilinx_axienet_write_register(config,
|
||||
XILINX_AXIENET_RECEIVER_CONFIGURATION_FLOW_CONTROL_OFFSET,
|
||||
XILINX_AXIENET_RECEIVER_CONFIGURATION_FLOW_CONTROL_EN_MASK);
|
||||
|
||||
/* at time of writing, hardware does not support half duplex */
|
||||
err = phy_configure_link(config->phy, LINK_FULL_10BASE_T | LINK_FULL_100BASE_T |
|
||||
LINK_FULL_1000BASE_T);
|
||||
if (err) {
|
||||
LOG_WRN("Could not configure PHY: %d", -err);
|
||||
}
|
||||
|
||||
LOG_INF("RX Checksum offloading %s",
|
||||
config->have_rx_csum_offload ? "requested" : "disabled");
|
||||
LOG_INF("TX Checksum offloading %s",
|
||||
config->have_tx_csum_offload ? "requested" : "disabled");
|
||||
|
||||
xilinx_axienet_set_mac_address(config, data);
|
||||
|
||||
for (int i = 0; i < CONFIG_ETH_XILINX_AXIENET_BUFFER_NUM_RX - 1; i++) {
|
||||
setup_dma_rx_transfer(dev, config, data);
|
||||
}
|
||||
|
||||
status = xilinx_axienet_read_register(
|
||||
config, XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_OFFSET);
|
||||
status = status | XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_RX_EN_MASK;
|
||||
xilinx_axienet_write_register(
|
||||
config, XILINX_AXIENET_RECEIVER_CONFIGURATION_WORD_1_REG_OFFSET, status);
|
||||
|
||||
status = xilinx_axienet_read_register(config, XILINX_AXIENET_TX_CONTROL_REG_OFFSET);
|
||||
status = status | XILINX_AXIENET_TX_CONTROL_TX_EN_MASK;
|
||||
xilinx_axienet_write_register(config, XILINX_AXIENET_TX_CONTROL_REG_OFFSET, status);
|
||||
|
||||
config->config_func(data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* TODO PTP, VLAN not supported yet */
|
||||
static const struct ethernet_api xilinx_axienet_api = {
|
||||
.iface_api.init = xilinx_axienet_iface_init,
|
||||
.get_capabilities = xilinx_axienet_caps,
|
||||
.get_config = xilinx_axienet_get_config,
|
||||
.set_config = xilinx_axienet_set_config,
|
||||
.get_phy = xilinx_axienet_get_phy,
|
||||
.send = xilinx_axienet_send,
|
||||
};
|
||||
|
||||
#define SETUP_IRQS(inst) \
|
||||
IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), xilinx_axienet_isr, \
|
||||
DEVICE_DT_INST_GET(inst), 0); \
|
||||
\
|
||||
irq_enable(DT_INST_IRQN(inst))
|
||||
|
||||
#define XILINX_AXIENET_INIT(inst) \
|
||||
\
|
||||
static void xilinx_axienet_config_##inst(const struct xilinx_axienet_data *dev) \
|
||||
{ \
|
||||
COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, interrupts), (SETUP_IRQS(inst)), \
|
||||
(LOG_INF("No IRQs defined!"))); \
|
||||
} \
|
||||
\
|
||||
static struct xilinx_axienet_data data_##inst = { \
|
||||
.mac_addr = DT_INST_PROP(inst, local_mac_address), \
|
||||
.dma_is_configured_rx = false, \
|
||||
.dma_is_configured_tx = false}; \
|
||||
static const struct xilinx_axienet_config config_##inst = { \
|
||||
.config_func = xilinx_axienet_config_##inst, \
|
||||
.dma = DEVICE_DT_GET(DT_INST_PHANDLE(inst, axistream_connected)), \
|
||||
.phy = DEVICE_DT_GET(DT_INST_PHANDLE(inst, phy_handle)), \
|
||||
.reg = DT_REG_ADDR(DT_INST_PARENT(inst)), \
|
||||
.have_irq = DT_INST_NODE_HAS_PROP(inst, interrupts), \
|
||||
.have_tx_csum_offload = DT_INST_PROP_OR(inst, xlnx_txcsum, 0x0) == 0x2, \
|
||||
.have_rx_csum_offload = DT_INST_PROP_OR(inst, xlnx_rxcsum, 0x0) == 0x2, \
|
||||
}; \
|
||||
\
|
||||
ETH_NET_DEVICE_DT_INST_DEFINE(inst, xilinx_axienet_probe, NULL, &data_##inst, \
|
||||
&config_##inst, CONFIG_ETH_INIT_PRIORITY, \
|
||||
&xilinx_axienet_api, NET_ETH_MTU);
|
||||
|
||||
#define DT_DRV_COMPAT xlnx_axi_ethernet_1_00_a
|
||||
DT_INST_FOREACH_STATUS_OKAY(XILINX_AXIENET_INIT);
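A short note on the buffer sizing in the file above: XILINX_AXIENET_ETH_BUFFER_SIZE rounds the maximum frame size up to a multiple of XILINX_AXIENET_ETH_ALIGN with the usual power-of-two trick, so each buffer occupies whole cache lines and can be invalidated without touching its neighbours. A worked sketch, assuming a 64-byte cache line and a 1518-byte maximum frame (the actual NET_ETH_MAX_FRAME_SIZE depends on the network configuration):

#include <assert.h>

/* Round x up to a multiple of align; align must be a power of two. */
#define ROUND_UP_POW2(x, align) (((x) + (align) - 1) & ~((align) - 1))

/* Hypothetical numbers: a 1518-byte frame with 64-byte cache lines rounds
 * up to 1536 bytes, i.e. exactly 24 cache lines per buffer.
 */
static_assert(ROUND_UP_POW2(1518, 64) == 1536, "buffer is a whole number of cache lines");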
drivers/mdio/CMakeLists.txt (modified)
@@ -19,3 +19,4 @@ zephyr_library_sources_ifdef(CONFIG_MDIO_DWCXGMAC mdio_dwcxgmac.c)
zephyr_library_sources_ifdef(CONFIG_MDIO_RENESAS_RA mdio_renesas_ra.c)
zephyr_library_sources_ifdef(CONFIG_MDIO_LAN865X mdio_lan865x.c)
zephyr_library_sources_ifdef(CONFIG_MDIO_SENSRY_SY1XX mdio_sy1xx.c)
zephyr_library_sources_ifdef(CONFIG_MDIO_XILINX_AXI_ENET mdio_xilinx_axienet.c)
drivers/mdio/Kconfig (modified)
@@ -40,6 +40,7 @@ source "drivers/mdio/Kconfig.dwcxgmac"
source "drivers/mdio/Kconfig.renesas_ra"
source "drivers/mdio/Kconfig.lan865x"
source "drivers/mdio/Kconfig.sy1xx"
source "drivers/mdio/Kconfig.xilinx_axienet"

config MDIO_INIT_PRIORITY
	int "Init priority"
drivers/mdio/Kconfig.xilinx_axienet (new file, 9 lines)
@@ -0,0 +1,9 @@
# Copyright 2024 CISPA Helmholtz Center for Information Security gGmbH
# SPDX-License-Identifier: Apache-2.0

config MDIO_XILINX_AXI_ENET
	bool "Xilinx AXI Ethernet MDIO driver"
	default y
	depends on DT_HAS_XLNX_AXI_ETHERNET_1_00_A_MDIO_ENABLED
	help
	  Enable Xilinx AXI Ethernet MDIO bus driver.
drivers/mdio/mdio_shell.c (modified)
@@ -45,6 +45,8 @@ LOG_MODULE_REGISTER(mdio_shell, CONFIG_LOG_DEFAULT_LEVEL);
#define DT_DRV_COMPAT snps_dwcxgmac_mdio
#elif DT_HAS_COMPAT_STATUS_OKAY(sensry_sy1xx_mdio)
#define DT_DRV_COMPAT sensry_sy1xx_mdio
#elif DT_HAS_COMPAT_STATUS_OKAY(xlnx_axi_ethernet_1_00_a_mdio)
#define DT_DRV_COMPAT xlnx_axi_ethernet_1_00_a_mdio
#else
#error "No known devicetree compatible match for MDIO shell"
#endif
drivers/mdio/mdio_xilinx_axienet.c (new file, 338 lines)
@@ -0,0 +1,338 @@
/*
|
||||
* Xilinx AXI 1G / 2.5G Ethernet Subsystem
|
||||
*
|
||||
* Copyright(c) 2024, CISPA Helmholtz Center for Information Security
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
#include <zephyr/logging/log.h>
|
||||
LOG_MODULE_REGISTER(eth_xilinx_axienet_mdio, CONFIG_ETHERNET_LOG_LEVEL);
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <zephyr/kernel.h>
|
||||
#include <zephyr/drivers/mdio.h>
|
||||
#include <zephyr/sys/barrier.h>
|
||||
|
||||
#define XILINX_AXIENET_MDIO_SETUP_REG_OFFSET 0x00000500
|
||||
#define XILINX_AXIENET_MDIO_SETUP_REG_MDIO_DISABLE_MASK BIT_MASK(0)
|
||||
#define XILINX_AXIENET_MDIO_SETUP_REG_MDIO_ENABLE_MASK BIT(6)
|
||||
#define XILINX_AXIENET_MDIO_SETUP_REG_MDIO_CLOCK_DIVIDER_MASK BIT_MASK(6)
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_OFFSET 0x00000504
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_MASK_READY BIT(7)
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_SHIFT_PHYADDR 24
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_SHIFT_REGADDR 16
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_SHIFT_TXOP 14
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_MASK_READ BIT(15)
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_MASK_WRITE BIT(14)
|
||||
#define XILINX_AXIENET_MDIO_CONTROL_REG_MASK_INITIATE BIT(11)
|
||||
#define XILINX_AXIENET_MDIO_WRITE_DATA_REG_OFFSET 0x00000508
|
||||
#define XILINX_AXIENET_MDIO_READ_DATA_REG_OFFSET 0x0000050C
|
||||
#define XILINX_AXIENET_MDIO_READ_DATA_REG_DATA_MASK BIT_MASK(16)
|
||||
/* same mask for all interrupt-related registers */
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_MASK BIT(0)
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_STATUS_REG_OFFSET 0x00000600
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_PENDING_REG_OFFSET 0x00000610
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_ENABLE_REG_OFFSET 0x00000620
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_DISABLE_ALL_MASK BIT_MASK(0)
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_CLEAR_REG_OFFSET 0x00000630
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_CLEAR_ALL_MASK BIT_MASK(8)
|
||||
|
||||
/* 2.5 MHz, i.e., max MDIO clock according to IEEE spec */
|
||||
#define XILINX_AXIENET_MDIO_MDIO_TARGET_FREQUENCY_HZ 2500000
|
||||
#define XILINX_AXIENET_MDIO_INTERRUPT_TIMEOUT_MS 100
|
||||
|
||||
struct mdio_xilinx_axienet_data {
|
||||
struct k_sem irq_sema;
|
||||
uint16_t clock_divider;
|
||||
bool bus_enabled;
|
||||
};
|
||||
|
||||
struct mdio_xilinx_axienet_config {
|
||||
void *reg;
|
||||
unsigned int clock_frequency_hz;
|
||||
void (*config_func)(const struct mdio_xilinx_axienet_data *data);
|
||||
bool have_irq;
|
||||
};
|
||||
|
||||
static void xilinx_axienet_mdio_write_register(const struct mdio_xilinx_axienet_config *config,
|
||||
uintptr_t reg_offset, uint32_t value)
|
||||
{
|
||||
volatile uint32_t *reg_addr = (uint32_t *)((uint8_t *)(config->reg) + reg_offset);
|
||||
*reg_addr = value;
|
||||
barrier_dmem_fence_full(); /* make sure that write commits */
|
||||
}
|
||||
|
||||
static uint32_t xilinx_axienet_read_mdio_register(const struct mdio_xilinx_axienet_config *config,
|
||||
uintptr_t reg_offset)
|
||||
{
|
||||
const volatile uint32_t *reg_addr = (uint32_t *)((uint8_t *)(config->reg) + reg_offset);
|
||||
const uint32_t ret = *reg_addr;
|
||||
|
||||
barrier_dmem_fence_full(); /* make sure that read commits */
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mdio_xilinx_axienet_bus_disable(const struct device *dev)
|
||||
{
|
||||
const struct mdio_xilinx_axienet_config *config = dev->config;
|
||||
struct mdio_xilinx_axienet_data *data = dev->data;
|
||||
|
||||
LOG_INF("Disable MDIO Bus!");
|
||||
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_INTERRUPT_ENABLE_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_INTERRUPT_DISABLE_ALL_MASK);
|
||||
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_SETUP_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_SETUP_REG_MDIO_DISABLE_MASK);
|
||||
data->bus_enabled = false;
|
||||
}
|
||||
|
||||
static void enable_mdio_bus(const struct mdio_xilinx_axienet_config *config,
|
||||
struct mdio_xilinx_axienet_data *data)
|
||||
{
|
||||
|
||||
if ((xilinx_axienet_read_mdio_register(config, XILINX_AXIENET_MDIO_SETUP_REG_OFFSET) &
|
||||
XILINX_AXIENET_MDIO_SETUP_REG_MDIO_ENABLE_MASK) == 0) {
|
||||
int err;
|
||||
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_SETUP_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_SETUP_REG_MDIO_ENABLE_MASK |
|
||||
data->clock_divider);
|
||||
|
||||
xilinx_axienet_mdio_write_register(config,
|
||||
XILINX_AXIENET_MDIO_INTERRUPT_ENABLE_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_INTERRUPT_MASK);
|
||||
|
||||
if (config->have_irq) {
|
||||
LOG_DBG("Waiting for bus enable!");
|
||||
err = k_sem_take(&data->irq_sema,
|
||||
K_MSEC(XILINX_AXIENET_MDIO_INTERRUPT_TIMEOUT_MS));
|
||||
|
||||
if (err != 0) {
|
||||
LOG_ERR("Could not enable MDIO bus: %d (%s)", err, strerror(-err));
|
||||
}
|
||||
}
|
||||
|
||||
while ((xilinx_axienet_read_mdio_register(config,
|
||||
XILINX_AXIENET_MDIO_SETUP_REG_OFFSET) &
|
||||
XILINX_AXIENET_MDIO_SETUP_REG_MDIO_ENABLE_MASK) == 0) {
|
||||
LOG_DBG("Waiting for bus enable!");
|
||||
}
|
||||
|
||||
} else {
|
||||
data->bus_enabled = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void mdio_xilinx_axienet_bus_enable(const struct device *dev)
|
||||
{
|
||||
const struct mdio_xilinx_axienet_config *config = dev->config;
|
||||
struct mdio_xilinx_axienet_data *data = dev->data;
|
||||
uint16_t clock_divider, clock_divider_full;
|
||||
|
||||
if (!config->clock_frequency_hz) {
|
||||
LOG_ERR("No clock frequency specified for ethernet device!");
|
||||
return;
|
||||
}
|
||||
|
||||
/* this might result in a MDIO frequency that is a bit lower than the max frequency */
|
||||
clock_divider_full = DIV_ROUND_UP(config->clock_frequency_hz,
|
||||
XILINX_AXIENET_MDIO_MDIO_TARGET_FREQUENCY_HZ * 2);
|
||||
clock_divider = clock_divider_full & XILINX_AXIENET_MDIO_SETUP_REG_MDIO_CLOCK_DIVIDER_MASK;
|
||||
|
||||
if (clock_divider != clock_divider_full) {
|
||||
LOG_ERR("Clock divider overflow!");
|
||||
/* maximum divider value - lowest MDIO frequency we can achieve */
|
||||
clock_divider = XILINX_AXIENET_MDIO_SETUP_REG_MDIO_CLOCK_DIVIDER_MASK;
|
||||
}
|
||||
|
||||
data->clock_divider = clock_divider;
|
||||
|
||||
LOG_INF("Enable MDIO Bus assuming ethernet clock frequency %u divider %u!",
|
||||
config->clock_frequency_hz, clock_divider);
|
||||
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_SETUP_REG_OFFSET,
|
||||
clock_divider);
|
||||
|
||||
enable_mdio_bus(config, data);
|
||||
|
||||
LOG_INF("MDIO ready!");
|
||||
}
|
||||
|
||||
static int mdio_xilinx_axienet_read(const struct device *dev, uint8_t prtad, uint8_t devad,
|
||||
uint16_t *data)
|
||||
{
|
||||
const struct mdio_xilinx_axienet_config *config = dev->config;
|
||||
struct mdio_xilinx_axienet_data *dev_data = dev->data;
|
||||
int err;
|
||||
|
||||
if (k_is_in_isr()) {
|
||||
LOG_ERR("Called MDIO read in ISR!");
|
||||
return -EWOULDBLOCK;
|
||||
}
|
||||
|
||||
enable_mdio_bus(config, dev_data);
|
||||
|
||||
if (!dev_data->bus_enabled) {
|
||||
LOG_ERR("Bus needs to be enabled!");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
LOG_DBG("Waiting for IRQ from MDIO!");
|
||||
|
||||
xilinx_axienet_mdio_write_register(
|
||||
config, XILINX_AXIENET_MDIO_CONTROL_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_CONTROL_REG_MASK_INITIATE |
|
||||
(prtad << XILINX_AXIENET_MDIO_CONTROL_REG_SHIFT_PHYADDR) |
|
||||
(devad << XILINX_AXIENET_MDIO_CONTROL_REG_SHIFT_REGADDR) |
|
||||
XILINX_AXIENET_MDIO_CONTROL_REG_MASK_READ);
|
||||
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_INTERRUPT_ENABLE_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_INTERRUPT_MASK);
|
||||
|
||||
if (config->have_irq) {
|
||||
err = k_sem_take(&dev_data->irq_sema,
|
||||
K_MSEC(XILINX_AXIENET_MDIO_INTERRUPT_TIMEOUT_MS));
|
||||
|
||||
if (err != 0) {
|
||||
LOG_DBG("Error %d (%s) from IRQ semaphore - polling!", err, strerror(-err));
|
||||
}
|
||||
}
|
||||
|
||||
while ((xilinx_axienet_read_mdio_register(config, XILINX_AXIENET_MDIO_CONTROL_REG_OFFSET) &
|
||||
XILINX_AXIENET_MDIO_CONTROL_REG_MASK_READY) == 0x0) {
|
||||
LOG_DBG("Transfer is not yet ready!");
|
||||
}
|
||||
|
||||
LOG_DBG("IRQ from MDIO received - read complete!");
|
||||
|
||||
*data = (uint16_t)(xilinx_axienet_read_mdio_register(
|
||||
config, XILINX_AXIENET_MDIO_READ_DATA_REG_OFFSET) &
|
||||
XILINX_AXIENET_MDIO_READ_DATA_REG_DATA_MASK);
|
||||
|
||||
LOG_DBG("Read %" PRIu16 " from MDIO!", *data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mdio_xilinx_axienet_write(const struct device *dev, uint8_t prtad, uint8_t devad,
|
||||
uint16_t data)
|
||||
{
|
||||
const struct mdio_xilinx_axienet_config *config = dev->config;
|
||||
struct mdio_xilinx_axienet_data *dev_data = dev->data;
|
||||
int err;
|
||||
|
||||
if (k_is_in_isr()) {
|
||||
LOG_ERR("Called MDIO write in ISR!");
|
||||
return -EWOULDBLOCK;
|
||||
}
|
||||
|
||||
enable_mdio_bus(config, dev_data);
|
||||
|
||||
if (!dev_data->bus_enabled) {
|
||||
LOG_ERR("Bus needs to be enabled!");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
LOG_DBG("Waiting for IRQ from MDIO!");
|
||||
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_WRITE_DATA_REG_OFFSET, data);
|
||||
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_INTERRUPT_ENABLE_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_INTERRUPT_MASK);
|
||||
|
||||
xilinx_axienet_mdio_write_register(
|
||||
config, XILINX_AXIENET_MDIO_CONTROL_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_CONTROL_REG_MASK_INITIATE |
|
||||
(prtad << XILINX_AXIENET_MDIO_CONTROL_REG_SHIFT_PHYADDR) |
|
||||
(devad << XILINX_AXIENET_MDIO_CONTROL_REG_SHIFT_REGADDR) |
|
||||
XILINX_AXIENET_MDIO_CONTROL_REG_MASK_WRITE);
|
||||
|
||||
if (config->have_irq) {
|
||||
err = k_sem_take(&dev_data->irq_sema,
|
||||
K_MSEC(XILINX_AXIENET_MDIO_INTERRUPT_TIMEOUT_MS));
|
||||
if (err != 0) {
|
||||
LOG_DBG("Error %d (%s) from IRQ semaphore - polling!", err, strerror(-err));
|
||||
}
|
||||
}
|
||||
while ((xilinx_axienet_read_mdio_register(config, XILINX_AXIENET_MDIO_CONTROL_REG_OFFSET) &
|
||||
XILINX_AXIENET_MDIO_CONTROL_REG_MASK_READY) == 0x0) {
|
||||
LOG_DBG("IRQ from MDIO received but transfer is not yet ready!");
|
||||
}
|
||||
|
||||
LOG_DBG("IRQ from MDIO received - write complete!");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void xilinx_axienet_mdio_isr(const struct device *dev)
|
||||
{
|
||||
const struct mdio_xilinx_axienet_config *config = dev->config;
|
||||
struct mdio_xilinx_axienet_data *data = dev->data;
|
||||
uint32_t interrupt_status;
|
||||
|
||||
interrupt_status = xilinx_axienet_read_mdio_register(
|
||||
config, XILINX_AXIENET_MDIO_INTERRUPT_STATUS_REG_OFFSET);
|
||||
|
||||
if (interrupt_status & XILINX_AXIENET_MDIO_INTERRUPT_MASK) {
|
||||
k_sem_give(&data->irq_sema);
|
||||
LOG_DBG("MDIO interrupt received!");
|
||||
} else {
|
||||
LOG_DBG("Unknown interrupt received: %x!", interrupt_status);
|
||||
}
|
||||
xilinx_axienet_mdio_write_register(config, XILINX_AXIENET_MDIO_INTERRUPT_CLEAR_REG_OFFSET,
|
||||
XILINX_AXIENET_MDIO_INTERRUPT_MASK);
|
||||
}
|
||||
|
||||
static int xilinx_axienet_mdio_probe(const struct device *dev)
|
||||
{
|
||||
const struct mdio_xilinx_axienet_config *config = dev->config;
|
||||
struct mdio_xilinx_axienet_data *data = dev->data;
|
||||
int err;
|
||||
|
||||
if (config->have_irq) {
|
||||
err = k_sem_init(&data->irq_sema, 0, K_SEM_MAX_LIMIT);
|
||||
|
||||
if (err != 0) {
|
||||
LOG_ERR("Could not init semaphore: error %d (%s)", err, strerror(-err));
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
LOG_INF("Enabling IRQ!");
|
||||
config->config_func(data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct mdio_driver_api mdio_xilinx_axienet_api = {
|
||||
.bus_disable = mdio_xilinx_axienet_bus_disable,
|
||||
.bus_enable = mdio_xilinx_axienet_bus_enable,
|
||||
.read = mdio_xilinx_axienet_read,
|
||||
.write = mdio_xilinx_axienet_write};
|
||||
|
||||
#define SETUP_IRQS(inst) \
|
||||
IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), xilinx_axienet_mdio_isr, \
|
||||
DEVICE_DT_INST_GET(inst), 0); \
|
||||
\
|
||||
irq_enable(DT_INST_IRQN(inst))
|
||||
|
||||
#define XILINX_AXIENET_MDIO_INIT(inst) \
|
||||
\
|
||||
static void xilinx_axienet_mdio_config_##inst(const struct mdio_xilinx_axienet_data *data) \
|
||||
{ \
|
||||
\
|
||||
COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, interrupts), (SETUP_IRQS(inst)), \
|
||||
(LOG_INF("No IRQs defined!"))); \
|
||||
} \
|
||||
static const struct mdio_xilinx_axienet_config mdio_xilinx_axienet_config##inst = { \
|
||||
.config_func = xilinx_axienet_mdio_config_##inst, \
|
||||
.reg = (void *)(uintptr_t)DT_REG_ADDR(DT_INST_PARENT(inst)), \
|
||||
.clock_frequency_hz = DT_INST_PROP(inst, clock_frequency), \
|
||||
.have_irq = DT_INST_NODE_HAS_PROP(inst, interrupts)}; \
|
||||
static struct mdio_xilinx_axienet_data mdio_xilinx_axienet_data##inst = {0}; \
|
||||
DEVICE_DT_INST_DEFINE(inst, xilinx_axienet_mdio_probe, NULL, \
|
||||
&mdio_xilinx_axienet_data##inst, &mdio_xilinx_axienet_config##inst, \
|
||||
POST_KERNEL, CONFIG_MDIO_INIT_PRIORITY, &mdio_xilinx_axienet_api);
|
||||
|
||||
#define DT_DRV_COMPAT xlnx_axi_ethernet_1_00_a_mdio
|
||||
DT_INST_FOREACH_STATUS_OKAY(XILINX_AXIENET_MDIO_INIT)
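To make the clock-divider computation in mdio_xilinx_axienet_bus_enable() concrete: the driver targets an MDC of at most 2.5 MHz and derives the divider with DIV_ROUND_UP, clamping to the 6-bit field if it would overflow. A worked example, assuming a hypothetical 100 MHz AXI clock:

#include <assert.h>

/* Same rounding behaviour as the driver's DIV_ROUND_UP (redefined here to stay self-contained). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define MDIO_TARGET_HZ 2500000 /* 2.5 MHz, the IEEE maximum noted in the driver */

/* Hypothetical 100 MHz clock: ceil(100000000 / (2 * 2500000)) = 20, which fits
 * in the 6-bit divider field (maximum 63), so no clamping is needed.
 */
static_assert(DIV_ROUND_UP(100000000, 2 * MDIO_TARGET_HZ) == 20, "divider for a 100 MHz clock");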
dts/bindings/ethernet/xlnx,axi-ethernet-1.00.a.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
# Copyright (c) 2024 CISPA Helmholtz Center for Information Security
# SPDX-License-Identifier: Apache-2.0

description: Xilinx AXI 1G/2.5G Ethernet Subsystem base bindings.

include: [ethernet-controller.yaml]

compatible: "xlnx,axi-ethernet-1.00.a"

properties:
  interrupts:
    description: Xilinx AXI Ethernet has an optional interrupt for updating statistics.

  reg:
    description:
      Control registers of the MDIO device are also used for the Ethernet MAC controller.
      Thus, both devices should have a parent node with compatible "xlnx,axi-ethernet-subsystem-7.2"
      which defines the reg property, and MAC and MDIO should NOT have a reg property.

  clock-frequency:
    type: int

  axistream-connected:
    type: phandle
    required: true
    description: Xilinx AXI DMA that is connected to the data streams.

  axistream-control-connected:
    type: phandle
    description:
      Xilinx AXI DMA that is connected to the control streams.
      Automatically generated by Xilinx' device tree generator.
      Included in the schema to prevent spurious warnings for such autogenerated device trees.

  xlnx,rxcsum:
    type: int
    default: 0
    description: RX checksum offloading. 0 = none, 2 = full. 1 = partial is not supported.
    enum:
      - 0
      - 2
  xlnx,txcsum:
    type: int
    default: 0
    description: TX checksum offloading. 0 = none, 2 = full. 1 = partial is not supported.
    enum:
      - 0
      - 2
dts/bindings/mdio/xilinx,axi-ethernet-1.00.a-mdio.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
# Copyright (c) 2024 CISPA Helmholtz Center for Information Security
# SPDX-License-Identifier: Apache-2.0

# Common fields for MDIO controllers

include: mdio-controller.yaml

compatible: xlnx,axi-ethernet-1.00.a-mdio

description: MDIO interface of Xilinx AXI 1G/2.5G Ethernet Subsystem.

properties:
  reg:
    type: array
    description: |
      Control registers of the MDIO device are also used for the Ethernet MAC controller.
      Thus, both devices should have a parent node with compatible "xlnx,axi-ethernet-subsystem-7.2"
      which defines the reg property, and MAC and MDIO should NOT have a reg property.